]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/chelsio/cxgb/cxgb2.c
Merge remote-tracking branch 'net-next/master'
[karo-tx-linux.git] / drivers / net / ethernet / chelsio / cxgb / cxgb2.c
1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, write to the Free Software Foundation, Inc.,   *
15  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16  *                                                                           *
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20  *                                                                           *
21  * http://www.chelsio.com                                                    *
22  *                                                                           *
23  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24  * All rights reserved.                                                      *
25  *                                                                           *
26  * Maintainers: maintainers@chelsio.com                                      *
27  *                                                                           *
28  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29  *          Tina Yang               <tainay@chelsio.com>                     *
30  *          Felix Marti             <felix@chelsio.com>                      *
31  *          Scott Bardone           <sbardone@chelsio.com>                   *
32  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33  *          Frank DiMambro          <frank@chelsio.com>                      *
34  *                                                                           *
35  * History:                                                                  *
36  *                                                                           *
37  ****************************************************************************/
38
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
50
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
59
60 #include <linux/workqueue.h>
61
/* Arm the delayed work that refreshes MAC statistics, to fire in @secs seconds. */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}
66
/* Cancel a pending MAC statistics update, if any is queued. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
71
/* Upper/lower bounds for the SGE queue sizes configurable via ethtool. */
#define MAX_CMDQ_ENTRIES        16384
#define MAX_CMDQ1_ENTRIES       1024
#define MAX_RX_BUFFERS          16384
#define MAX_RX_JUMBO_BUFFERS    16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MAX_TX_BUFFERS          1460U
#define MIN_FL_ENTRIES          32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

/* Default netif message level, overridable at module load time. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock selectors passed to t1_clock(): high-speed vs. low-speed (powersave). */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Set non-zero to force legacy INTx interrupts instead of MSI. */
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* PCI bus speeds in MHz, indexed by the 2-bit speed field. */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
117
/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	/* Wrap the netdev so the MAC layer can walk its address filters. */
	rm.dev = dev;
	mac->ops->set_rx_mode(mac, &rm);
}
130
131 static void link_report(struct port_info *p)
132 {
133         if (!netif_carrier_ok(p->dev))
134                 netdev_info(p->dev, "link down\n");
135         else {
136                 const char *s = "10Mbps";
137
138                 switch (p->link_config.speed) {
139                         case SPEED_10000: s = "10Gbps"; break;
140                         case SPEED_1000:  s = "1000Mbps"; break;
141                         case SPEED_100:   s = "100Mbps"; break;
142                 }
143
144                 netdev_info(p->dev, "link up, %s, %s-duplex\n",
145                             s, p->link_config.duplex == DUPLEX_FULL
146                             ? "full" : "half");
147         }
148 }
149
/*
 * Called by lower-layer link code when the negotiated link state changes.
 * Updates the netdev carrier state, logs the transition, and on multi-port
 * adapters reprograms the SGE scheduler with the new port speed.
 */
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	/* Only act on an actual up/down transition. */
	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}
180
/*
 * Bring up a port's link: reset the MAC, program its address and RX mode,
 * kick off PHY autonegotiation, then enable both MAC directions.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	/* Not all MACs support setting the address; skip when absent. */
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
192
/* Enable TP-block checksum offload; IP csum offload only when TSO is possible. */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
199
/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 *
 * Performs one-time HW module init, requests the interrupt line (MSI when
 * available, otherwise shared INTx), and starts the SGE and interrupts.
 * Returns 0 on success or a negative errno.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI unless disabled via module parameter or unavailable. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		/* Undo MSI enable if the IRQ could not be claimed. */
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
235
/*
 * Release resources when all the ports have been stopped.
 *
 * Reverses cxgb_up(): stops the SGE, disables and frees the interrupt,
 * and releases MSI if it was in use.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
247
/*
 * ndo_open handler: bring the adapter up on first open, start this port's
 * link and TX queue, and start the periodic stats task when this is the
 * first open port. Returns 0 or a negative errno from cxgb_up().
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	/* First port opened brings up the shared adapter state. */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}
270
/*
 * ndo_stop handler: quiesce this port and, when it is the last open port,
 * stop the stats task and tear the adapter down.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/* Lock/unlock pair waits out a concurrently running update
		 * task before the work item is cancelled. */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
296
/*
 * ndo_get_stats handler: pull a full MAC statistics update from hardware
 * and translate it into the generic net_device_stats counters.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
344
/* ethtool get_msglevel: report the adapter's message-enable bitmap. */
static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return adapter->msg_enable;
}
351
/* ethtool set_msglevel: set the adapter's message-enable bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}
358
/*
 * Names reported by ethtool -S. The order here MUST match the order in
 * which get_stats() writes values into its output array.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
425
/* Size of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool get_regs_len: fixed-size register map. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
432
/* ethtool get_drvinfo: report driver name, version, and PCI bus address. */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}
442
443 static int get_sset_count(struct net_device *dev, int sset)
444 {
445         switch (sset) {
446         case ETH_SS_STATS:
447                 return ARRAY_SIZE(stats_strings);
448         default:
449                 return -EOPNOTSUPP;
450         }
451 }
452
/* ethtool get_strings: copy out the statistics name table. */
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
458
/*
 * ethtool get_ethtool_stats: gather MAC, SGE port, SGE interrupt, and
 * (when present) ESPI counters. Output order MUST match stats_strings[].
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/* ESPI counters exist only on adapters with an ESPI block. */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
540
/*
 * Copy the adapter registers in [start, end] (byte offsets, inclusive)
 * into @buf at the same offsets.
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}
549
/* ethtool get_regs: dump the interesting register blocks into @buf. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	/* Zero first so gaps between the dumped blocks read as 0. */
	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
572
/*
 * ethtool get_settings: report link capabilities and, when the carrier is
 * up, the current speed/duplex (otherwise -1 for "unknown").
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, p->link_config.speed);
		cmd->duplex = p->link_config.duplex;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
597
598 static int speed_duplex_to_caps(int speed, int duplex)
599 {
600         int cap = 0;
601
602         switch (speed) {
603         case SPEED_10:
604                 if (duplex == DUPLEX_FULL)
605                         cap = SUPPORTED_10baseT_Full;
606                 else
607                         cap = SUPPORTED_10baseT_Half;
608                 break;
609         case SPEED_100:
610                 if (duplex == DUPLEX_FULL)
611                         cap = SUPPORTED_100baseT_Full;
612                 else
613                         cap = SUPPORTED_100baseT_Half;
614                 break;
615         case SPEED_1000:
616                 if (duplex == DUPLEX_FULL)
617                         cap = SUPPORTED_1000baseT_Full;
618                 else
619                         cap = SUPPORTED_1000baseT_Half;
620                 break;
621         case SPEED_10000:
622                 if (duplex == DUPLEX_FULL)
623                         cap = SUPPORTED_10000baseT_Full;
624         }
625         return cap;
626 }
627
/* All link modes this driver can advertise during autonegotiation. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

/*
 * ethtool set_settings: program either a forced speed/duplex or a new
 * advertising mask, then restart the link if the interface is running.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(cmd);
		int cap = speed_duplex_to_caps(speed, cmd->duplex);

		/* Forcing 1Gbps is rejected outright here. */
		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* More than one bit set: fall back to everything supported. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
667
/* ethtool get_pauseparam: report flow-control autoneg and RX/TX pause state. */
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
678
/*
 * ethtool set_pauseparam: update the requested flow-control settings and
 * apply them — via link restart when autonegotiating, or directly on the
 * MAC when forced.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		/* -1/-1 keeps the current speed and duplex. */
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
708
/* ethtool get_ringparam: report SGE free-list and command-queue sizes. */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	/* On T1B the jumbo free list is index 1, otherwise index 0. */
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
722
/*
 * ethtool set_ringparam: validate and store new SGE queue sizes. Only
 * allowed before the hardware has been fully initialized (first open).
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	/* On T1B the jumbo free list is index 1, otherwise index 0. */
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* cmdQ 1 is capped at its own, smaller, maximum. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
746
/* ethtool set_coalesce: store the new parameters and push them to the SGE. */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}
757
/* ethtool get_coalesce: report the current interrupt coalescing parameters. */
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}
767
/* ethtool get_eeprom_len: only ASIC-based adapters expose an EEPROM. */
static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}
774
/* Magic identifying this adapter family's EEPROM contents to ethtool. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool get_eeprom: read the requested range word-by-word into an
 * aligned bounce buffer, then copy out exactly the bytes asked for.
 * NOTE(review): no explicit bounds check on offset+len here — presumably
 * the ethtool core caps requests at get_eeprom_len(); verify for this
 * kernel version.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	/* Round the start down to a word boundary for the SEEPROM reads. */
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
791
/* ethtool operations supported by this driver. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
};
813
814 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
815 {
816         struct adapter *adapter = dev->ml_priv;
817         struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
818
819         return mdio_mii_ioctl(mdio, if_mii(req), cmd);
820 }
821
822 static int t1_change_mtu(struct net_device *dev, int new_mtu)
823 {
824         int ret;
825         struct adapter *adapter = dev->ml_priv;
826         struct cmac *mac = adapter->port[dev->if_port].mac;
827
828         if (!mac->ops->set_mtu)
829                 return -EOPNOTSUPP;
830         if (new_mtu < 68)
831                 return -EINVAL;
832         if ((ret = mac->ops->set_mtu(mac, new_mtu)))
833                 return ret;
834         dev->mtu = new_mtu;
835         return 0;
836 }
837
838 static int t1_set_mac_addr(struct net_device *dev, void *p)
839 {
840         struct adapter *adapter = dev->ml_priv;
841         struct cmac *mac = adapter->port[dev->if_port].mac;
842         struct sockaddr *addr = p;
843
844         if (!mac->ops->macaddress_set)
845                 return -EOPNOTSUPP;
846
847         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
848         mac->ops->macaddress_set(mac, dev->dev_addr);
849         return 0;
850 }
851
852 static netdev_features_t t1_fix_features(struct net_device *dev,
853         netdev_features_t features)
854 {
855         /*
856          * Since there is no support for separate rx/tx vlan accel
857          * enable/disable make sure tx flag is always in same state as rx.
858          */
859         if (features & NETIF_F_HW_VLAN_CTAG_RX)
860                 features |= NETIF_F_HW_VLAN_CTAG_TX;
861         else
862                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
863
864         return features;
865 }
866
867 static int t1_set_features(struct net_device *dev, netdev_features_t features)
868 {
869         netdev_features_t changed = dev->features ^ features;
870         struct adapter *adapter = dev->ml_priv;
871
872         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
873                 t1_vlan_mode(adapter, features);
874
875         return 0;
876 }
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * ndo_poll_controller: invoke the interrupt handler directly with local
 * interrupts disabled, so netpoll (netconsole, kgdboe) can drive the NIC
 * without relying on interrupt delivery.
 */
static void t1_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	unsigned long flags;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
888
889 /*
890  * Periodic accumulation of MAC statistics.  This is used only if the MAC
891  * does not have any other way to prevent stats counter overflow.
892  */
893 static void mac_stats_task(struct work_struct *work)
894 {
895         int i;
896         struct adapter *adapter =
897                 container_of(work, struct adapter, stats_update_task.work);
898
899         for_each_port(adapter, i) {
900                 struct port_info *p = &adapter->port[i];
901
902                 if (netif_running(p->dev))
903                         p->mac->ops->statistics_update(p->mac,
904                                                        MAC_STATS_UPDATE_FAST);
905         }
906
907         /* Schedule the next statistics update if any port is active. */
908         spin_lock(&adapter->work_lock);
909         if (adapter->open_device_map & PORT_MASK)
910                 schedule_mac_stats_update(adapter,
911                                           adapter->params.stats_update_period);
912         spin_unlock(&adapter->work_lock);
913 }
914
/*
 * Processes elmer0 external interrupts in process context.
 *
 * Scheduled from t1_elmer0_ext_intr(), which masked F_PL_INTR_EXT before
 * queueing this work.  After servicing the external interrupt sources we
 * restore the mask bit, ack the latched cause, and rewrite the enable
 * register — in that order, under async_lock, so the ISR's view of
 * slow_intr_mask stays consistent.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Ack any EXT cause latched while we were servicing it. */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
933
/*
 * Interrupt-context handler for elmer0 external interrupts.
 *
 * Masks F_PL_INTR_EXT and defers the real work to ext_intr_task(), which
 * runs in process context and re-enables the interrupt when done.
 * NOTE(review): slow_intr_mask is updated here without async_lock —
 * presumably safe because we are in the interrupt path itself; confirm
 * against the callers of this function.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
949
/*
 * Handle a fatal hardware error: quiesce the SGE and mask all interrupts
 * (only if full initialization completed, i.e. those resources exist),
 * then log the condition.  The adapter is left suspended; no recovery is
 * attempted here.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
959
/* net_device operations for all ports; installed in init_one(). */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open               = cxgb_open,
	.ndo_stop               = cxgb_close,
	.ndo_start_xmit         = t1_start_xmit,
	.ndo_get_stats          = t1_get_stats,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = t1_set_rxmode,
	.ndo_do_ioctl           = t1_ioctl,
	.ndo_change_mtu         = t1_change_mtu,
	.ndo_set_mac_address    = t1_set_mac_addr,
	.ndo_fix_features       = t1_fix_features,
	.ndo_set_features       = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = t1_netpoll,
#endif
};
976
/*
 * PCI probe: bring up one adapter and register a net device per port.
 *
 * The first allocated netdev's private area doubles as the adapter
 * structure; subsequent ports allocate zero-sized-priv netdevs that all
 * point back at it via ml_priv.  Registration is best-effort per port:
 * the probe succeeds as long as at least one port registers.
 *
 * Returns 0 on success or a negative errno, unwinding partially
 * acquired resources on the error paths at the bottom.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory BAR; it holds the register file. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit masks. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first netdev carries the adapter in its priv. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* One-time adapter setup, done while allocating port 0. */
		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;    /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port net device setup. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on xmit. */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* If no port registered, err still holds the last failure code. */
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		/* Free in reverse order; unallocated slots are NULL. */
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}
1173
/*
 * Serially clock @nbits bits of @bitdata (MSB first) out to the clock
 * synthesizer through the ELMER0 GPO register, bit-banging the data and
 * clock lines with ~50us settle delays between transitions.  Caller must
 * hold tpi_lock (see t1_clock()).
 */
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	/* GPO bit positions of the synthesizer's serial clock and data. */
	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		/* Place the next data bit on the serial data line. */
		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high; data is latched on this rising edge. */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}
1211
/*
 * Re-clock a T1B ASIC between full speed (HCLOCK) and powersave (LCLOCK)
 * by serially reprogramming the core and memory clock synthesizers
 * through the ELMER0 GPO lines.
 *
 * @mode: bit 0 selects HCLOCK/LCLOCK; bit 1 set means "query only".
 * Returns 0 on success, -ENODEV on non-T1B chips, -EALREADY if the ASIC
 * is already running in the requested mode.
 *
 * The M/N/T values below are synthesizer divider parameters; the whole
 * load sequence (NP_LOAD pulse, bit-banged T/N/M words, S_LOAD_* strobe)
 * is order- and timing-sensitive, hence the pervasive udelay(50)s and
 * the tpi_lock held across the entire stream.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1321
/*
 * Software-reset the chip by cycling the PCI power-management CSR:
 * write 3 (D3hot) then 0 (D0).  Used on remove so the device comes back
 * in a clean state on the next probe.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1327
/*
 * PCI remove: tear down init_one() in reverse order — unregister the
 * ports that registered, release software modules, unmap registers,
 * free every allocated netdev (including port 0, which owns the adapter
 * structure), then release PCI resources and soft-reset the chip.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/* i is left at the port count by the loop above; walk back down. */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	t1_sw_reset(pdev);
}
1351
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver cxgb_pci_driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
};

module_pci_driver(cxgb_pci_driver);