/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
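
/*
 * Example (illustrative only): both parameters above can be set when the
 * module is loaded, e.g.
 *
 *      modprobe cxgb3 msi=1 ofld_disable=1
 *
 * which restricts the driver to MSI/legacy pin interrupts and leaves
 * offload disabled at init time.
 */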

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose link status is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}
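
/*
 * Illustrative result: on a two-port adapter whose interfaces came up as
 * eth0 and eth1 with one queue set each, /proc/interrupts would show the
 * slow-event vector under the adapter name plus data vectors named
 * "eth0 (queue 0)" and "eth1 (queue 0)".  (Hypothetical interface names.)
 */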

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

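        /*
         * Poll the first response queue's offload-packet counter; five
         * 10 ms sleeps give the firmware roughly 50 ms to answer all
         * outstanding management requests before we give up.
         */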
        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

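        /* One reply is expected for each of the 16 + 2048 + 2048 + 1
         * requests issued above.
         */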
        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
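
/*
 * Worked example (illustrative): with nq0 = 2 queue sets on port 0 and
 * nq1 = 2 on port 1, the first half of rspq_map alternates 0,1,0,1,...
 * and the second half alternates 2,3,2,3,..., so RSS hashes for port 1
 * land on that port's own queue sets.
 */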

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev);
                        if (err) {
                                t3_stop_sge_timers(adap);
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
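
/*
 * For example, CXGB3_ATTR_R(cam_size, ...) below expands to a
 * format_cam_size()/show_cam_size() pair plus a read-only
 * dev_attr_cam_size device attribute.
 */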

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
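        /*
         * The scheduler sends bpt bytes every cpt core-clock ticks.
         * Assuming vpd.cclk is the core clock in kHz, bytes/sec is
         * bpt * (cclk * 1000 / cpt); multiplying by 8/1000 (i.e. dividing
         * by 125) converts that to Kbps.
         */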
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adapter: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;
        int must_load;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        if (err && must_load)
                                goto out;
                }

                err = t3_check_tpsram_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        if (err && must_load)
                                goto out;
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

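        /*
         * linkpoll_period appears to be in units of 0.1 s (hence the
         * division by 10 below) and stats_update_period in seconds; a
         * zero timeout leaves the periodic check unscheduled.
         */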
        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t3_port_intr_disable(adapter, pi->port_id);
        netif_stop_queue(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

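        /* Register values land in the dump at the same byte offset as the
         * register's address, so a register can be located in the map by
         * its address; ranges never read keep the caller's zero fill.
         */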
        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
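        /*
         * Illustrative decoding: an adapter with params.rev == 2 on PCIe
         * would report 3 | (2 << 10) | (1 << 31) = 0x80000803.
         */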
1385
1386         /*
1387          * We skip the MAC statistics registers because they are clear-on-read.
1388          * Also reading multi-register stats would need to synchronize with the
1389          * periodic mac stats accumulation.  Hard to justify the complexity.
1390          */
1391         memset(buf, 0, T3_REGMAP_SIZE);
1392         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1393         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1394         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1395         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1396         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1397         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1398                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1399         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1400                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1401 }
1402
1403 static int restart_autoneg(struct net_device *dev)
1404 {
1405         struct port_info *p = netdev_priv(dev);
1406
1407         if (!netif_running(dev))
1408                 return -EAGAIN;
1409         if (p->link_config.autoneg != AUTONEG_ENABLE)
1410                 return -EINVAL;
1411         p->phy.ops->autoneg_restart(&p->phy);
1412         return 0;
1413 }
1414
1415 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1416 {
1417         struct port_info *pi = netdev_priv(dev);
1418         struct adapter *adapter = pi->adapter;
1419         int i;
1420
1421         if (data == 0)
1422                 data = 2;
1423
1424         for (i = 0; i < data * 2; i++) {
1425                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1426                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1427                 if (msleep_interruptible(500))
1428                         break;
1429         }
1430         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1431                          F_GPIO0_OUT_VAL);
1432         return 0;
1433 }
1434
1435 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1436 {
1437         struct port_info *p = netdev_priv(dev);
1438
1439         cmd->supported = p->link_config.supported;
1440         cmd->advertising = p->link_config.advertising;
1441
1442         if (netif_carrier_ok(dev)) {
1443                 cmd->speed = p->link_config.speed;
1444                 cmd->duplex = p->link_config.duplex;
1445         } else {
1446                 cmd->speed = -1;
1447                 cmd->duplex = -1;
1448         }
1449
1450         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1451         cmd->phy_address = p->phy.addr;
1452         cmd->transceiver = XCVR_EXTERNAL;
1453         cmd->autoneg = p->link_config.autoneg;
1454         cmd->maxtxpkt = 0;
1455         cmd->maxrxpkt = 0;
1456         return 0;
1457 }
1458
1459 static int speed_duplex_to_caps(int speed, int duplex)
1460 {
1461         int cap = 0;
1462
1463         switch (speed) {
1464         case SPEED_10:
1465                 if (duplex == DUPLEX_FULL)
1466                         cap = SUPPORTED_10baseT_Full;
1467                 else
1468                         cap = SUPPORTED_10baseT_Half;
1469                 break;
1470         case SPEED_100:
1471                 if (duplex == DUPLEX_FULL)
1472                         cap = SUPPORTED_100baseT_Full;
1473                 else
1474                         cap = SUPPORTED_100baseT_Half;
1475                 break;
1476         case SPEED_1000:
1477                 if (duplex == DUPLEX_FULL)
1478                         cap = SUPPORTED_1000baseT_Full;
1479                 else
1480                         cap = SUPPORTED_1000baseT_Half;
1481                 break;
1482         case SPEED_10000:
1483                 if (duplex == DUPLEX_FULL)
1484                         cap = SUPPORTED_10000baseT_Full;
1485         }
1486         return cap;
1487 }
1488
1489 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1490                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1491                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1492                       ADVERTISED_10000baseT_Full)
1493
1494 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1495 {
1496         struct port_info *p = netdev_priv(dev);
1497         struct link_config *lc = &p->link_config;
1498
1499         if (!(lc->supported & SUPPORTED_Autoneg))
1500                 return -EOPNOTSUPP;     /* can't change speed/duplex */
1501
1502         if (cmd->autoneg == AUTONEG_DISABLE) {
1503                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1504
1505                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1506                         return -EINVAL;
1507                 lc->requested_speed = cmd->speed;
1508                 lc->requested_duplex = cmd->duplex;
1509                 lc->advertising = 0;
1510         } else {
1511                 cmd->advertising &= ADVERTISED_MASK;
1512                 cmd->advertising &= lc->supported;
1513                 if (!cmd->advertising)
1514                         return -EINVAL;
1515                 lc->requested_speed = SPEED_INVALID;
1516                 lc->requested_duplex = DUPLEX_INVALID;
1517                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1518         }
1519         lc->autoneg = cmd->autoneg;
1520         if (netif_running(dev))
1521                 t3_link_start(&p->phy, &p->mac, lc);
1522         return 0;
1523 }
1524
1525 static void get_pauseparam(struct net_device *dev,
1526                            struct ethtool_pauseparam *epause)
1527 {
1528         struct port_info *p = netdev_priv(dev);
1529
1530         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1531         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1532         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1533 }
1534
1535 static int set_pauseparam(struct net_device *dev,
1536                           struct ethtool_pauseparam *epause)
1537 {
1538         struct port_info *p = netdev_priv(dev);
1539         struct link_config *lc = &p->link_config;
1540
1541         if (epause->autoneg == AUTONEG_DISABLE)
1542                 lc->requested_fc = 0;
1543         else if (lc->supported & SUPPORTED_Autoneg)
1544                 lc->requested_fc = PAUSE_AUTONEG;
1545         else
1546                 return -EINVAL;
1547
1548         if (epause->rx_pause)
1549                 lc->requested_fc |= PAUSE_RX;
1550         if (epause->tx_pause)
1551                 lc->requested_fc |= PAUSE_TX;
1552         if (lc->autoneg == AUTONEG_ENABLE) {
1553                 if (netif_running(dev))
1554                         t3_link_start(&p->phy, &p->mac, lc);
1555         } else {
1556                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1557                 if (netif_running(dev))
1558                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1559         }
1560         return 0;
1561 }
1562
1563 static u32 get_rx_csum(struct net_device *dev)
1564 {
1565         struct port_info *p = netdev_priv(dev);
1566
1567         return p->rx_csum_offload;
1568 }
1569
1570 static int set_rx_csum(struct net_device *dev, u32 data)
1571 {
1572         struct port_info *p = netdev_priv(dev);
1573
1574         p->rx_csum_offload = data;
1575         if (!data) {
1576                 struct adapter *adap = p->adapter;
1577                 int i;
1578
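                /* LRO depends on valid hardware Rx checksums, so disabling
                 * Rx checksum offload also disables LRO on this port's
                 * queue sets
                 */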
1579                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1580                         adap->sge.qs[i].lro_enabled = 0;
1581         }
1582         return 0;
1583 }
1584
1585 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1586 {
1587         struct port_info *pi = netdev_priv(dev);
1588         struct adapter *adapter = pi->adapter;
1589         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1590
1591         e->rx_max_pending = MAX_RX_BUFFERS;
1592         e->rx_mini_max_pending = 0;
1593         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1594         e->tx_max_pending = MAX_TXQ_ENTRIES;
1595
1596         e->rx_pending = q->fl_size;
1597         e->rx_mini_pending = q->rspq_size;
1598         e->rx_jumbo_pending = q->jumbo_size;
1599         e->tx_pending = q->txq_size[0];
1600 }
1601
1602 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1603 {
1604         struct port_info *pi = netdev_priv(dev);
1605         struct adapter *adapter = pi->adapter;
1606         struct qset_params *q;
1607         int i;
1608
1609         if (e->rx_pending > MAX_RX_BUFFERS ||
1610             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1611             e->tx_pending > MAX_TXQ_ENTRIES ||
1612             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1613             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1614             e->rx_pending < MIN_FL_ENTRIES ||
1615             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1616             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1617                 return -EINVAL;
1618
1619         if (adapter->flags & FULL_INIT_DONE)
1620                 return -EBUSY;
1621
1622         q = &adapter->params.sge.qset[pi->first_qset];
1623         for (i = 0; i < pi->nqsets; ++i, ++q) {
1624                 q->rspq_size = e->rx_mini_pending;
1625                 q->fl_size = e->rx_pending;
1626                 q->jumbo_size = e->rx_jumbo_pending;
1627                 q->txq_size[0] = e->tx_pending;
1628                 q->txq_size[1] = e->tx_pending;
1629                 q->txq_size[2] = e->tx_pending;
1630         }
1631         return 0;
1632 }
1633
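/*
 * The standard ethtool coalescing interface operates on queue set 0 only;
 * per-queue-set values are available through the
 * CHELSIO_{GET,SET}_QSET_PARAMS extension ioctls handled further below.
 */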
1634 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1635 {
1636         struct port_info *pi = netdev_priv(dev);
1637         struct adapter *adapter = pi->adapter;
1638         struct qset_params *qsp = &adapter->params.sge.qset[0];
1639         struct sge_qset *qs = &adapter->sge.qs[0];
1640
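        /* the SGE holdoff timer counts in 1/10 usec ticks, hence the
         * factor of ten in this bound
         */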
1641         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1642                 return -EINVAL;
1643
1644         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1645         t3_update_qset_coalesce(qs, qsp);
1646         return 0;
1647 }
1648
1649 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1650 {
1651         struct port_info *pi = netdev_priv(dev);
1652         struct adapter *adapter = pi->adapter;
1653         struct qset_params *q = adapter->params.sge.qset;
1654
1655         c->rx_coalesce_usecs = q->coalesce_usecs;
1656         return 0;
1657 }
1658
1659 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1660                       u8 * data)
1661 {
1662         struct port_info *pi = netdev_priv(dev);
1663         struct adapter *adapter = pi->adapter;
1664         int i, err = 0;
1665
1666         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1667         if (!buf)
1668                 return -ENOMEM;
1669
1670         e->magic = EEPROM_MAGIC;
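        /* read whole 32-bit words covering the requested range into an
         * aligned bounce buffer, then copy out only the bytes asked for
         */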
1671         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1672                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1673
1674         if (!err)
1675                 memcpy(data, buf + e->offset, e->len);
1676         kfree(buf);
1677         return err;
1678 }
1679
1680 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1681                       u8 * data)
1682 {
1683         struct port_info *pi = netdev_priv(dev);
1684         struct adapter *adapter = pi->adapter;
1685         u32 aligned_offset, aligned_len;
1686         __le32 *p;
1687         u8 *buf;
1688         int err;
1689
1690         if (eeprom->magic != EEPROM_MAGIC)
1691                 return -EINVAL;
1692
1693         aligned_offset = eeprom->offset & ~3;
1694         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1695
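        /*
         * For writes that are not 32-bit aligned, read the first and last
         * words of the affected range first so the surrounding bytes are
         * written back unchanged (read-modify-write).
         */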
1696         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1697                 buf = kmalloc(aligned_len, GFP_KERNEL);
1698                 if (!buf)
1699                         return -ENOMEM;
1700                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1701                 if (!err && aligned_len > 4)
1702                         err = t3_seeprom_read(adapter,
1703                                               aligned_offset + aligned_len - 4,
1704                                               (__le32 *)&buf[aligned_len - 4]);
1705                 if (err)
1706                         goto out;
1707                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1708         } else
1709                 buf = data;
1710
1711         err = t3_seeprom_wp(adapter, 0);
1712         if (err)
1713                 goto out;
1714
1715         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1716                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1717                 aligned_offset += 4;
1718         }
1719
1720         if (!err)
1721                 err = t3_seeprom_wp(adapter, 1);
1722 out:
1723         if (buf != data)
1724                 kfree(buf);
1725         return err;
1726 }
1727
1728 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1729 {
1730         wol->supported = 0;
1731         wol->wolopts = 0;
1732         memset(&wol->sopass, 0, sizeof(wol->sopass));
1733 }
1734
1735 static const struct ethtool_ops cxgb_ethtool_ops = {
1736         .get_settings = get_settings,
1737         .set_settings = set_settings,
1738         .get_drvinfo = get_drvinfo,
1739         .get_msglevel = get_msglevel,
1740         .set_msglevel = set_msglevel,
1741         .get_ringparam = get_sge_param,
1742         .set_ringparam = set_sge_param,
1743         .get_coalesce = get_coalesce,
1744         .set_coalesce = set_coalesce,
1745         .get_eeprom_len = get_eeprom_len,
1746         .get_eeprom = get_eeprom,
1747         .set_eeprom = set_eeprom,
1748         .get_pauseparam = get_pauseparam,
1749         .set_pauseparam = set_pauseparam,
1750         .get_rx_csum = get_rx_csum,
1751         .set_rx_csum = set_rx_csum,
1752         .set_tx_csum = ethtool_op_set_tx_csum,
1753         .set_sg = ethtool_op_set_sg,
1754         .get_link = ethtool_op_get_link,
1755         .get_strings = get_strings,
1756         .phys_id = cxgb3_phys_id,
1757         .nway_reset = restart_autoneg,
1758         .get_sset_count = get_sset_count,
1759         .get_ethtool_stats = get_stats,
1760         .get_regs_len = get_regs_len,
1761         .get_regs = get_regs,
1762         .get_wol = get_wol,
1763         .set_tso = ethtool_op_set_tso,
1764 };
1765
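/*
 * Range-check an ioctl parameter.  Negative values mean "leave this
 * setting unchanged" and therefore always pass.
 */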
1766 static int in_range(int val, int lo, int hi)
1767 {
1768         return val < 0 || (val <= hi && val >= lo);
1769 }
1770
1771 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1772 {
1773         struct port_info *pi = netdev_priv(dev);
1774         struct adapter *adapter = pi->adapter;
1775         u32 cmd;
1776         int ret;
1777
1778         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1779                 return -EFAULT;
1780
1781         switch (cmd) {
1782         case CHELSIO_SET_QSET_PARAMS:{
1783                 int i;
1784                 struct qset_params *q;
1785                 struct ch_qset_params t;
1786
1787                 if (!capable(CAP_NET_ADMIN))
1788                         return -EPERM;
1789                 if (copy_from_user(&t, useraddr, sizeof(t)))
1790                         return -EFAULT;
1791                 if (t.qset_idx >= SGE_QSETS)
1792                         return -EINVAL;
1793                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1794                     !in_range(t.cong_thres, 0, 255) ||
1795                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1796                               MAX_TXQ_ENTRIES) ||
1797                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1798                               MAX_TXQ_ENTRIES) ||
1799                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1800                               MAX_CTRL_TXQ_ENTRIES) ||
1801                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1802                               MAX_RX_BUFFERS) ||
1803                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1804                               MAX_RX_JUMBO_BUFFERS) ||
1805                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1806                               MAX_RSPQ_ENTRIES))
1807                         return -EINVAL;
1808                 if ((adapter->flags & FULL_INIT_DONE) &&
1809                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1810                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1811                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1812                         t.polling >= 0 || t.cong_thres >= 0))
1813                         return -EBUSY;
1814
1815                 q = &adapter->params.sge.qset[t.qset_idx];
1816
1817                 if (t.rspq_size >= 0)
1818                         q->rspq_size = t.rspq_size;
1819                 if (t.fl_size[0] >= 0)
1820                         q->fl_size = t.fl_size[0];
1821                 if (t.fl_size[1] >= 0)
1822                         q->jumbo_size = t.fl_size[1];
1823                 if (t.txq_size[0] >= 0)
1824                         q->txq_size[0] = t.txq_size[0];
1825                 if (t.txq_size[1] >= 0)
1826                         q->txq_size[1] = t.txq_size[1];
1827                 if (t.txq_size[2] >= 0)
1828                         q->txq_size[2] = t.txq_size[2];
1829                 if (t.cong_thres >= 0)
1830                         q->cong_thres = t.cong_thres;
1831                 if (t.intr_lat >= 0) {
1832                         struct sge_qset *qs =
1833                                 &adapter->sge.qs[t.qset_idx];
1834
1835                         q->coalesce_usecs = t.intr_lat;
1836                         t3_update_qset_coalesce(qs, q);
1837                 }
1838                 if (t.polling >= 0) {
1839                         if (adapter->flags & USING_MSIX)
1840                                 q->polling = t.polling;
1841                         else {
1842                                 /* No polling with INTx for T3A */
1843                                 if (adapter->params.rev == 0 &&
1844                                         !(adapter->flags & USING_MSI))
1845                                         t.polling = 0;
1846
1847                                 for (i = 0; i < SGE_QSETS; i++) {
1848                         q = &adapter->params.sge.qset[i];
1850                                         q->polling = t.polling;
1851                                 }
1852                         }
1853                 }
1854                 if (t.lro >= 0) {
1855                         struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1856                         q->lro = t.lro;
1857                         qs->lro_enabled = t.lro;
1858                 }
1859                 break;
1860         }
1861         case CHELSIO_GET_QSET_PARAMS:{
1862                 struct qset_params *q;
1863                 struct ch_qset_params t;
1864
1865                 if (copy_from_user(&t, useraddr, sizeof(t)))
1866                         return -EFAULT;
1867                 if (t.qset_idx >= SGE_QSETS)
1868                         return -EINVAL;
1869
1870                 q = &adapter->params.sge.qset[t.qset_idx];
1871                 t.rspq_size = q->rspq_size;
1872                 t.txq_size[0] = q->txq_size[0];
1873                 t.txq_size[1] = q->txq_size[1];
1874                 t.txq_size[2] = q->txq_size[2];
1875                 t.fl_size[0] = q->fl_size;
1876                 t.fl_size[1] = q->jumbo_size;
1877                 t.polling = q->polling;
1878                 t.lro = q->lro;
1879                 t.intr_lat = q->coalesce_usecs;
1880                 t.cong_thres = q->cong_thres;
1881
1882                 if (copy_to_user(useraddr, &t, sizeof(t)))
1883                         return -EFAULT;
1884                 break;
1885         }
1886         case CHELSIO_SET_QSET_NUM:{
1887                 struct ch_reg edata;
1888                 unsigned int i, first_qset = 0, other_qsets = 0;
1889
1890                 if (!capable(CAP_NET_ADMIN))
1891                         return -EPERM;
1892                 if (adapter->flags & FULL_INIT_DONE)
1893                         return -EBUSY;
1894                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1895                         return -EFAULT;
1896                 if (edata.val < 1 ||
1897                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1898                         return -EINVAL;
1899
1900                 for_each_port(adapter, i)
1901                         if (adapter->port[i] && adapter->port[i] != dev)
1902                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1903
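                /* the requested count must fit alongside the queue sets
                 * already claimed by the other ports
                 */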
1904                 if (edata.val + other_qsets > SGE_QSETS)
1905                         return -EINVAL;
1906
1907                 pi->nqsets = edata.val;
1908
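                /* renumber first_qset contiguously across all ports */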
1909                 for_each_port(adapter, i)
1910                         if (adapter->port[i]) {
1911                                 pi = adap2pinfo(adapter, i);
1912                                 pi->first_qset = first_qset;
1913                                 first_qset += pi->nqsets;
1914                         }
1915                 break;
1916         }
1917         case CHELSIO_GET_QSET_NUM:{
1918                 struct ch_reg edata;
1919
1920                 edata.cmd = CHELSIO_GET_QSET_NUM;
1921                 edata.val = pi->nqsets;
1922                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1923                         return -EFAULT;
1924                 break;
1925         }
1926         case CHELSIO_LOAD_FW:{
1927                 u8 *fw_data;
1928                 struct ch_mem_range t;
1929
1930                 if (!capable(CAP_SYS_RAWIO))
1931                         return -EPERM;
1932                 if (copy_from_user(&t, useraddr, sizeof(t)))
1933                         return -EFAULT;
                /* bound the user-supplied length before trusting it in
                 * kmalloc(); the 8MB ceiling is an assumed limit,
                 * comfortably above any T3 firmware image
                 */
                if (!t.len || t.len > 8 << 20)
                        return -EINVAL;
1935                 fw_data = kmalloc(t.len, GFP_KERNEL);
1936                 if (!fw_data)
1937                         return -ENOMEM;
1938
1939                 if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
1941                         kfree(fw_data);
1942                         return -EFAULT;
1943                 }
1944
1945                 ret = t3_load_fw(adapter, fw_data, t.len);
1946                 kfree(fw_data);
1947                 if (ret)
1948                         return ret;
1949                 break;
1950         }
1951         case CHELSIO_SETMTUTAB:{
1952                 struct ch_mtus m;
1953                 int i;
1954
1955                 if (!is_offload(adapter))
1956                         return -EOPNOTSUPP;
1957                 if (!capable(CAP_NET_ADMIN))
1958                         return -EPERM;
1959                 if (offload_running(adapter))
1960                         return -EBUSY;
1961                 if (copy_from_user(&m, useraddr, sizeof(m)))
1962                         return -EFAULT;
1963                 if (m.nmtus != NMTUS)
1964                         return -EINVAL;
1965                 if (m.mtus[0] < 81)     /* accommodate SACK */
1966                         return -EINVAL;
1967
1968                 /* MTUs must be in ascending order */
1969                 for (i = 1; i < NMTUS; ++i)
1970                         if (m.mtus[i] < m.mtus[i - 1])
1971                                 return -EINVAL;
1972
1973                 memcpy(adapter->params.mtus, m.mtus,
1974                         sizeof(adapter->params.mtus));
1975                 break;
1976         }
1977         case CHELSIO_GET_PM:{
1978                 struct tp_params *p = &adapter->params.tp;
1979                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1980
1981                 if (!is_offload(adapter))
1982                         return -EOPNOTSUPP;
1983                 m.tx_pg_sz = p->tx_pg_size;
1984                 m.tx_num_pg = p->tx_num_pgs;
1985                 m.rx_pg_sz = p->rx_pg_size;
1986                 m.rx_num_pg = p->rx_num_pgs;
1987                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1988                 if (copy_to_user(useraddr, &m, sizeof(m)))
1989                         return -EFAULT;
1990                 break;
1991         }
1992         case CHELSIO_SET_PM:{
1993                 struct ch_pm m;
1994                 struct tp_params *p = &adapter->params.tp;
1995
1996                 if (!is_offload(adapter))
1997                         return -EOPNOTSUPP;
1998                 if (!capable(CAP_NET_ADMIN))
1999                         return -EPERM;
2000                 if (adapter->flags & FULL_INIT_DONE)
2001                         return -EBUSY;
2002                 if (copy_from_user(&m, useraddr, sizeof(m)))
2003                         return -EFAULT;
2004                 if (!is_power_of_2(m.rx_pg_sz) ||
2005                         !is_power_of_2(m.tx_pg_sz))
2006                         return -EINVAL; /* not power of 2 */
2007                 if (!(m.rx_pg_sz & 0x14000))
2008                         return -EINVAL; /* not 16KB or 64KB */
2009                 if (!(m.tx_pg_sz & 0x1554000))
2010                         return -EINVAL; /* not 16KB-16MB in 4x steps */
2011                 if (m.tx_num_pg == -1)
2012                         m.tx_num_pg = p->tx_num_pgs;
2013                 if (m.rx_num_pg == -1)
2014                         m.rx_num_pg = p->rx_num_pgs;
2015                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2016                         return -EINVAL;
2017                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2018                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2019                         return -EINVAL;
2020                 p->rx_pg_size = m.rx_pg_sz;
2021                 p->tx_pg_size = m.tx_pg_sz;
2022                 p->rx_num_pgs = m.rx_num_pg;
2023                 p->tx_num_pgs = m.tx_num_pg;
2024                 break;
2025         }
2026         case CHELSIO_GET_MEM:{
2027                 struct ch_mem_range t;
2028                 struct mc7 *mem;
2029                 u64 buf[32];
2030
2031                 if (!is_offload(adapter))
2032                         return -EOPNOTSUPP;
2033                 if (!(adapter->flags & FULL_INIT_DONE))
2034                         return -EIO;    /* need the memory controllers */
2035                 if (copy_from_user(&t, useraddr, sizeof(t)))
2036                         return -EFAULT;
2037                 if ((t.addr & 7) || (t.len & 7))
2038                         return -EINVAL;
2039                 if (t.mem_id == MEM_CM)
2040                         mem = &adapter->cm;
2041                 else if (t.mem_id == MEM_PMRX)
2042                         mem = &adapter->pmrx;
2043                 else if (t.mem_id == MEM_PMTX)
2044                         mem = &adapter->pmtx;
2045                 else
2046                         return -EINVAL;
2047
2048                 /*
2049                  * Version scheme:
2050                  * bits 0..9: chip version
2051                  * bits 10..15: chip revision
2052                  */
2053                 t.version = 3 | (adapter->params.rev << 10);
2054                 if (copy_to_user(useraddr, &t, sizeof(t)))
2055                         return -EFAULT;
2056
2057                 /*
2058                  * Read 256 bytes at a time as len can be large and we don't
2059                  * want to use huge intermediate buffers.
2060                  */
2061                 useraddr += sizeof(t);  /* advance to start of buffer */
2062                 while (t.len) {
2063                         unsigned int chunk =
2064                                 min_t(unsigned int, t.len, sizeof(buf));
2065
2066                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2067                                              buf);
2069                         if (ret)
2070                                 return ret;
2071                         if (copy_to_user(useraddr, buf, chunk))
2072                                 return -EFAULT;
2073                         useraddr += chunk;
2074                         t.addr += chunk;
2075                         t.len -= chunk;
2076                 }
2077                 break;
2078         }
2079         case CHELSIO_SET_TRACE_FILTER:{
2080                 struct ch_trace t;
2081                 const struct trace_params *tp;
2082
2083                 if (!capable(CAP_NET_ADMIN))
2084                         return -EPERM;
2085                 if (!offload_running(adapter))
2086                         return -EAGAIN;
2087                 if (copy_from_user(&t, useraddr, sizeof(t)))
2088                         return -EFAULT;
2089
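                /* struct ch_trace mirrors struct trace_params starting at
                 * its sip field, so the prefix can be cast directly
                 * (layout compatibility assumed by this interface)
                 */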
2090                 tp = (const struct trace_params *)&t.sip;
2091                 if (t.config_tx)
2092                         t3_config_trace_filter(adapter, tp, 0,
2093                                                 t.invert_match,
2094                                                 t.trace_tx);
2095                 if (t.config_rx)
2096                         t3_config_trace_filter(adapter, tp, 1,
2097                                                 t.invert_match,
2098                                                 t.trace_rx);
2099                 break;
2100         }
2101         default:
2102                 return -EOPNOTSUPP;
2103         }
2104         return 0;
2105 }
2106
2107 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2108 {
2109         struct mii_ioctl_data *data = if_mii(req);
2110         struct port_info *pi = netdev_priv(dev);
2111         struct adapter *adapter = pi->adapter;
2112         int ret, mmd;
2113
2114         switch (cmd) {
2115         case SIOCGMIIPHY:
2116                 data->phy_id = pi->phy.addr;
2117                 /* FALLTHRU */
2118         case SIOCGMIIREG:{
2119                 u32 val;
2120                 struct cphy *phy = &pi->phy;
2121
2122                 if (!phy->mdio_read)
2123                         return -EOPNOTSUPP;
2124                 if (is_10G(adapter)) {
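                        /* 10G PHYs use clause 45 MDIO: the MMD is carried
                         * in the upper bits of phy_id, defaulting to the
                         * PCS device when none is given
                         */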
2125                         mmd = data->phy_id >> 8;
2126                         if (!mmd)
2127                                 mmd = MDIO_DEV_PCS;
2128                         else if (mmd > MDIO_DEV_XGXS)
2129                                 return -EINVAL;
2130
2131                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2132                                              mmd, data->reg_num, &val);
2133                 } else
2134                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2135                                              0, data->reg_num & 0x1f, &val);
2139                 if (!ret)
2140                         data->val_out = val;
2141                 break;
2142         }
2143         case SIOCSMIIREG:{
2144                 struct cphy *phy = &pi->phy;
2145
2146                 if (!capable(CAP_NET_ADMIN))
2147                         return -EPERM;
2148                 if (!phy->mdio_write)
2149                         return -EOPNOTSUPP;
2150                 if (is_10G(adapter)) {
2151                         mmd = data->phy_id >> 8;
2152                         if (!mmd)
2153                                 mmd = MDIO_DEV_PCS;
2154                         else if (mmd > MDIO_DEV_XGXS)
2155                                 return -EINVAL;
2156
2157                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2158                                               mmd, data->reg_num,
2159                                               data->val_in);
2160                 } else
2161                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2162                                               0, data->reg_num & 0x1f,
2163                                               data->val_in);
2168                 break;
2169         }
2170         case SIOCCHIOCTL:
2171                 return cxgb_extension_ioctl(dev, req->ifr_data);
2172         default:
2173                 return -EOPNOTSUPP;
2174         }
2175         return ret;
2176 }
2177
2178 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2179 {
2180         struct port_info *pi = netdev_priv(dev);
2181         struct adapter *adapter = pi->adapter;
2182         int ret;
2183
2184         if (new_mtu < 81)       /* accommodate SACK */
2185                 return -EINVAL;
2186         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2187                 return ret;
2188         dev->mtu = new_mtu;
2189         init_port_mtus(adapter);
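        /* on rev 0 (T3A) parts the offload MTU table is capped at port 0's
         * MTU (a hardware limitation, as far as this driver assumes), so
         * reload the table when an MTU changes while offload is running
         */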
2190         if (adapter->params.rev == 0 && offload_running(adapter))
2191                 t3_load_mtus(adapter, adapter->params.mtus,
2192                              adapter->params.a_wnd, adapter->params.b_wnd,
2193                              adapter->port[0]->mtu);
2194         return 0;
2195 }
2196
2197 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2198 {
2199         struct port_info *pi = netdev_priv(dev);
2200         struct adapter *adapter = pi->adapter;
2201         struct sockaddr *addr = p;
2202
2203         if (!is_valid_ether_addr(addr->sa_data))
2204                 return -EINVAL;
2205
2206         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2207         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2208         if (offload_running(adapter))
2209                 write_smt_entry(adapter, pi->port_id);
2210         return 0;
2211 }
2212
2213 /**
2214  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2215  * @adap: the adapter
2216  * @p: the port
2217  *
2218  * Ensures that current Rx processing on any of the queues associated with
2219  * the given port completes before returning.  We do this by acquiring and
2220  * releasing the locks of the response queues associated with the port.
2221  */
2222 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2223 {
2224         int i;
2225
2226         for (i = 0; i < p->nqsets; i++) {
2227                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2228
2229                 spin_lock_irq(&q->lock);
2230                 spin_unlock_irq(&q->lock);
2231         }
2232 }
2233
2234 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2235 {
2236         struct port_info *pi = netdev_priv(dev);
2237         struct adapter *adapter = pi->adapter;
2238
2239         pi->vlan_grp = grp;
2240         if (adapter->params.rev > 0)
2241                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2242         else {
2243                 /* single control for all ports */
2244                 unsigned int i, have_vlans = 0;
2245                 for_each_port(adapter, i)
2246                         have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2247
2248                 t3_set_vlan_accel(adapter, 1, have_vlans);
2249         }
2250         t3_synchronize_rx(adapter, pi);
2251 }
2252
2253 #ifdef CONFIG_NET_POLL_CONTROLLER
2254 static void cxgb_netpoll(struct net_device *dev)
2255 {
2256         struct port_info *pi = netdev_priv(dev);
2257         struct adapter *adapter = pi->adapter;
2258         int qidx;
2259
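        /* call the interrupt handler directly for each of this port's
         * queue sets, passing the cookie it would normally receive
         * (the queue set for MSI-X, the adapter otherwise)
         */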
2260         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2261                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2262                 void *source;
2263
2264                 if (adapter->flags & USING_MSIX)
2265                         source = qs;
2266                 else
2267                         source = adapter;
2268
2269                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2270         }
2271 }
2272 #endif
2273
2274 /*
2275  * Periodic accumulation of MAC statistics.
2276  */
2277 static void mac_stats_update(struct adapter *adapter)
2278 {
2279         int i;
2280
2281         for_each_port(adapter, i) {
2282                 struct net_device *dev = adapter->port[i];
2283                 struct port_info *p = netdev_priv(dev);
2284
2285                 if (netif_running(dev)) {
2286                         spin_lock(&adapter->stats_lock);
2287                         t3_mac_update_stats(&p->mac);
2288                         spin_unlock(&adapter->stats_lock);
2289                 }
2290         }
2291 }
2292
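/*
 * Poll the link state of ports whose PHYs cannot signal link changes
 * through an interrupt.
 */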
2293 static void check_link_status(struct adapter *adapter)
2294 {
2295         int i;
2296
2297         for_each_port(adapter, i) {
2298                 struct net_device *dev = adapter->port[i];
2299                 struct port_info *p = netdev_priv(dev);
2300
2301                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2302                         t3_link_changed(adapter, i);
2303         }
2304 }
2305
2306 static void check_t3b2_mac(struct adapter *adapter)
2307 {
2308         int i;
2309
2310         if (!rtnl_trylock())    /* synchronize with ifdown */
2311                 return;
2312
2313         for_each_port(adapter, i) {
2314                 struct net_device *dev = adapter->port[i];
2315                 struct port_info *p = netdev_priv(dev);
2316                 int status;
2317
2318                 if (!netif_running(dev))
2319                         continue;
2320
2321                 status = 0;
2322                 if (netif_carrier_ok(dev))
2323                         status = t3b2_mac_watchdog_task(&p->mac);
2324                 if (status == 1)
2325                         p->mac.stats.num_toggled++;
2326                 else if (status == 2) {
2327                         struct cmac *mac = &p->mac;
2328
2329                         t3_mac_set_mtu(mac, dev->mtu);
2330                         t3_mac_set_address(mac, 0, dev->dev_addr);
2331                         cxgb_set_rxmode(dev);
2332                         t3_link_start(&p->phy, mac, &p->link_config);
2333                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2334                         t3_port_intr_enable(adapter, p->port_id);
2335                         p->mac.stats.num_resets++;
2336                 }
2337         }
2338         rtnl_unlock();
2339 }
2340
2342 static void t3_adap_check_task(struct work_struct *work)
2343 {
2344         struct adapter *adapter = container_of(work, struct adapter,
2345                                                adap_check_task.work);
2346         const struct adapter_params *p = &adapter->params;
2347
2348         adapter->check_task_cnt++;
2349
2350         /* Check link status for PHYs without interrupts */
2351         if (p->linkpoll_period)
2352                 check_link_status(adapter);
2353
        /* Accumulate MAC stats on every tick when link polling is off;
         * otherwise only once enough polling periods (in tenths of a
         * second) add up to a full stats update period.
         */
2355         if (!p->linkpoll_period ||
2356             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2357             p->stats_update_period) {
2358                 mac_stats_update(adapter);
2359                 adapter->check_task_cnt = 0;
2360         }
2361
2362         if (p->rev == T3_REV_B2)
2363                 check_t3b2_mac(adapter);
2364
2365         /* Schedule the next check update if any port is active. */
2366         spin_lock_irq(&adapter->work_lock);
2367         if (adapter->open_device_map & PORT_MASK)
2368                 schedule_chk_task(adapter);
2369         spin_unlock_irq(&adapter->work_lock);
2370 }
2371
2372 /*
2373  * Processes external (PHY) interrupts in process context.
2374  */
2375 static void ext_intr_task(struct work_struct *work)
2376 {
2377         struct adapter *adapter = container_of(work, struct adapter,
2378                                                ext_intr_handler_task);
2379
2380         t3_phy_intr_handler(adapter);
2381
2382         /* Now reenable external interrupts */
2383         spin_lock_irq(&adapter->work_lock);
2384         if (adapter->slow_intr_mask) {
2385                 adapter->slow_intr_mask |= F_T3DBG;
2386                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2387                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2388                              adapter->slow_intr_mask);
2389         }
2390         spin_unlock_irq(&adapter->work_lock);
2391 }
2392
2393 /*
2394  * Interrupt-context handler for external (PHY) interrupts.
2395  */
2396 void t3_os_ext_intr_handler(struct adapter *adapter)
2397 {
2398         /*
2399          * Schedule a task to handle external interrupts as they may be slow
2400          * and we use a mutex to protect MDIO registers.  We disable PHY
2401          * interrupts in the meantime and let the task reenable them when
2402          * it's done.
2403          */
2404         spin_lock(&adapter->work_lock);
2405         if (adapter->slow_intr_mask) {
2406                 adapter->slow_intr_mask &= ~F_T3DBG;
2407                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2408                              adapter->slow_intr_mask);
2409                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2410         }
2411         spin_unlock(&adapter->work_lock);
2412 }
2413
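/*
 * Quiesce the adapter after a fatal error: close every running port, shut
 * down the offload device if it is open, stop the SGE timers and, when
 * requested, reset the chip before disabling the PCI device.
 */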
2414 static int t3_adapter_error(struct adapter *adapter, int reset)
2415 {
2416         int i, ret = 0;
2417
2418         /* Stop all ports */
2419         for_each_port(adapter, i) {
2420                 struct net_device *netdev = adapter->port[i];
2421
2422                 if (netif_running(netdev))
2423                         cxgb_close(netdev);
2424         }
2425
2426         if (is_offload(adapter) &&
2427             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2428                 offload_close(&adapter->tdev);
2429
2430         /* Stop SGE timers */
2431         t3_stop_sge_timers(adapter);
2432
2433         adapter->flags &= ~FULL_INIT_DONE;
2434
2435         if (reset)
2436                 ret = t3_reset_adapter(adapter);
2437
2438         pci_disable_device(adapter->pdev);
2439
2440         return ret;
2441 }
2442
2443 static int t3_reenable_adapter(struct adapter *adapter)
2444 {
2445         if (pci_enable_device(adapter->pdev)) {
2446                 dev_err(&adapter->pdev->dev,
2447                         "Cannot re-enable PCI device after reset.\n");
2448                 goto err;
2449         }
2450         pci_set_master(adapter->pdev);
2451         pci_restore_state(adapter->pdev);
2452
2453         /* Free sge resources */
2454         t3_free_sge_resources(adapter);
2455
2456         if (t3_replay_prep_adapter(adapter))
2457                 goto err;
2458
2459         return 0;
2460 err:
2461         return -1;
2462 }
2463
2464 static void t3_resume_ports(struct adapter *adapter)
2465 {
2466         int i;
2467
2468         /* Restart the ports */
2469         for_each_port(adapter, i) {
2470                 struct net_device *netdev = adapter->port[i];
2471
2472                 if (netif_running(netdev)) {
2473                         if (cxgb_open(netdev)) {
2474                                 dev_err(&adapter->pdev->dev,
2475                                         "can't bring device back up after reset\n");
2477                                 continue;
2478                         }
2479                 }
2480         }
2481 }
2482
2483 /*
2484  * Processes a fatal error: bring the ports down, reset the chip, then
2485  * bring the ports back up.
2486  */
2487 static void fatal_error_task(struct work_struct *work)
2488 {
2489         struct adapter *adapter = container_of(work, struct adapter,
2490                                                fatal_error_handler_task);
2491         int err = 0;
2492
2493         rtnl_lock();
2494         err = t3_adapter_error(adapter, 1);
2495         if (!err)
2496                 err = t3_reenable_adapter(adapter);
2497         if (!err)
2498                 t3_resume_ports(adapter);
2499
2500         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2501         rtnl_unlock();
2502 }
2503
2504 void t3_fatal_err(struct adapter *adapter)
2505 {
2506         unsigned int fw_status[4];
2507
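        /* if the adapter came fully up, silence the SGE and both XGMACs,
         * then defer the actual reset to the fatal error worker
         */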
2508         if (adapter->flags & FULL_INIT_DONE) {
2509                 t3_sge_stop(adapter);
2510                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2511                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2512                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2513                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2514
2515                 spin_lock(&adapter->work_lock);
2516                 t3_intr_disable(adapter);
2517                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2518                 spin_unlock(&adapter->work_lock);
2519         }
2520         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2521         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2522                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2523                          fw_status[0], fw_status[1],
2524                          fw_status[2], fw_status[3]);
2526 }
2527
2528 /**
2529  * t3_io_error_detected - called when PCI error is detected
2530  * @pdev: Pointer to PCI device
2531  * @state: The current pci connection state
2532  *
2533  * This function is called after a PCI bus error affecting
2534  * this device has been detected.
2535  */
2536 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2537                                              pci_channel_state_t state)
2538 {
2539         struct adapter *adapter = pci_get_drvdata(pdev);
2541
2542         t3_adapter_error(adapter, 0);
2543
2544         /* Request a slot reset. */
2545         return PCI_ERS_RESULT_NEED_RESET;
2546 }
2547
2548 /**
2549  * t3_io_slot_reset - called after the pci bus has been reset.
2550  * @pdev: Pointer to PCI device
2551  *
2552  * Restart the card from scratch, as if from a cold-boot.
2553  */
2554 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2555 {
2556         struct adapter *adapter = pci_get_drvdata(pdev);
2557
2558         if (!t3_reenable_adapter(adapter))
2559                 return PCI_ERS_RESULT_RECOVERED;
2560
2561         return PCI_ERS_RESULT_DISCONNECT;
2562 }
2563
2564 /**
2565  * t3_io_resume - called when traffic can start flowing again.
2566  * @pdev: Pointer to PCI device
2567  *
2568  * This callback is called when the error recovery driver tells us that
2569  * it's OK to resume normal operation.
2570  */
2571 static void t3_io_resume(struct pci_dev *pdev)
2572 {
2573         struct adapter *adapter = pci_get_drvdata(pdev);
2574
2575         t3_resume_ports(adapter);
2576 }
2577
2578 static struct pci_error_handlers t3_err_handler = {
2579         .error_detected = t3_io_error_detected,
2580         .slot_reset = t3_io_slot_reset,
2581         .resume = t3_io_resume,
2582 };
2583
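/*
 * Try to enable MSI-X: one vector per SGE queue set plus one for slow-path
 * (firmware/PHY) events.  A positive return from pci_enable_msix() reports
 * how many vectors were actually available, in which case MSI-X is not used.
 */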
2584 static int __devinit cxgb_enable_msix(struct adapter *adap)
2585 {
2586         struct msix_entry entries[SGE_QSETS + 1];
2587         int i, err;
2588
2589         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2590                 entries[i].entry = i;
2591
2592         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2593         if (!err) {
2594                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2595                         adap->msix_info[i].vec = entries[i].vector;
2596         } else if (err > 0)
2597                 dev_info(&adap->pdev->dev,
2598                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2599         return err;
2600 }
2601
2602 static void __devinit print_port_info(struct adapter *adap,
2603                                       const struct adapter_info *ai)
2604 {
2605         static const char *pci_variant[] = {
2606                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2607         };
2608
2609         int i;
2610         char buf[80];
2611
2612         if (is_pcie(adap))
2613                 snprintf(buf, sizeof(buf), "%s x%d",
2614                          pci_variant[adap->params.pci.variant],
2615                          adap->params.pci.width);
2616         else
2617                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2618                          pci_variant[adap->params.pci.variant],
2619                          adap->params.pci.speed, adap->params.pci.width);
2620
2621         for_each_port(adap, i) {
2622                 struct net_device *dev = adap->port[i];
2623                 const struct port_info *pi = netdev_priv(dev);
2624
2625                 if (!test_bit(i, &adap->registered_device_map))
2626                         continue;
2627                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2628                        dev->name, ai->desc, pi->port_type->desc,
2629                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2630                        (adap->flags & USING_MSIX) ? " MSI-X" :
2631                        (adap->flags & USING_MSI) ? " MSI" : "");
2632                 if (adap->name == dev->name && adap->params.vpd.mclk)
2633                         printk(KERN_INFO
2634                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2635                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2636                                t3_mc7_size(&adap->pmtx) >> 20,
2637                                t3_mc7_size(&adap->pmrx) >> 20,
2638                                adap->params.vpd.sn);
2639         }
2640 }
2641
2642 static int __devinit init_one(struct pci_dev *pdev,
2643                               const struct pci_device_id *ent)
2644 {
2645         static int version_printed;
2646
2647         int i, err, pci_using_dac = 0;
2648         unsigned long mmio_start, mmio_len;
2649         const struct adapter_info *ai;
2650         struct adapter *adapter = NULL;
2651         struct port_info *pi;
2652
2653         if (!version_printed) {
2654                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2655                 ++version_printed;
2656         }
2657
2658         if (!cxgb3_wq) {
2659                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2660                 if (!cxgb3_wq) {
2661                         printk(KERN_ERR DRV_NAME
2662                                ": cannot initialize work queue\n");
2663                         return -ENOMEM;
2664                 }
2665         }
2666
2667         err = pci_request_regions(pdev, DRV_NAME);
2668         if (err) {
2669                 /* Just info, some other driver may have claimed the device. */
2670                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2671                 return err;
2672         }
2673
2674         err = pci_enable_device(pdev);
2675         if (err) {
2676                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2677                 goto out_release_regions;
2678         }
2679
2680         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2681                 pci_using_dac = 1;
2682                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2683                 if (err) {
2684                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2685                                "coherent allocations\n");
2686                         goto out_disable_device;
2687                 }
2688         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2689                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2690                 goto out_disable_device;
2691         }
2692
2693         pci_set_master(pdev);
2694         pci_save_state(pdev);
2695
2696         mmio_start = pci_resource_start(pdev, 0);
2697         mmio_len = pci_resource_len(pdev, 0);
2698         ai = t3_get_adapter_info(ent->driver_data);
2699
2700         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2701         if (!adapter) {
2702                 err = -ENOMEM;
2703                 goto out_disable_device;
2704         }
2705
2706         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2707         if (!adapter->regs) {
2708                 dev_err(&pdev->dev, "cannot map device registers\n");
2709                 err = -ENOMEM;
2710                 goto out_free_adapter;
2711         }
2712
2713         adapter->pdev = pdev;
2714         adapter->name = pci_name(pdev);
2715         adapter->msg_enable = dflt_msg_enable;
2716         adapter->mmio_len = mmio_len;
2717
2718         mutex_init(&adapter->mdio_lock);
2719         spin_lock_init(&adapter->work_lock);
2720         spin_lock_init(&adapter->stats_lock);
2721
2722         INIT_LIST_HEAD(&adapter->adapter_list);
2723         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2724         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2725         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2726
2727         for (i = 0; i < ai->nports; ++i) {
2728                 struct net_device *netdev;
2729
2730                 netdev = alloc_etherdev(sizeof(struct port_info));
2731                 if (!netdev) {
2732                         err = -ENOMEM;
2733                         goto out_free_dev;
2734                 }
2735
2736                 SET_NETDEV_DEV(netdev, &pdev->dev);
2737
2738                 adapter->port[i] = netdev;
2739                 pi = netdev_priv(netdev);
2740                 pi->adapter = adapter;
2741                 pi->rx_csum_offload = 1;
2742                 pi->nqsets = 1;
2743                 pi->first_qset = i;
2744                 pi->activity = 0;
2745                 pi->port_id = i;
2746                 netif_carrier_off(netdev);
2747                 netdev->irq = pdev->irq;
2748                 netdev->mem_start = mmio_start;
2749                 netdev->mem_end = mmio_start + mmio_len - 1;
2750                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2751                 netdev->features |= NETIF_F_LLTX;
2752                 if (pci_using_dac)
2753                         netdev->features |= NETIF_F_HIGHDMA;
2754
2755                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2756                 netdev->vlan_rx_register = vlan_rx_register;
2757
2758                 netdev->open = cxgb_open;
2759                 netdev->stop = cxgb_close;
2760                 netdev->hard_start_xmit = t3_eth_xmit;
2761                 netdev->get_stats = cxgb_get_stats;
2762                 netdev->set_multicast_list = cxgb_set_rxmode;
2763                 netdev->do_ioctl = cxgb_ioctl;
2764                 netdev->change_mtu = cxgb_change_mtu;
2765                 netdev->set_mac_address = cxgb_set_mac_addr;
2766 #ifdef CONFIG_NET_POLL_CONTROLLER
2767                 netdev->poll_controller = cxgb_netpoll;
2768 #endif
2769
2770                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2771         }
2772
2773         pci_set_drvdata(pdev, adapter);
2774         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2775                 err = -ENODEV;
2776                 goto out_free_dev;
2777         }
2778
2779         /*
2780          * The card is now ready to go.  If any errors occur during device
2781          * registration we do not fail the whole card but rather proceed only
2782          * with the ports we manage to register successfully.  However we must
2783          * register at least one net device.
2784          */
2785         for_each_port(adapter, i) {
2786                 err = register_netdev(adapter->port[i]);
2787                 if (err)
2788                         dev_warn(&pdev->dev,
2789                                  "cannot register net device %s, skipping\n",
2790                                  adapter->port[i]->name);
2791                 else {
2792                         /*
2793                          * Change the name we use for messages to the name of
2794                          * the first successfully registered interface.
2795                          */
2796                         if (!adapter->registered_device_map)
2797                                 adapter->name = adapter->port[i]->name;
2798
2799                         __set_bit(i, &adapter->registered_device_map);
2800                 }
2801         }
2802         if (!adapter->registered_device_map) {
2803                 dev_err(&pdev->dev, "could not register any net devices\n");
2804                 goto out_free_dev;
2805         }
2806
2807         /* Driver's ready. Reflect it on LEDs */
2808         t3_led_ready(adapter);
2809
2810         if (is_offload(adapter)) {
2811                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2812                 cxgb3_adapter_ofld(adapter);
2813         }
2814
2815         /* See what interrupts we'll be using */
2816         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2817                 adapter->flags |= USING_MSIX;
2818         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2819                 adapter->flags |= USING_MSI;
2820
        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs group\n");
2823
2824         print_port_info(adapter, ai);
2825         return 0;
2826
2827 out_free_dev:
2828         iounmap(adapter->regs);
2829         for (i = ai->nports - 1; i >= 0; --i)
2830                 if (adapter->port[i])
2831                         free_netdev(adapter->port[i]);
2832
2833 out_free_adapter:
2834         kfree(adapter);
2835
2836 out_disable_device:
2837         pci_disable_device(pdev);
2838 out_release_regions:
2839         pci_release_regions(pdev);
2840         pci_set_drvdata(pdev, NULL);
2841         return err;
2842 }
2843
2844 static void __devexit remove_one(struct pci_dev *pdev)
2845 {
2846         struct adapter *adapter = pci_get_drvdata(pdev);
2847
2848         if (adapter) {
2849                 int i;
2850
2851                 t3_sge_stop(adapter);
2852                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2853                                    &cxgb3_attr_group);
2854
2855                 if (is_offload(adapter)) {
2856                         cxgb3_adapter_unofld(adapter);
2857                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2858                                      &adapter->open_device_map))
2859                                 offload_close(&adapter->tdev);
2860                 }
2861
2862                 for_each_port(adapter, i)
2863                         if (test_bit(i, &adapter->registered_device_map))
2864                                 unregister_netdev(adapter->port[i]);
2865
2866                 t3_stop_sge_timers(adapter);
2867                 t3_free_sge_resources(adapter);
2868                 cxgb_disable_msi(adapter);
2869
2870                 for_each_port(adapter, i)
2871                         if (adapter->port[i])
2872                                 free_netdev(adapter->port[i]);
2873
2874                 iounmap(adapter->regs);
2875                 kfree(adapter);
2876                 pci_release_regions(pdev);
2877                 pci_disable_device(pdev);
2878                 pci_set_drvdata(pdev, NULL);
2879         }
2880 }
2881
2882 static struct pci_driver driver = {
2883         .name = DRV_NAME,
2884         .id_table = cxgb3_pci_tbl,
2885         .probe = init_one,
2886         .remove = __devexit_p(remove_one),
2887         .err_handler = &t3_err_handler,
2888 };
2889
2890 static int __init cxgb3_init_module(void)
2891 {
2892         cxgb3_offload_init();
2893
2894         return pci_register_driver(&driver);
2898 }
2899
2900 static void __exit cxgb3_cleanup_module(void)
2901 {
2902         pci_unregister_driver(&driver);
2903         if (cxgb3_wq)
2904                 destroy_workqueue(cxgb3_wq);
2905 }
2906
2907 module_init(cxgb3_init_module);
2908 module_exit(cxgb3_cleanup_module);