/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "cxgb3_ioctl.h"
#include "cxgb3_offload.h"
#include "cxgb3_ctl_defs.h"
#include "firmware_exports.h"

	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
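/*
 * For example, CH_DEVICE(0x20, 0) expands to
 * { PCI_VENDOR_ID_CHELSIO, 0x20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 };
 * the final field becomes the entry's PCI driver_data, an index used to
 * look up per-device parameters at probe time.
 */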
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
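/*
 * Usage example: "modprobe cxgb3 msi=0" forces legacy INTx interrupts,
 * while the default lets the driver pick MSI-X when the platform
 * supports it.
 */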
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down. Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch. If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete. Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
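/*
 * A minimal sketch of the module-init side (the actual init path is
 * outside this excerpt): the private queue is created once at module
 * load, e.g.
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 */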
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))

		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			     A_XGM_INT_CAUSE + pi->mac.offset,
		t3_set_reg_field(adap,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);

		netif_carrier_off(dev);
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes. The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))

	if (link_stat != netif_carrier_ok(dev)) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);

			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,

			pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the index of the port the PHY belongs to
 *
 *	This is the OS-dependent handler for PHY module changes. It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);

static void cxgb_set_rxmode(struct net_device *dev)
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
static inline void cxgb_disable_msi(struct adapter *adapter)
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
	t3_slow_intr_handler(cookie);

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;

static int request_msix_data_irqs(struct adapter *adap)
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
				free_irq(adap->msix_info[qidx + 1].vec,
					 &adap->sge.qs[qidx]);

static void free_irq_resources(struct adapter *adapter)
	if (adapter->flags & USING_MSIX) {

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
		free_irq(adapter->pdev->irq, adapter);
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {

static int init_tp_parity(struct adapter *adap)
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
			skb = adap->nofail_skb;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
			skb = adap->nofail_skb;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
			skb = adap->nofail_skb;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
		skb = adap->nofail_skb;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);

	t3_tp_set_offload_mode(adap, 0);

	t3_tp_set_offload_mode(adap, 0);
/**
 *	setup_rss - configure RSS
 *
 *	Sets up RSS to distribute packets to multiple receive queues. We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
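/*
 * Worked example (assuming two ports with nq0 = nq1 = 2 qsets each):
 * the first half of rspq_map cycles 0,1,0,1,... for port 0 and the
 * second half cycles 2,3,2,3,... for port 1, so RSS hash buckets are
 * spread evenly across each port's own response queues.
 */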
static void init_napi(struct adapter *adap)

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list. Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;

/*
 * Wait until all NAPI handlers are descheduled. This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 */
static void quiesce_rx(struct adapter *adap)

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);

static void enable_all_napi(struct adapter *adap)

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	configured with the same value.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
				t3_free_sge_resources(adap);
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))

	/* Synchronize with ioctls that may shut down the device */
	len = (*format) (to_net_dev(d), buf);

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)

	if (!capable(CAP_NET_ADMIN))

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)

	ret = (*set) (to_net_dev(d), val);

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
	return attr_show(d, buf, format_##name); \

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
	if (val && adap->params.rev == 0)
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	adap->params.mc5.nfilters = val;

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
	return attr_store(d, buf, len, set_nfilters, 0, ~0);

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	adap->params.mc5.nservers = val;

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
	return attr_store(d, buf, len, set_nservers, 0, ~0);

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;

	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);

	bpt = (v >> 8) & 0xff;

		len = sprintf(buf, "disabled\n");

		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;

	if (!capable(CAP_NET_ADMIN))

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)

	ret = t3_config_sched(adap, val, sched);

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
	return tm_attr_show(d, buf, sched); \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
	return tm_attr_store(d, buf, len, sched); \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
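/*
 * The TM_ATTR(schedN, N) instantiations that define the sched0..sched7
 * attributes referenced below are elided from this excerpt; each one
 * expands to a show/store pair bound to hardware Tx scheduler N.
 */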
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)

	ret = t3_offload_tx(tdev, skb);

static int write_smt_entry(struct adapter *adapter, int idx)
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	offload_tx(&adapter->tdev, skb);

static int init_smt(struct adapter *adapter)

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);

static void init_port_mtus(struct adapter *adapter)
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		skb = adap->nofail_skb;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
		if (!adap->nofail_skb)

static int bind_qsets(struct adapter *adap)

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
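/*
 * With hypothetical version macros FW_VERSION_* = 7.12.0, and
 * t3rev2char() returning 'b' for a rev-B part, these templates would
 * expand to "cxgb3/t3fw-7.12.0.bin" and "cxgb3/t3b_psram-7.12.0.bin".
 */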
static int upgrade_fw(struct adapter *adap)
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

static inline char t3rev2char(struct adapter *adapter)

	switch(adapter->params.rev) {

static int update_tpsram(struct adapter *adap)
	const struct firmware *tpsram;
	struct device *dev = &adap->pdev->dev;

	rev = t3rev2char(adap);

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
		dev_info(dev,
			 "successful update of protocol engine "
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		dev_err(dev, "loading protocol SRAM failed\n");

	release_firmware(tpsram);
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);

		if (!(adap->flags & NAPI_INIT))

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);

		err = request_msix_data_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
				      (adap->flags & USING_MSI) ?

	enable_all_napi(adap);

	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
		adap->flags |= QUEUES_BOUND;

	CH_ERR(adap, "request_irq failed, err %d\n", err);
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */

static void schedule_chk_task(struct adapter *adap)

	timeo = adap->params.linkpoll_period ?
		(HZ * adap->params.linkpoll_period) / 10 :
		adap->params.stats_update_period * HZ;
	queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
static int offload_open(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))

	if (!adap_up && (err = cxgb_up(adapter)) < 0)

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

	/* restore them in case the offload module has changed them */
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);

static int offload_close(struct t3cdev *tdev)
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)

	cxgb3_offload_deactivate(adapter);
static int cxgb_open(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
				"Could not initialize offload capabilities\n");

	dev->real_num_tx_queues = pi->nqsets;
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
		schedule_chk_task(adapter);

static int cxgb_close(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
static u32 get_msglevel(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;

static void set_msglevel(struct net_device *dev, u32 val)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",
	"CheckTXEnToggled ",

static int get_sset_count(struct net_device *dev, int sset)
		return ARRAY_SIZE(stats_strings);

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
	return T3_REGMAP_SIZE;
static int get_eeprom_len(struct net_device *dev)

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
		strcpy(info->fw_version, "N/A");
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);

	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation. Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
static int restart_autoneg(struct net_device *dev)
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
	if (p->link_config.autoneg != AUTONEG_ENABLE)
	p->phy.ops->autoneg_restart(&p->phy);

static int cxgb3_phys_id(struct net_device *dev, u32 data)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))

	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;

static int speed_duplex_to_caps(int speed, int duplex)
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
			cap = SUPPORTED_10baseT_Half;
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
			cap = SUPPORTED_100baseT_Half;
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
			cap = SUPPORTED_1000baseT_Half;
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex. See if that's what's
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

			if (lc->supported & cap)

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;

	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
static u32 get_rx_csum(struct net_device *dev)
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;

static int set_rx_csum(struct net_device *dev, u32 data)
	struct port_info *p = netdev_priv(dev);

		p->rx_offload |= T3_RX_CSUM;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)

	if (adapter->flags & FULL_INIT_DONE)

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

		memcpy(data, buf + e->offset, e->len);

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;

	if (eeprom->magic != EEPROM_MAGIC)

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);

	err = t3_seeprom_wp(adapter, 0);

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;

		err = t3_seeprom_wp(adapter, 1);
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	memset(&wol->sopass, 0, sizeof(wol->sopass));

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.set_tso = ethtool_op_set_tso,
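/*
 * Several ioctl parameters below use a negative value to mean "leave
 * this setting unchanged", so in_range() deliberately accepts val < 0.
 */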
static int in_range(int val, int lo, int hi)
	return val < 0 || (val <= hi && val >= lo);
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))

	case CHELSIO_SET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
		if (copy_from_user(&t, useraddr, sizeof(t)))
		if (t.qset_idx >= SGE_QSETS)
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
		    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
				 MAX_RX_JUMBO_BUFFERS)
		    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;

		if (t.qset_idx < q1)
		if (t.qset_idx > q1 + nqsets - 1)

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
					q->polling = t.polling;

		set_qset_lro(dev, t.qset_idx, t.lro);
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (copy_from_user(&t, useraddr, sizeof(t)))

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;

		if (t.qset_idx >= nqsets)

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
		if (adapter->flags & FULL_INIT_DONE)
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;

	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
	case CHELSIO_LOAD_FW:{
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
		if (copy_from_user(&t, useraddr, sizeof(t)))
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);

		    (fw_data, useraddr + sizeof(t), t.len)) {

		ret = t3_load_fw(adapter, fw_data, t.len);

	case CHELSIO_SETMTUTAB:{

		if (!is_offload(adapter))
		if (!capable(CAP_NET_ADMIN))
		if (offload_running(adapter))
		if (copy_from_user(&m, useraddr, sizeof(m)))
		if (m.nmtus != NMTUS)
		if (m.mtus[0] < 81)	/* accommodate SACK */

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))

		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))

	case CHELSIO_SET_PM:{
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
		if (!capable(CAP_NET_ADMIN))
		if (adapter->flags & FULL_INIT_DONE)
		if (copy_from_user(&m, useraddr, sizeof(m)))
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;

		if (!is_offload(adapter))
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
		if ((t.addr & 7) || (t.len & 7))
		if (t.mem_id == MEM_CM)
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;

		/*
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
			if (copy_to_user(useraddr, buf, chunk))

	case CHELSIO_SET_TRACE_FILTER:{
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
		if (!offload_running(adapter))
		if (copy_from_user(&t, useraddr, sizeof(t)))

		tp = (const struct trace_params *)&t.sip;
			t3_config_trace_filter(adapter, tp, 0,
			t3_config_trace_filter(adapter, tp, 1,
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);

		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
		return cxgb_extension_ioctl(dev, req->ifr_data);
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (new_mtu < 81)	/* accommodate SACK */
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))

	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
2427 /**
2428 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2429 * @adap: the adapter
2430 * @p: the port
2431 *
2432 * Ensures that current Rx processing on any of the queues associated with
2433 * the given port completes before returning. We do this by acquiring and
2434 * releasing the locks of the response queues associated with the port.
2435 */
2436 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2440 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2441 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2443 spin_lock_irq(&q->lock);
2444 spin_unlock_irq(&q->lock);
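/*
 * The empty lock/unlock pair above is deliberate: the acquisition
 * cannot succeed until any Rx handler currently holding the lock has
 * finished, so acquire-then-release acts as a completion barrier for
 * each response queue:
 *
 *	spin_lock_irq(&q->lock);	// blocks until in-flight Rx drains
 *	spin_unlock_irq(&q->lock);	// nothing to do; waiting was the point
 */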
2448 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2450 struct port_info *pi = netdev_priv(dev);
2451 struct adapter *adapter = pi->adapter;
2453 pi->vlan_grp = grp;
2454 if (adapter->params.rev > 0)
2455 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2456 else {
2457 /* single control for all ports */
2458 unsigned int i, have_vlans = 0;
2459 for_each_port(adapter, i)
2460 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2462 t3_set_vlan_accel(adapter, 1, have_vlans);
2464 t3_synchronize_rx(adapter, pi);
2467 #ifdef CONFIG_NET_POLL_CONTROLLER
2468 static void cxgb_netpoll(struct net_device *dev)
2470 struct port_info *pi = netdev_priv(dev);
2471 struct adapter *adapter = pi->adapter;
2474 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2475 struct sge_qset *qs = &adapter->sge.qs[qidx];
2476 void *source;
2478 if (adapter->flags & USING_MSIX)
2479 source = qs;
2480 else
2481 source = adapter;
2483 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2488 /*
2489 * Periodic accumulation of MAC statistics.
2490 */
2491 static void mac_stats_update(struct adapter *adapter)
2495 for_each_port(adapter, i) {
2496 struct net_device *dev = adapter->port[i];
2497 struct port_info *p = netdev_priv(dev);
2499 if (netif_running(dev)) {
2500 spin_lock(&adapter->stats_lock);
2501 t3_mac_update_stats(&p->mac);
2502 spin_unlock(&adapter->stats_lock);
2507 static void check_link_status(struct adapter *adapter)
2511 for_each_port(adapter, i) {
2512 struct net_device *dev = adapter->port[i];
2513 struct port_info *p = netdev_priv(dev);
2516 spin_lock_irq(&adapter->work_lock);
2517 link_fault = p->link_fault;
2518 spin_unlock_irq(&adapter->work_lock);
2520 if (link_fault) {
2521 t3_link_fault(adapter, i);
2522 continue;
2523 }
2525 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2526 t3_xgm_intr_disable(adapter, i);
2527 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2529 t3_link_changed(adapter, i);
2530 t3_xgm_intr_enable(adapter, i);
2535 static void check_t3b2_mac(struct adapter *adapter)
2539 if (!rtnl_trylock()) /* synchronize with ifdown */
2542 for_each_port(adapter, i) {
2543 struct net_device *dev = adapter->port[i];
2544 struct port_info *p = netdev_priv(dev);
2547 if (!netif_running(dev))
2548 continue;
2550 status = 0;
2551 if (netif_running(dev) && netif_carrier_ok(dev))
2552 status = t3b2_mac_watchdog_task(&p->mac);
2553 if (status == 1)
2554 p->mac.stats.num_toggled++;
2555 else if (status == 2) {
2556 struct cmac *mac = &p->mac;
2558 t3_mac_set_mtu(mac, dev->mtu);
2559 t3_mac_set_address(mac, 0, dev->dev_addr);
2560 cxgb_set_rxmode(dev);
2561 t3_link_start(&p->phy, mac, &p->link_config);
2562 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2563 t3_port_intr_enable(adapter, p->port_id);
2564 p->mac.stats.num_resets++;
2571 static void t3_adap_check_task(struct work_struct *work)
2573 struct adapter *adapter = container_of(work, struct adapter,
2574 adap_check_task.work);
2575 const struct adapter_params *p = &adapter->params;
2577 unsigned int v, status, reset;
2579 adapter->check_task_cnt++;
2581 check_link_status(adapter);
2583 /* Accumulate MAC stats if needed */
2584 if (!p->linkpoll_period ||
2585 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2586 p->stats_update_period) {
2587 mac_stats_update(adapter);
2588 adapter->check_task_cnt = 0;
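/*
 * linkpoll_period is in units of 0.1s (hence the /10 above) while
 * stats_update_period is in seconds.  Worked example: with
 * linkpoll_period == 10 (one poll per second) and
 * stats_update_period == 5, the fifth run gives
 * (5 * 10) / 10 == 5 >= 5, so the MAC stats are folded in and the
 * counter restarts; a zero linkpoll_period forces the update on every
 * run.
 */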
2591 if (p->rev == T3_REV_B2)
2592 check_t3b2_mac(adapter);
2594 /*
2595 * Scan the XGMACs to check for various conditions which we want to
2596 * monitor in a periodic polling manner rather than via an interrupt
2597 * condition. This is used for conditions which would otherwise flood
2598 * the system with interrupts and we only really need to know that the
2599 * conditions are "happening" ... For each condition we count the
2600 * detection of the condition and reset it for the next polling loop.
2601 */
2602 for_each_port(adapter, port) {
2603 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2606 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2607 reset = 0;
2608 if (cause & F_RXFIFO_OVERFLOW) {
2609 mac->stats.rx_fifo_ovfl++;
2610 reset |= F_RXFIFO_OVERFLOW;
2613 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
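/*
 * The cause register is treated as write-1-to-clear: only the bits
 * accumulated in reset are written back, so causes not handled here
 * remain pending for the interrupt path.  The per-condition pattern:
 *
 *	cause = read(CAUSE);
 *	if (cause & F_SOME_CONDITION) {
 *		count the event;
 *		reset |= F_SOME_CONDITION;
 *	}
 *	write(CAUSE, reset);	// clears exactly the handled bits
 */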
2616 /*
2617 * We do the same as above for FL_EMPTY interrupts.
2618 */
2619 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2620 reset = 0;
2622 if (status & F_FLEMPTY) {
2623 struct sge_qset *qs = &adapter->sge.qs[0];
2628 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2629 0xffff;
2631 while (v) {
2632 qs->fl[i].empty += (v & 1);
2640 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
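/*
 * The FL_STATUS word packs one "empty" bit per free list and each
 * queue set owns two free lists (fl[0] and fl[1]), so the walk toggles
 * i between 0 and 1 and advances qs after every second bit.  Sketch of
 * the full walk the loop above performs:
 *
 *	while (v) {
 *		qs->fl[i].empty += (v & 1);
 *		if (i)
 *			qs++;
 *		i ^= 1;
 *		v >>= 1;
 *	}
 */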
2642 /* Schedule the next check update if any port is active. */
2643 spin_lock_irq(&adapter->work_lock);
2644 if (adapter->open_device_map & PORT_MASK)
2645 schedule_chk_task(adapter);
2646 spin_unlock_irq(&adapter->work_lock);
2649 /*
2650 * Processes external (PHY) interrupts in process context.
2651 */
2652 static void ext_intr_task(struct work_struct *work)
2654 struct adapter *adapter = container_of(work, struct adapter,
2655 ext_intr_handler_task);
2658 /* Disable link fault interrupts */
2659 for_each_port(adapter, i) {
2660 struct net_device *dev = adapter->port[i];
2661 struct port_info *p = netdev_priv(dev);
2663 t3_xgm_intr_disable(adapter, i);
2664 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2667 /* Re-enable link fault interrupts */
2668 t3_phy_intr_handler(adapter);
2670 for_each_port(adapter, i)
2671 t3_xgm_intr_enable(adapter, i);
2673 /* Now reenable external interrupts */
2674 spin_lock_irq(&adapter->work_lock);
2675 if (adapter->slow_intr_mask) {
2676 adapter->slow_intr_mask |= F_T3DBG;
2677 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2678 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2679 adapter->slow_intr_mask);
2681 spin_unlock_irq(&adapter->work_lock);
2684 /*
2685 * Interrupt-context handler for external (PHY) interrupts.
2686 */
2687 void t3_os_ext_intr_handler(struct adapter *adapter)
2690 * Schedule a task to handle external interrupts as they may be slow
2691 * and we use a mutex to protect MDIO registers. We disable PHY
2692 * interrupts in the meantime and let the task reenable them when
2693 * it's done.
2694 */
2695 spin_lock(&adapter->work_lock);
2696 if (adapter->slow_intr_mask) {
2697 adapter->slow_intr_mask &= ~F_T3DBG;
2698 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2699 adapter->slow_intr_mask);
2700 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2702 spin_unlock(&adapter->work_lock);
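/*
 * Classic hardirq/process-context split: the handler above only masks
 * F_T3DBG out of slow_intr_mask and queues the work, and ext_intr_task
 * may then sleep on the MDIO mutex before acking and unmasking
 * F_T3DBG.  In shorthand:
 *
 *	hardirq:  mask T3DBG -> queue_work(ext_intr_handler_task)
 *	task:     service PHY interrupt (may sleep) -> ack + unmask T3DBG
 */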
2705 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2707 struct net_device *netdev = adapter->port[port_id];
2708 struct port_info *pi = netdev_priv(netdev);
2710 spin_lock(&adapter->work_lock);
2711 pi->link_fault = 1;
2712 spin_unlock(&adapter->work_lock);
2715 static int t3_adapter_error(struct adapter *adapter, int reset)
2719 if (is_offload(adapter) &&
2720 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2721 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2722 offload_close(&adapter->tdev);
2725 /* Stop all ports */
2726 for_each_port(adapter, i) {
2727 struct net_device *netdev = adapter->port[i];
2729 if (netif_running(netdev))
2730 cxgb_close(netdev);
2733 /* Stop SGE timers */
2734 t3_stop_sge_timers(adapter);
2736 adapter->flags &= ~FULL_INIT_DONE;
2738 if (reset)
2739 ret = t3_reset_adapter(adapter);
2741 pci_disable_device(adapter->pdev);
2746 static int t3_reenable_adapter(struct adapter *adapter)
2748 if (pci_enable_device(adapter->pdev)) {
2749 dev_err(&adapter->pdev->dev,
2750 "Cannot re-enable PCI device after reset.\n");
2753 pci_set_master(adapter->pdev);
2754 pci_restore_state(adapter->pdev);
2756 /* Free sge resources */
2757 t3_free_sge_resources(adapter);
2759 if (t3_replay_prep_adapter(adapter))
2760 return -1;
2762 return 0;
2767 static void t3_resume_ports(struct adapter *adapter)
2771 /* Restart the ports */
2772 for_each_port(adapter, i) {
2773 struct net_device *netdev = adapter->port[i];
2775 if (netif_running(netdev)) {
2776 if (cxgb_open(netdev)) {
2777 dev_err(&adapter->pdev->dev,
2778 "can't bring device back up"
2785 if (is_offload(adapter) && !ofld_disable)
2786 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2790 * processes a fatal error.
2791 * Bring the ports down, reset the chip, bring the ports back up.
2792 */
2793 static void fatal_error_task(struct work_struct *work)
2795 struct adapter *adapter = container_of(work, struct adapter,
2796 fatal_error_handler_task);
2800 err = t3_adapter_error(adapter, 1);
2802 err = t3_reenable_adapter(adapter);
2804 t3_resume_ports(adapter);
2806 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
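/*
 * Recovery is a three-step pipeline, each step gated on the previous
 * one succeeding (a sketch of the surrounding control flow):
 *
 *	err = t3_adapter_error(adapter, 1);	// close ports, reset chip
 *	if (!err)
 *		err = t3_reenable_adapter(adapter);	// re-enable PCI, replay
 *	if (!err)
 *		t3_resume_ports(adapter);	// reopen running ports
 */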
2810 void t3_fatal_err(struct adapter *adapter)
2812 unsigned int fw_status[4];
2814 if (adapter->flags & FULL_INIT_DONE) {
2815 t3_sge_stop(adapter);
2816 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2817 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2818 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2819 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2821 spin_lock(&adapter->work_lock);
2822 t3_intr_disable(adapter);
2823 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2824 spin_unlock(&adapter->work_lock);
2826 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2827 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2828 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2829 fw_status[0], fw_status[1],
2830 fw_status[2], fw_status[3]);
2833 /**
2834 * t3_io_error_detected - called when PCI error is detected
2835 * @pdev: Pointer to PCI device
2836 * @state: The current PCI connection state
2837 *
2838 * This function is called after a PCI bus error affecting
2839 * this device has been detected.
2840 */
2841 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2842 pci_channel_state_t state)
2844 struct adapter *adapter = pci_get_drvdata(pdev);
2847 if (state == pci_channel_io_perm_failure)
2848 return PCI_ERS_RESULT_DISCONNECT;
2850 ret = t3_adapter_error(adapter, 0);
2852 /* Request a slot reset. */
2853 return PCI_ERS_RESULT_NEED_RESET;
2856 /**
2857 * t3_io_slot_reset - called after the PCI bus has been reset.
2858 * @pdev: Pointer to PCI device
2859 *
2860 * Restart the card from scratch, as if from a cold-boot.
2861 */
2862 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2864 struct adapter *adapter = pci_get_drvdata(pdev);
2866 if (!t3_reenable_adapter(adapter))
2867 return PCI_ERS_RESULT_RECOVERED;
2869 return PCI_ERS_RESULT_DISCONNECT;
2872 /**
2873 * t3_io_resume - called when traffic can start flowing again.
2874 * @pdev: Pointer to PCI device
2875 *
2876 * This callback is called when the error recovery driver tells us that
2877 * it's OK to resume normal operation.
2878 */
2879 static void t3_io_resume(struct pci_dev *pdev)
2881 struct adapter *adapter = pci_get_drvdata(pdev);
2883 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2884 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2886 t3_resume_ports(adapter);
2889 static struct pci_error_handlers t3_err_handler = {
2890 .error_detected = t3_io_error_detected,
2891 .slot_reset = t3_io_slot_reset,
2892 .resume = t3_io_resume,
2895 /*
2896 * Set the number of qsets based on the number of CPUs and the number of ports,
2897 * not to exceed the number of available qsets, assuming there are enough qsets
2898 * per port in HW.
2899 */
2900 static void set_nqsets(struct adapter *adap)
2903 int num_cpus = num_online_cpus();
2904 int hwports = adap->params.nports;
2905 int nqsets = adap->msix_nvectors - 1;
2907 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2908 if (hwports == 2 &&
2909 (hwports * nqsets > SGE_QSETS ||
2910 num_cpus >= nqsets / hwports))
2911 nqsets /= hwports;
2912 if (nqsets > num_cpus)
2913 nqsets = num_cpus;
2914 if (nqsets < 1 || hwports == 4)
2915 nqsets = 1;
2919 for_each_port(adap, i) {
2920 struct port_info *pi = adap2pinfo(adap, i);
2922 pi->first_qset = j;
2923 pi->nqsets = nqsets;
2924 j = pi->first_qset + nqsets;
2926 dev_info(&adap->pdev->dev,
2927 "Port %d using %d queue sets.\n", i, nqsets);
2931 static int __devinit cxgb_enable_msix(struct adapter *adap)
2933 struct msix_entry entries[SGE_QSETS + 1];
2937 vectors = ARRAY_SIZE(entries);
2938 for (i = 0; i < vectors; ++i)
2939 entries[i].entry = i;
2941 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2942 vectors = err;
2944 if (err < 0)
2945 pci_disable_msix(adap->pdev);
2947 if (!err && vectors < (adap->params.nports + 1)) {
2948 pci_disable_msix(adap->pdev);
2949 err = -1;
2950 }
2952 if (!err) {
2953 for (i = 0; i < vectors; ++i)
2954 adap->msix_info[i].vec = entries[i].vector;
2955 adap->msix_nvectors = vectors;
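/*
 * Legacy pci_enable_msix() contract: a positive return value means
 * "that many vectors could not be allocated, but this many are
 * available", so the loop above retries with the returned count.
 * Example: requesting 9 vectors when only 4 are free returns 4, and
 * the second call requests exactly 4 and succeeds (returns 0).
 */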
2961 static void __devinit print_port_info(struct adapter *adap,
2962 const struct adapter_info *ai)
2964 static const char *pci_variant[] = {
2965 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2972 snprintf(buf, sizeof(buf), "%s x%d",
2973 pci_variant[adap->params.pci.variant],
2974 adap->params.pci.width);
2976 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2977 pci_variant[adap->params.pci.variant],
2978 adap->params.pci.speed, adap->params.pci.width);
2980 for_each_port(adap, i) {
2981 struct net_device *dev = adap->port[i];
2982 const struct port_info *pi = netdev_priv(dev);
2984 if (!test_bit(i, &adap->registered_device_map))
2985 continue;
2986 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2987 dev->name, ai->desc, pi->phy.desc,
2988 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2989 (adap->flags & USING_MSIX) ? " MSI-X" :
2990 (adap->flags & USING_MSI) ? " MSI" : "");
2991 if (adap->name == dev->name && adap->params.vpd.mclk)
2992 printk(KERN_INFO
2993 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2994 adap->name, t3_mc7_size(&adap->cm) >> 20,
2995 t3_mc7_size(&adap->pmtx) >> 20,
2996 t3_mc7_size(&adap->pmrx) >> 20,
2997 adap->params.vpd.sn);
3001 static const struct net_device_ops cxgb_netdev_ops = {
3002 .ndo_open = cxgb_open,
3003 .ndo_stop = cxgb_close,
3004 .ndo_start_xmit = t3_eth_xmit,
3005 .ndo_get_stats = cxgb_get_stats,
3006 .ndo_validate_addr = eth_validate_addr,
3007 .ndo_set_multicast_list = cxgb_set_rxmode,
3008 .ndo_do_ioctl = cxgb_ioctl,
3009 .ndo_change_mtu = cxgb_change_mtu,
3010 .ndo_set_mac_address = cxgb_set_mac_addr,
3011 .ndo_vlan_rx_register = vlan_rx_register,
3012 #ifdef CONFIG_NET_POLL_CONTROLLER
3013 .ndo_poll_controller = cxgb_netpoll,
3017 static int __devinit init_one(struct pci_dev *pdev,
3018 const struct pci_device_id *ent)
3020 static int version_printed;
3022 int i, err, pci_using_dac = 0;
3023 resource_size_t mmio_start, mmio_len;
3024 const struct adapter_info *ai;
3025 struct adapter *adapter = NULL;
3026 struct port_info *pi;
3028 if (!version_printed) {
3029 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3030 ++version_printed;
3034 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3035 if (!cxgb3_wq) {
3036 printk(KERN_ERR DRV_NAME
3037 ": cannot initialize work queue\n");
3042 err = pci_request_regions(pdev, DRV_NAME);
3043 if (err) {
3044 /* Just info, some other driver may have claimed the device. */
3045 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3049 err = pci_enable_device(pdev);
3050 if (err) {
3051 dev_err(&pdev->dev, "cannot enable PCI device\n");
3052 goto out_release_regions;
3055 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3056 pci_using_dac = 1;
3057 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3059 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3060 "coherent allocations\n");
3061 goto out_disable_device;
3063 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3064 dev_err(&pdev->dev, "no usable DMA configuration\n");
3065 goto out_disable_device;
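/*
 * Standard two-step DMA mask negotiation: try 64-bit streaming and
 * coherent masks first so buffers may live anywhere, else fall back to
 * a 32-bit mask.  pci_using_dac records the outcome so NETIF_F_HIGHDMA
 * is only advertised on the netdevs when 64-bit addressing is actually
 * available (see the feature setup below).
 */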
3068 pci_set_master(pdev);
3069 pci_save_state(pdev);
3071 mmio_start = pci_resource_start(pdev, 0);
3072 mmio_len = pci_resource_len(pdev, 0);
3073 ai = t3_get_adapter_info(ent->driver_data);
3075 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3076 if (!adapter) {
3077 err = -ENOMEM;
3078 goto out_disable_device;
3081 adapter->nofail_skb =
3082 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3083 if (!adapter->nofail_skb) {
3084 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3085 err = -ENOMEM;
3086 goto out_free_adapter;
3089 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3090 if (!adapter->regs) {
3091 dev_err(&pdev->dev, "cannot map device registers\n");
3092 err = -ENOMEM;
3093 goto out_free_adapter;
3096 adapter->pdev = pdev;
3097 adapter->name = pci_name(pdev);
3098 adapter->msg_enable = dflt_msg_enable;
3099 adapter->mmio_len = mmio_len;
3101 mutex_init(&adapter->mdio_lock);
3102 spin_lock_init(&adapter->work_lock);
3103 spin_lock_init(&adapter->stats_lock);
3105 INIT_LIST_HEAD(&adapter->adapter_list);
3106 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3107 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3108 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3110 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3111 struct net_device *netdev;
3113 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3114 if (!netdev) {
3115 err = -ENOMEM;
3116 goto out_free_dev;
3117 }
3119 SET_NETDEV_DEV(netdev, &pdev->dev);
3121 adapter->port[i] = netdev;
3122 pi = netdev_priv(netdev);
3123 pi->adapter = adapter;
3124 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3126 netif_carrier_off(netdev);
3127 netif_tx_stop_all_queues(netdev);
3128 netdev->irq = pdev->irq;
3129 netdev->mem_start = mmio_start;
3130 netdev->mem_end = mmio_start + mmio_len - 1;
3131 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3132 netdev->features |= NETIF_F_GRO;
3133 if (pci_using_dac)
3134 netdev->features |= NETIF_F_HIGHDMA;
3136 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3137 netdev->netdev_ops = &cxgb_netdev_ops;
3138 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3141 pci_set_drvdata(pdev, adapter);
3142 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3143 err = -ENODEV;
3144 goto out_free_dev;
3145 }
3148 * The card is now ready to go. If any errors occur during device
3149 * registration we do not fail the whole card but rather proceed only
3150 * with the ports we manage to register successfully. However we must
3151 * register at least one net device.
3153 for_each_port(adapter, i) {
3154 err = register_netdev(adapter->port[i]);
3155 if (err)
3156 dev_warn(&pdev->dev,
3157 "cannot register net device %s, skipping\n",
3158 adapter->port[i]->name);
3159 else {
3160 /*
3161 * Change the name we use for messages to the name of
3162 * the first successfully registered interface.
3163 */
3164 if (!adapter->registered_device_map)
3165 adapter->name = adapter->port[i]->name;
3167 __set_bit(i, &adapter->registered_device_map);
3170 if (!adapter->registered_device_map) {
3171 dev_err(&pdev->dev, "could not register any net devices\n");
3175 /* Driver's ready. Reflect it on LEDs */
3176 t3_led_ready(adapter);
3178 if (is_offload(adapter)) {
3179 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3180 cxgb3_adapter_ofld(adapter);
3183 /* See what interrupts we'll be using */
3184 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3185 adapter->flags |= USING_MSIX;
3186 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3187 adapter->flags |= USING_MSI;
3189 set_nqsets(adapter);
3191 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3192 &cxgb3_attr_group);
3194 print_port_info(adapter, ai);
3195 return 0;
3197 out_free_dev:
3198 iounmap(adapter->regs);
3199 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3200 if (adapter->port[i])
3201 free_netdev(adapter->port[i]);
3203 out_free_adapter:
3204 kfree(adapter);
3206 out_disable_device:
3207 pci_disable_device(pdev);
3208 out_release_regions:
3209 pci_release_regions(pdev);
3210 pci_set_drvdata(pdev, NULL);
3211 return err;
3214 static void __devexit remove_one(struct pci_dev *pdev)
3216 struct adapter *adapter = pci_get_drvdata(pdev);
3221 t3_sge_stop(adapter);
3222 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3223 &cxgb3_attr_group);
3225 if (is_offload(adapter)) {
3226 cxgb3_adapter_unofld(adapter);
3227 if (test_bit(OFFLOAD_DEVMAP_BIT,
3228 &adapter->open_device_map))
3229 offload_close(&adapter->tdev);
3232 for_each_port(adapter, i)
3233 if (test_bit(i, &adapter->registered_device_map))
3234 unregister_netdev(adapter->port[i]);
3236 t3_stop_sge_timers(adapter);
3237 t3_free_sge_resources(adapter);
3238 cxgb_disable_msi(adapter);
3240 for_each_port(adapter, i)
3241 if (adapter->port[i])
3242 free_netdev(adapter->port[i]);
3244 iounmap(adapter->regs);
3245 if (adapter->nofail_skb)
3246 kfree_skb(adapter->nofail_skb);
3248 pci_release_regions(pdev);
3249 pci_disable_device(pdev);
3250 pci_set_drvdata(pdev, NULL);
3254 static struct pci_driver driver = {
3255 .name = DRV_NAME,
3256 .id_table = cxgb3_pci_tbl,
3257 .probe = init_one,
3258 .remove = __devexit_p(remove_one),
3259 .err_handler = &t3_err_handler,
3262 static int __init cxgb3_init_module(void)
3266 cxgb3_offload_init();
3268 ret = pci_register_driver(&driver);
3269 return ret;
3272 static void __exit cxgb3_cleanup_module(void)
3274 pci_unregister_driver(&driver);
3275 if (cxgb3_wq)
3276 destroy_workqueue(cxgb3_wq);
3279 module_init(cxgb3_init_module);
3280 module_exit(cxgb3_cleanup_module);