/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
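
/*
 * Illustrative usage (hypothetical values): "modprobe cxgb3 msi=1" would
 * skip MSI-X and let the driver fall back to MSI or pin interrupts.  The
 * value is consulted when interrupts are set up at adapter bring-up.
 */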
/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
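
/*
 * Illustrative usage: "modprobe cxgb3 ofld_disable=1" runs the adapter as
 * a plain NIC, leaving the offload capabilities unused.
 */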
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
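
/*
 * A minimal sketch of how this queue is created at module init time (see
 * cxgb3_init_module(); shown here only for context):
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 */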
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}
static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter associated with the PHY reporting the module change
 *	@port_id: the port index of the PHY reporting the module change
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
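
/*
 * Note on the reply accounting above: init_tp_parity() ultimately waits
 * for 16 + 2048 + 2048 + 1 management replies -- one per SMT, L2T and
 * routing-table entry written, plus the final CPL_SET_TCB_FIELD.
 */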
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
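
/*
 * Worked example (hypothetical configuration): with nq0 = 4 queue sets on
 * port 0 and nq1 = 2 on port 1, the first half of rspq_map cycles through
 * 0,1,2,3,0,1,... and the second half through 4,5,4,5,..., so each port's
 * hash buckets resolve only to that port's own response queues.
 */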
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
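
/*
 * Once this group is registered on a port's net device, the attributes
 * can be exercised from user space; an illustrative (hypothetical) session:
 *
 *	# cat /sys/class/net/eth0/cam_size
 *	# echo 1024 > /sys/class/net/eth0/nfilters
 */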
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();	/* synchronize with other accesses to the scheduler */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
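
/*
 * Worked example of the rate math above, with illustrative numbers: for a
 * 200000 kHz core clock (adap->params.vpd.cclk), cpt = 100 and bpt = 64,
 * v = 200000 * 1000 / 100 = 2,000,000 scheduler ticks/s, giving
 * 2,000,000 * 64 / 125 = 1,024,000 Kbps (125 bytes/s per Kbps).
 */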
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
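
/*
 * Illustrative (hypothetical) use of the scheduler attributes from user
 * space; tm_attr_store() passes the value to t3_config_sched() in Kbps:
 *
 *	# echo 500000 > /sys/class/net/eth0/sched0
 */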
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
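
/*
 * For example, with hypothetical FW_VERSION_MAJOR/MINOR/MICRO values of
 * 7, 12 and 0, FW_FNAME expands to "cxgb3/t3fw-7.12.0.bin"; TPSRAM_NAME
 * expands analogously from the TP_VERSION_* numbers, with the %c filled
 * in by t3rev2char() below according to the chip revision.
 */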
static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, taking the trailing checksum word into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work_sync(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}
static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ", "TxFramesOK         ",
	"TxMulticastFramesOK", "TxBroadcastFramesOK",
	"TxPauseFrames      ", "TxUnderrun         ", "TxExtUnderrun      ",

	"TxFrames64         ", "TxFrames65To127    ", "TxFrames128To255   ",
	"TxFrames256To511   ", "TxFrames512To1023  ",
	"TxFrames1024To1518 ", "TxFrames1519ToMax  ",

	"RxOctetsOK         ", "RxFramesOK         ",
	"RxMulticastFramesOK", "RxBroadcastFramesOK",
	"RxPauseFrames      ", "RxFCSErrors        ", "RxSymbolErrors     ",
	"RxShortErrors      ", "RxJabberErrors     ", "RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ", "RxFrames65To127    ", "RxFrames128To255   ",
	"RxFrames256To511   ", "RxFrames512To1023  ",
	"RxFrames1024To1518 ", "RxFrames1519ToMax  ",

	"PhyFIFOErrors      ", "TSO                ",
	"VLANextractions    ", "VLANinsertions     ",
	"TxCsumOffload      ", "RxCsumGood         ",
	"LroAggregated      ", "LroFlushed         ", "LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ", "CheckResets        ",

	"LinkFaults         ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	/* the legacy Lro* counters are no longer maintained */
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		cap = duplex == DUPLEX_FULL ? SUPPORTED_10baseT_Full :
					      SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		cap = duplex == DUPLEX_FULL ? SUPPORTED_100baseT_Full :
					      SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		cap = duplex == DUPLEX_FULL ? SUPPORTED_1000baseT_Full :
					      SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
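
/*
 * Note the deliberate asymmetry: any negative value passes, because the
 * ioctl handlers below use -1 to mean "leave this parameter unchanged".
 * For example, in_range(-1, 32, 1024) is true, in_range(16, 32, 1024) is
 * false.
 */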
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}

		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = memdup_user(useraddr + sizeof(t), t.len);
		if (IS_ERR(fw_data))
			return PTR_ERR(fw_data);

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
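		/*
		 * Page-size checks below: since the size is first verified to
		 * be a power of 2, ANDing it with a mask of the legal sizes
		 * tests set membership.  0x14000 covers 16KB and 64KB for Rx;
		 * 0x1554000 covers 16KB, 64KB, 256KB, 1MB, 4MB and 16MB
		 * for Tx.
		 */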
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;

		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
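		/*
		 * For example, a rev-2 chip reports 3 | (2 << 10) = 0x803:
		 * chip version 3 in the low bits, revision 2 above them.
		 */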
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret =
			    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
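		/*
		 * E.g. legacy phy_id 0x0105 (PRTAD 1 in the high byte,
		 * DEVAD 5 in the low byte) becomes mdio_phy_id_c45(1, 5).
		 */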
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
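		/*
		 * Watchdog status, by this driver's convention: 1 means the
		 * MAC was toggled to recover, 2 means the MAC must be fully
		 * reinitialized, which is handled below.
		 */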
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}

static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}

static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
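	/*
	 * Two random bytes are drawn and the low 10 bits add 0-1023 usecs
	 * to the 1000 usec base, hence the 1000-2023 range quoted above.
	 */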
	get_random_bytes(&r, 2);
	delay += r & 1023;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			__cxgb_close(netdev, on_wq);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}
/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in the first place.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;
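	/*
	 * Illustrative example (assuming SGE_QSETS == 8): a 2-port adapter
	 * that obtained 9 MSI-X vectors starts with nqsets = 8; 2 * 8
	 * exceeds SGE_QSETS, so each port ends up with 4 qsets, further
	 * capped by the number of online CPUs.
	 */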
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;
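	/*
	 * pci_enable_msix() returns a positive value when fewer vectors are
	 * available, telling us how many can be allocated; the loop above
	 * retries with that count until success (0) or a hard error (< 0).
	 */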
	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};
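/*
 * Derive a distinct MAC address for the iSCSI interface from the port's
 * Ethernet MAC by setting the top bit of the fourth address byte; the
 * vendor OUI in the first three bytes is preserved.
 */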
static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		goto out_disable_device;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_release_regions;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_release_regions;
	}
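	/*
	 * pci_using_dac records that the 64-bit DMA mask was accepted; it is
	 * used below to set NETIF_F_HIGHDMA on the net devices.
	 */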
	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_release_regions;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}
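	/*
	 * nofail_skb is kept in reserve (sized for a cpl_set_tcb_field
	 * message) so the offload code can still send a critical CPL if a
	 * regular skb allocation fails later under memory pressure.
	 */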
	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
out:
	return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);
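		/*
		 * The net devices are unregistered before the SGE resources
		 * are freed, so no Rx/Tx can be in flight when the queues
		 * go away.
		 */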
		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);