/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
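/*
 * Note: PORT_MASK selects the low MAX_NPORTS bits of open_device_map, which
 * hold the per-port "interface up" bits; OFFLOAD_DEVMAP_BIT (used by
 * offload_open()/offload_close() below) sits above this mask.
 */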
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
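/*
 * For example, loading the module with "msi=0" (e.g. modprobe cxgb3 msi=0)
 * forces legacy pin interrupts, which can help on platforms whose MSI
 * support is broken.
 */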
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 * link_report - show link status and link speed/duplex
 * @dev: the net device whose settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1Gbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
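/*
 * MSI-X vector 0 is reserved for slow-path/asynchronous events and is named
 * after the adapter itself below; the per-queue-set data interrupts start at
 * vector 1 (see request_msix_data_irqs()).
 */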
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}
/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
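	/*
	 * For example, with nq0 = 4 and nq1 = 2 the first half of the table
	 * cycles through response queues 0..3 (port 0) and the second half
	 * through queues 4..5 (port 1), so each port's hash buckets map only
	 * onto its own queue sets.
	 */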
	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers
 * of both netdevices representing interfaces and the dummy ones for the
 * extra queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}
static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}
static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}
static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
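	/*
	 * Each 32-bit TM PIO word holds two schedulers' parameters, one per
	 * 16-bit half; within a half, bits 15:8 hold bpt (presumably bytes
	 * per scheduler tick) and bits 7:0 hold cpt (core-clock ticks per
	 * scheduler tick), with cpt == 0 meaning disabled.  Assuming
	 * vpd.cclk is in kHz, cclk * 1000 / cpt is scheduler ticks per
	 * second; multiplying by bpt gives bytes/sec, and dividing by 125
	 * converts bytes/sec to Kbps (1 Kbps == 125 bytes/sec).
	 */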
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}
static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}
#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 * cxgb_up - enable the adapter
 * @adapter: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;

	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
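/*
 * Note that reg_block_dump() stores each register's value at its own address
 * offset within the dump image (p = buf + start), so the register map is
 * positional; ranges that are never dumped remain zeroed by the memset() in
 * get_regs() below.
 */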
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	if (!data) {
		struct adapter *adap = p->adapter;
		int i;

		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			adap->sge.qs[i].lro_enabled = 0;
	}
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
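	/*
	 * The EEPROM is accessed a full 32-bit word at a time, so round the
	 * request out to the enclosing word boundaries.  For example, a
	 * 6-byte write at offset 5 (bytes 5..10) becomes aligned_offset = 4
	 * and aligned_len = 8, i.e. the two words at offsets 4 and 8.
	 */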
	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
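/*
 * in_range() deliberately treats negative values as valid: the extension
 * ioctl below uses -1 in a field to mean "leave this parameter unchanged",
 * so negative values must pass the range check.
 */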
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
			int i;
			struct qset_params *q;
			struct ch_qset_params t;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;
			if (t.qset_idx >= SGE_QSETS)
				return -EINVAL;
			if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
			    !in_range(t.cong_thres, 0, 255) ||
			    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
				      MAX_TXQ_ENTRIES) ||
			    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
				      MAX_TXQ_ENTRIES) ||
			    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
				      MAX_CTRL_TXQ_ENTRIES) ||
			    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
				      MAX_RX_BUFFERS)
			    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
					 MAX_RX_JUMBO_BUFFERS)
			    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
					 MAX_RSPQ_ENTRIES))
				return -EINVAL;
			if ((adapter->flags & FULL_INIT_DONE) &&
			    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
			     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
			     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
			     t.polling >= 0 || t.cong_thres >= 0))
				return -EBUSY;

			q = &adapter->params.sge.qset[t.qset_idx];

			if (t.rspq_size >= 0)
				q->rspq_size = t.rspq_size;
			if (t.fl_size[0] >= 0)
				q->fl_size = t.fl_size[0];
			if (t.fl_size[1] >= 0)
				q->jumbo_size = t.fl_size[1];
			if (t.txq_size[0] >= 0)
				q->txq_size[0] = t.txq_size[0];
			if (t.txq_size[1] >= 0)
				q->txq_size[1] = t.txq_size[1];
			if (t.txq_size[2] >= 0)
				q->txq_size[2] = t.txq_size[2];
			if (t.cong_thres >= 0)
				q->cong_thres = t.cong_thres;
			if (t.intr_lat >= 0) {
				struct sge_qset *qs =
				    &adapter->sge.qs[t.qset_idx];

				q->coalesce_usecs = t.intr_lat;
				t3_update_qset_coalesce(qs, q);
			}
			if (t.polling >= 0) {
				if (adapter->flags & USING_MSIX)
					q->polling = t.polling;
				else {
					/* No polling with INTx for T3A */
					if (adapter->params.rev == 0 &&
					    !(adapter->flags & USING_MSI))
						t.polling = 0;

					for (i = 0; i < SGE_QSETS; i++) {
						q = &adapter->params.sge.
						    qset[i];
						q->polling = t.polling;
					}
				}
			}
			if (t.lro >= 0) {
				struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

				q->lro = t.lro;
				qs->lro_enabled = t.lro;
			}
			break;
		}
	case CHELSIO_GET_QSET_PARAMS:{
			struct qset_params *q;
			struct ch_qset_params t;

			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;
			if (t.qset_idx >= SGE_QSETS)
				return -EINVAL;

			q = &adapter->params.sge.qset[t.qset_idx];
			t.rspq_size = q->rspq_size;
			t.txq_size[0] = q->txq_size[0];
			t.txq_size[1] = q->txq_size[1];
			t.txq_size[2] = q->txq_size[2];
			t.fl_size[0] = q->fl_size;
			t.fl_size[1] = q->jumbo_size;
			t.polling = q->polling;
			t.lro = q->lro;
			t.intr_lat = q->coalesce_usecs;
			t.cong_thres = q->cong_thres;

			if (copy_to_user(useraddr, &t, sizeof(t)))
				return -EFAULT;
			break;
		}
	case CHELSIO_SET_QSET_NUM:{
			struct ch_reg edata;
			unsigned int i, first_qset = 0, other_qsets = 0;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (adapter->flags & FULL_INIT_DONE)
				return -EBUSY;
			if (copy_from_user(&edata, useraddr, sizeof(edata)))
				return -EFAULT;
			if (edata.val < 1 ||
			    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
				return -EINVAL;

			for_each_port(adapter, i)
				if (adapter->port[i] && adapter->port[i] != dev)
					other_qsets += adap2pinfo(adapter, i)->nqsets;

			if (edata.val + other_qsets > SGE_QSETS)
				return -EINVAL;

			pi->nqsets = edata.val;

			for_each_port(adapter, i)
				if (adapter->port[i]) {
					pi = adap2pinfo(adapter, i);
					pi->first_qset = first_qset;
					first_qset += pi->nqsets;
				}
			break;
		}
	case CHELSIO_GET_QSET_NUM:{
			struct ch_reg edata;

			edata.cmd = CHELSIO_GET_QSET_NUM;
			edata.val = pi->nqsets;
			if (copy_to_user(useraddr, &edata, sizeof(edata)))
				return -EFAULT;
			break;
		}
	case CHELSIO_LOAD_FW:{
			u8 *fw_data;
			struct ch_mem_range t;

			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;
			/* Check t.len sanity ? */
			fw_data = kmalloc(t.len, GFP_KERNEL);
			if (!fw_data)
				return -ENOMEM;

			if (copy_from_user
			    (fw_data, useraddr + sizeof(t), t.len)) {
				kfree(fw_data);
				return -EFAULT;
			}

			ret = t3_load_fw(adapter, fw_data, t.len);
			kfree(fw_data);
			if (ret)
				return ret;
			break;
		}
	case CHELSIO_SETMTUTAB:{
			struct ch_mtus m;
			int i;

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (offload_running(adapter))
				return -EBUSY;
			if (copy_from_user(&m, useraddr, sizeof(m)))
				return -EFAULT;
			if (m.nmtus != NMTUS)
				return -EINVAL;
			if (m.mtus[0] < 81)	/* accommodate SACK */
				return -EINVAL;

			/* MTUs must be in ascending order */
			for (i = 1; i < NMTUS; ++i)
				if (m.mtus[i] < m.mtus[i - 1])
					return -EINVAL;

			memcpy(adapter->params.mtus, m.mtus,
			       sizeof(adapter->params.mtus));
			break;
		}
	case CHELSIO_GET_PM:{
			struct tp_params *p = &adapter->params.tp;
			struct ch_pm m = {.cmd = CHELSIO_GET_PM };

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			m.tx_pg_sz = p->tx_pg_size;
			m.tx_num_pg = p->tx_num_pgs;
			m.rx_pg_sz = p->rx_pg_size;
			m.rx_num_pg = p->rx_num_pgs;
			m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
			if (copy_to_user(useraddr, &m, sizeof(m)))
				return -EFAULT;
			break;
		}
	case CHELSIO_SET_PM:{
			struct ch_pm m;
			struct tp_params *p = &adapter->params.tp;

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (adapter->flags & FULL_INIT_DONE)
				return -EBUSY;
			if (copy_from_user(&m, useraddr, sizeof(m)))
				return -EFAULT;
			if (!is_power_of_2(m.rx_pg_sz) ||
			    !is_power_of_2(m.tx_pg_sz))
				return -EINVAL;	/* not power of 2 */
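			/*
			 * 0x14000 has bits 14 and 16 set, i.e. page sizes of
			 * 16KB or 64KB; 0x1554000 additionally has bits 18,
			 * 20, 22 and 24 set, allowing 256KB, 1MB, 4MB and
			 * 16MB.  Since the size is already known to be a
			 * power of 2, ANDing with the mask tests membership
			 * in the allowed set.
			 */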
			if (!(m.rx_pg_sz & 0x14000))
				return -EINVAL;	/* not 16KB or 64KB */
			if (!(m.tx_pg_sz & 0x1554000))
				return -EINVAL;
			if (m.tx_num_pg == -1)
				m.tx_num_pg = p->tx_num_pgs;
			if (m.rx_num_pg == -1)
				m.rx_num_pg = p->rx_num_pgs;
			if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
				return -EINVAL;
			if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
			    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
				return -EINVAL;
			p->rx_pg_size = m.rx_pg_sz;
			p->tx_pg_size = m.tx_pg_sz;
			p->rx_num_pgs = m.rx_num_pg;
			p->tx_num_pgs = m.tx_num_pg;
			break;
		}
	case CHELSIO_GET_MEM:{
			struct ch_mem_range t;
			struct mc7 *mem;
			u64 buf[32];

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			if (!(adapter->flags & FULL_INIT_DONE))
				return -EIO;	/* need the memory controllers */
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;
			if ((t.addr & 7) || (t.len & 7))
				return -EINVAL;
			if (t.mem_id == MEM_CM)
				mem = &adapter->cm;
			else if (t.mem_id == MEM_PMRX)
				mem = &adapter->pmrx;
			else if (t.mem_id == MEM_PMTX)
				mem = &adapter->pmtx;
			else
				return -EINVAL;

			/*
			 * Version scheme:
			 * bits 0..9: chip version
			 * bits 10..15: chip revision
			 */
			t.version = 3 | (adapter->params.rev << 10);
			if (copy_to_user(useraddr, &t, sizeof(t)))
				return -EFAULT;

			/*
			 * Read 256 bytes at a time as len can be large and we don't
			 * want to use huge intermediate buffers.
			 */
			useraddr += sizeof(t);	/* advance to start of buffer */
			while (t.len) {
				unsigned int chunk =
				    min_t(unsigned int, t.len, sizeof(buf));

				ret =
				    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
						   buf);
				if (ret)
					return ret;
				if (copy_to_user(useraddr, buf, chunk))
					return -EFAULT;
				useraddr += chunk;
				t.addr += chunk;
				t.len -= chunk;
			}
			break;
		}
	case CHELSIO_SET_TRACE_FILTER:{
			struct ch_trace t;
			const struct trace_params *tp;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (!offload_running(adapter))
				return -EAGAIN;
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;

			tp = (const struct trace_params *)&t.sip;
			if (t.config_tx)
				t3_config_trace_filter(adapter, tp, 0,
						       t.invert_match,
						       t.trace_tx);
			if (t.config_rx)
				t3_config_trace_filter(adapter, tp, 1,
						       t.invert_match,
						       t.trace_rx);
			break;
		}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
			u32 val;
			struct cphy *phy = &pi->phy;

			if (!phy->mdio_read)
				return -EOPNOTSUPP;
			if (is_10G(adapter)) {
				mmd = data->phy_id >> 8;
				if (!mmd)
					mmd = MDIO_DEV_PCS;
				else if (mmd > MDIO_DEV_XGXS)
					return -EINVAL;

				ret =
				    phy->mdio_read(adapter, data->phy_id & 0x1f,
						   mmd, data->reg_num, &val);
			} else
				ret =
				    phy->mdio_read(adapter, data->phy_id & 0x1f,
						   0, data->reg_num & 0x1f,
						   &val);
			if (!ret)
				data->val_out = val;
			break;
		}
	case SIOCSMIIREG:{
			struct cphy *phy = &pi->phy;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (!phy->mdio_write)
				return -EOPNOTSUPP;
			if (is_10G(adapter)) {
				mmd = data->phy_id >> 8;
				if (!mmd)
					mmd = MDIO_DEV_PCS;
				else if (mmd > MDIO_DEV_XGXS)
					return -EINVAL;

				ret =
				    phy->mdio_write(adapter,
						    data->phy_id & 0x1f, mmd,
						    data->reg_num,
						    data->val_in);
			} else
				ret =
				    phy->mdio_write(adapter,
						    data->phy_id & 0x1f, 0,
						    data->reg_num & 0x1f,
						    data->val_in);
			break;
		}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
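	/*
	 * linkpoll_period is in units of 0.1s (see schedule_chk_task()), so
	 * check_task_cnt * linkpoll_period / 10 is roughly the elapsed time
	 * in seconds since the stats were last folded in.
	 */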
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
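/*
 * PHY interrupts are routed through the T3DBG bit of the slow (PL)
 * interrupt path, so clearing and setting F_T3DBG in slow_intr_mask is what
 * masks and unmasks PHY interrupts in the handlers above and below.
 */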
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);
	pci_disable_device(adapter->pdev);
	return ret;
}
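/*
 * Re-enable the PCI device after a reset and prepare the adapter for a
 * replay of its initialization.  Returns 0 on success, -1 on failure.
 */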
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;
	return 0;
err:
	return -1;
}
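/*
 * Reopen any ports that were running before the adapter went down.
 */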
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev))
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
		}
	}
}
/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;
	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);
	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
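/*
 * Handle a fatal hardware error: stop the SGE and MACs, disable interrupts,
 * defer the actual recovery to fatal_error_task, and log the firmware
 * status.
 */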
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
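/*
 * Attempt to switch to MSI-X, requesting one vector per queue set plus one
 * for the slow path.  On success the assigned vectors are recorded in
 * msix_info; a positive return from pci_enable_msix means too few vectors
 * were available.
 */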
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
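/*
 * PCI probe: claim and enable the device, set up DMA masks and the MMIO
 * mapping, allocate the adapter and its net devices, register the ports,
 * and pick an interrupt scheme (MSI-X, MSI, or legacy pin).
 */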
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}
	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_csum_offload = 1;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}
	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;
out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
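/*
 * PCI remove: tear down everything init_one set up, in roughly reverse
 * order.
 */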
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};
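/*
 * Module init/exit: set up the offload subsystem and register the PCI
 * driver; the workqueue created at probe time is destroyed on unload.
 */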
static int __init cxgb3_init_module(void)
{
	cxgb3_offload_init();
	return pci_register_driver(&driver);
}
static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}
module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);