/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "cxgb4_tc_u32.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
        {PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { 0, } \
        }

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
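
/* Usage sketch (illustrative, not part of this file): the parameter can be
 * given at load time, e.g. "modprobe cxgb4 msi=1" to stop at MSI, or changed
 * afterwards through "/sys/module/cxgb4/parameters/msi", since it is
 * registered above with mode 0644.
 */
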
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s;
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 100:
                        s = "100Mbps";
                        break;
                case 1000:
                        s = "1Gbps";
                        break;
                case 10000:
                        s = "10Gbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                default:
                        pr_info("%s: unsupported speed: %d\n",
                                dev->name, p->link_cfg.speed);
                        return;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X_V(
                                FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
                                            &name, &value,
                                            -FW_CMD_MAX_TIMEOUT);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        if (!pi->dcb.enabled)
                return 0;

        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
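
/* A note on the Set Parameters plumbing used above (and again in
 * cxgb4_set_rspq_intr_params() below): a firmware DMAQ parameter "name" is a
 * single 32-bit word composed of a mnemonic, a parameter index and a target
 * queue.  For a hypothetical egress queue with context id 5 that would be:
 *
 *      FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
 *      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
 *      FW_PARAMS_PARAM_YZ_V(5)
 */
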
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        if (cxgb4_dcb_enabled(dev)) {
                                cxgb4_dcb_state_init(dev);
                                dcb_tx_queue_prio_enable(dev, false);
                        }
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                netdev_info(dev, "%s: unsupported port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                netdev_info(dev, "%s: unknown port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                netdev_info(dev, "%s: transceiver module error\n", dev->name);
        else
                netdev_info(dev, "%s: unknown module type %d inserted\n",
                            dev->name, pi->mod_type);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
        struct adapter *adap = pi->adapter;
        u64 vec = 0;
        bool ucast = false;
        struct hash_mac_addr *entry;

        /* Calculate the hash vector for the updated list and program it */
        list_for_each_entry(entry, &adap->mac_hlist, list) {
                ucast |= is_unicast_ether_addr(entry->addr);
                vec |= (1ULL << hash_mac_addr(entry->addr));
        }
        return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
                                vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = false;
        bool ucast = is_unicast_ether_addr(mac_addr);
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *new_entry;

        ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
                                NULL, ucast ? &uhash : &mhash, false);
        if (ret < 0)
                goto out;
        /* if hash != 0, then add the addr to hash addr list
         * so on the end we will calculate the hash for the
         * list and program it
         */
        if (uhash || mhash) {
                new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
                if (!new_entry)
                        return -ENOMEM;
                ether_addr_copy(new_entry->addr, mac_addr);
                list_add_tail(&new_entry->list, &adap->mac_hlist);
                ret = cxgb4_set_addr_hash(pi);
        }
out:
        return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *entry, *tmp;

        /* If the MAC address to be removed is in the hash addr
         * list, delete it from the list and update hash vector
         */
        list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
                if (ether_addr_equal(entry->addr, mac_addr)) {
                        list_del(&entry->list);
                        kfree(entry);
                        return cxgb4_set_addr_hash(pi);
                }
        }

        ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
        return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
        __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
                             (dev->flags & IFF_PROMISC) ? 1 : 0,
                             (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                             sleep_ok);
}

/*
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->pf;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[adap->chan_map[port]];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if (txq->q_type == CXGB4_TXQ_ETH) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_uld_txq *oq;

                        oq = container_of(txq, struct sge_uld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_G(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev =
                                q->adap->port[q->adap->chan_map[port]];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS_F)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;
        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

        if (v & PFSW_F) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        if (adap->flags & MASTER_PF)
                t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}

/**
 *      cxgb4_write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 *      Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed.  We'll
         * use our first ingress queue ...
         */
        if (!err)
                err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
                                       rss[0]);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, j, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                /* Fill default values with equal distribution */
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = j % pi->nqsets;

                err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
        if (adap->flags & FULL_INIT_DONE) {
                t4_intr_disable(adap);
                if (adap->flags & USING_MSIX) {
                        free_msix_queue_irqs(adap);
                        free_irq(adap->msix_info[0].vec, adap);
                } else {
                        free_irq(adap->pdev->irq, adap);
                }
                quiesce_rx(adap);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);

                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                             SEINTARM_V(q->intr_params) |
                             INGRESSQID_V(q->cntxt_id));
        }
}

static int setup_fw_sge_queues(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err = 0;

        bitmap_zero(s->starving_fl, s->egr_sz);
        bitmap_zero(s->txq_maperr, s->egr_sz);

        if (adap->flags & USING_MSIX)
                adap->msi_idx = 1;      /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL, NULL, -1);
                if (err)
                        return err;
                adap->msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
        if (err)
                t4_free_sge_resources(adap);
        return err;
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, i, j;
        struct sge *s = &adap->sge;
        struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
        unsigned int cmplqid = 0;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (adap->msi_idx > 0)
                                adap->msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               adap->msi_idx, &q->fl,
                                               t4_ethrx_handler,
                                               NULL,
                                               t4_get_mps_bg_map(adap,
                                                                 pi->tx_chan));
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        for_each_port(adap, i) {
                /* Note that cmplqid below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                if (rxq_info)
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id, cmplqid);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL_A :
                                MPS_T5_TRC_RSS_CONTROL_A,
                     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
freeout:
        t4_free_sge_resources(adap);
        return err;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
        kvfree(addr);
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If a Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev)) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
                        if (skb->protocol == htons(ETH_P_FCOE))
                                txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return fallback(dev, skb) % dev->real_num_tx_queues;
}
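
/* Worked example for the select_queue=1 path above: with, say, 8 real TX
 * queues and a recorded RX queue of 19, the while loop folds the value into
 * range by repeated subtraction: 19 - 8 = 11, 11 - 8 = 3, so TX queue 3 is
 * used.
 */
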
static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

/**
 *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *      @q: the Rx queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *      one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        struct adapter *adap = q->adap;

        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X_V(
                                        FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
                        err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
                                            &v, &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
        return 0;
}
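
/* Usage sketch (hypothetical values): ask for at most one interrupt per 5us
 * or per 8 packets on the first Ethernet Rx queue; both values are snapped to
 * the nearest entries of the adapter's timer/counter tables by the helpers
 * above:
 *
 *      err = cxgb4_set_rspq_intr_params(&adap->sge.ethrxq[0].rspq, 5, 8);
 */
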
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
        const struct port_info *pi = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
        int err;

        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
                return 0;

        err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
                            -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (unlikely(err))
                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
        return err;
}

static int setup_debugfs(struct adapter *adap)
{
        if (IS_ERR_OR_NULL(adap->debugfs_root))
                return -1;

#ifdef CONFIG_DEBUG_FS
        t4_setup_debugfs(adap);
#endif
        return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
        int atid = -1;

        spin_lock_bh(&t->atid_lock);
        if (t->afree) {
                union aopen_entry *p = t->afree;

                atid = (p - t->atid_tab) + t->atid_base;
                t->afree = p->next;
                p->data = data;
                t->atids_in_use++;
        }
        spin_unlock_bh(&t->atid_lock);
        return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
        union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

        spin_lock_bh(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
        int stid;

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET) {
                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
                if (stid < t->nstids)
                        __set_bit(stid, t->stid_bmap);
                else
                        stid = -1;
        } else {
                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
                if (stid < 0)
                        stid = -1;
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid += t->stid_base;
                /* IPv6 requires max of 520 bits or 16 cells in TCAM
                 * This is equivalent to 4 TIDs. With CLIP enabled it
                 * needs 2 TIDs.
                 */
                if (family == PF_INET)
                        t->stids_in_use++;
                else
                        t->stids_in_use += 2;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
        int stid;

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET) {
                stid = find_next_zero_bit(t->stid_bmap,
                                          t->nstids + t->nsftids, t->nstids);
                if (stid < (t->nstids + t->nsftids))
                        __set_bit(stid, t->stid_bmap);
                else
                        stid = -1;
        } else {
                stid = -1;
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid -= t->nstids;
                stid += t->sftid_base;
                t->sftids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
        /* Is it a server filter TID? */
        if (t->nsftids && (stid >= t->sftid_base)) {
                stid -= t->sftid_base;
                stid += t->nstids;
        } else {
                stid -= t->stid_base;
        }

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET)
                __clear_bit(stid, t->stid_bmap);
        else
                bitmap_release_region(t->stid_bmap, stid, 1);
        t->stid_tab[stid].data = NULL;
        if (stid < t->nstids) {
                if (family == PF_INET)
                        t->stids_in_use--;
                else
                        t->stids_in_use -= 2;
        } else {
                t->sftids_in_use--;
        }
        spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
                           unsigned int tid)
{
        struct cpl_tid_release *req;

        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
                                    unsigned int tid)
{
        void **p = &t->tid_tab[tid];
        struct adapter *adap = container_of(t, struct adapter, tids);

        spin_lock_bh(&adap->tid_release_lock);
        *p = adap->tid_release_head;
        /* Low 2 bits encode the Tx channel number */
        adap->tid_release_head = (void **)((uintptr_t)p | chan);
        if (!adap->tid_release_task_busy) {
                adap->tid_release_task_busy = true;
                queue_work(adap->workq, &adap->tid_release_task);
        }
        spin_unlock_bh(&adap->tid_release_lock);
}
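
/* The release list above plays a pointer-tagging trick: tid_tab entries are
 * pointer-aligned, so the low 2 bits of an entry's address are free to carry
 * the Tx channel.  A minimal sketch of the encode/decode pair (illustrative
 * helpers, not used by the driver):
 */
static inline void **example_tid_tag(void **p, unsigned int chan)
{
        /* encode: stash the channel in the low bits of the address */
        return (void **)((uintptr_t)p | (chan & 3));
}

static inline unsigned int example_tid_chan(void **tagged)
{
        /* decode: recover the channel from the tagged pointer */
        return (uintptr_t)tagged & 3;
}
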
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
        struct sk_buff *skb;
        struct adapter *adap;

        adap = container_of(work, struct adapter, tid_release_task);

        spin_lock_bh(&adap->tid_release_lock);
        while (adap->tid_release_head) {
                void **p = adap->tid_release_head;
                unsigned int chan = (uintptr_t)p & 3;
                p = (void *)p - chan;

                adap->tid_release_head = *p;
                *p = NULL;
                spin_unlock_bh(&adap->tid_release_lock);

                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
                                         GFP_KERNEL)))
                        schedule_timeout_uninterruptible(1);

                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
                t4_ofld_send(adap, skb);
                spin_lock_bh(&adap->tid_release_lock);
        }
        adap->tid_release_task_busy = false;
        spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
        struct sk_buff *skb;
        struct adapter *adap = container_of(t, struct adapter, tids);

        WARN_ON(tid >= t->ntids);

        if (t->tid_tab[tid]) {
                t->tid_tab[tid] = NULL;
                if (t->hash_base && (tid >= t->hash_base))
                        atomic_dec(&t->hash_tids_in_use);
                else
                        atomic_dec(&t->tids_in_use);
        }

        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
        if (likely(skb)) {
                mk_tid_release(skb, chan, tid);
                t4_ofld_send(adap, skb);
        } else
                cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
        struct adapter *adap = container_of(t, struct adapter, tids);
        unsigned int max_ftids = t->nftids + t->nsftids;
        unsigned int natids = t->natids;
        unsigned int stid_bmap_size;
        unsigned int ftid_bmap_size;
        size_t size;

        stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
        ftid_bmap_size = BITS_TO_LONGS(t->nftids);
        size = t->ntids * sizeof(*t->tid_tab) +
               natids * sizeof(*t->atid_tab) +
               t->nstids * sizeof(*t->stid_tab) +
               t->nsftids * sizeof(*t->stid_tab) +
               stid_bmap_size * sizeof(long) +
               max_ftids * sizeof(*t->ftid_tab) +
               ftid_bmap_size * sizeof(long);

        t->tid_tab = t4_alloc_mem(size);
        if (!t->tid_tab)
                return -ENOMEM;

        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
        t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
        t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
        t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
        t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
        spin_lock_init(&t->stid_lock);
        spin_lock_init(&t->atid_lock);
        spin_lock_init(&t->ftid_lock);

        t->stids_in_use = 0;
        t->sftids_in_use = 0;
        t->afree = NULL;
        t->atids_in_use = 0;
        atomic_set(&t->tids_in_use, 0);
        atomic_set(&t->hash_tids_in_use, 0);

        /* Setup the free list for atid_tab and clear the stid bitmap. */
        if (natids) {
                while (--natids)
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;
        }

        if (is_offload(adap)) {
                bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
                /* Reserve stid 0 for T4/T5 adapters */
                if (!t->stid_base &&
                    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                        __set_bit(0, t->stid_bmap);
        }

        bitmap_zero(t->ftid_bmap, t->nftids);
        return 0;
}
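
/* Layout of the single t4_alloc_mem() block carved up above (one allocation,
 * consecutive regions, matching the size computation):
 *
 *      tid_tab[ntids]             connection TIDs
 *      atid_tab[natids]           active-open TIDs
 *      stid_tab[nstids+nsftids]   server (+ server-filter) TIDs
 *      stid_bmap[]                server TID bitmap
 *      ftid_tab[nftids+nsftids]   filter entries
 *      ftid_bmap[]                filter bitmap
 */
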
/**
 *      cxgb4_create_server - create an IP server
 *      @dev: the device
 *      @stid: the server TID
 *      @sip: local IP address to bind server to
 *      @sport: the server's TCP port
 *      @queue: queue to direct messages from this server to
 *
 *      Create an IP server for the given port and address.
 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
                        __be32 sip, __be16 sport, __be16 vlan,
                        unsigned int queue)
{
        unsigned int chan;
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        adap = netdev2adap(dev);
        req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
        req->local_port = sport;
        req->peer_port = htons(0);
        req->local_ip = sip;
        req->peer_ip = htonl(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/*      cxgb4_create_server6 - create an IPv6 server
 *      @dev: the device
 *      @stid: the server TID
 *      @sip: local IPv6 address to bind server to
 *      @sport: the server's TCP port
 *      @queue: queue to direct messages from this server to
 *
 *      Create an IPv6 server for the given port and address.
 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
                         const struct in6_addr *sip, __be16 sport,
                         unsigned int queue)
{
        unsigned int chan;
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req6 *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        adap = netdev2adap(dev);
        req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
        req->local_port = sport;
        req->peer_port = htons(0);
        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
        req->peer_ip_hi = cpu_to_be64(0);
        req->peer_ip_lo = cpu_to_be64(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
                        unsigned int queue, bool ipv6)
{
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_close_listsvr_req *req;
        int ret;

        adap = netdev2adap(dev);

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
                                LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *      @mtus: the HW MTU table
 *      @mtu: the target MTU
 *      @idx: index of selected entry in the MTU table
 *
 *      Returns the index and the value in the HW MTU table that is closest to
 *      but does not exceed @mtu, unless @mtu is smaller than any value in the
 *      table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
                            unsigned int *idx)
{
        unsigned int i = 0;

        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
                ++i;
        if (idx)
                *idx = i;
        return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
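
/* Worked example for cxgb4_best_mtu(): with a hypothetical table beginning
 * { 1500, 2002, 4096, 9000, ... } and mtu = 4000, the scan stops at index 1
 * (2002), since mtus[2] = 4096 would exceed the target; for mtu = 1400 the
 * smallest entry (index 0, value 1500) is returned.
 */
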
/**
 *      cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *      @mtus: the HW MTU table
 *      @header_size: Header Size
 *      @data_size_max: maximum Data Segment Size
 *      @data_size_align: desired Data Segment Size Alignment (2^N)
 *      @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *      Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *      MTU Table based solely on a Maximum MTU parameter, we break that
 *      parameter up into a Header Size and Maximum Data Segment Size, and
 *      provide a desired Data Segment Size Alignment.  If we find an MTU in
 *      the Hardware MTU Table which will result in a Data Segment Size with
 *      the requested alignment _and_ that MTU isn't "too far" from the
 *      closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
                                    unsigned short header_size,
                                    unsigned short data_size_max,
                                    unsigned short data_size_align,
                                    unsigned int *mtu_idxp)
{
        unsigned short max_mtu = header_size + data_size_max;
        unsigned short data_size_align_mask = data_size_align - 1;
        int mtu_idx, aligned_mtu_idx;

        /* Scan the MTU Table till we find an MTU which is larger than our
         * Maximum MTU or we reach the end of the table.  Along the way,
         * record the last MTU found, if any, which will result in a Data
         * Segment Length matching the requested alignment.
         */
        for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
                unsigned short data_size = mtus[mtu_idx] - header_size;

                /* If this MTU minus the Header Size would result in a
                 * Data Segment Size of the desired alignment, remember it.
                 */
                if ((data_size & data_size_align_mask) == 0)
                        aligned_mtu_idx = mtu_idx;

                /* If we're not at the end of the Hardware MTU Table and the
                 * next element is larger than our Maximum MTU, drop out of
                 * the loop.
                 */
                if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
                        break;
        }

        /* If we fell out of the loop because we ran to the end of the table,
         * then we just have to use the last [largest] entry.
         */
        if (mtu_idx == NMTUS)
                mtu_idx--;

        /* If we found an MTU which resulted in the requested Data Segment
         * Length alignment and that's "not far" from the largest MTU which is
         * less than or equal to the maximum MTU, then use that.
         */
        if (aligned_mtu_idx >= 0 &&
            mtu_idx - aligned_mtu_idx <= 1)
                mtu_idx = aligned_mtu_idx;

        /* If the caller has passed in an MTU Index pointer, pass the
         * MTU Index back.  Return the MTU value.
         */
        if (mtu_idxp)
                *mtu_idxp = mtu_idx;
        return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

/**
 *      cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *      @chip: chip type
 *      @viid: VI id of the given port
 *
 *      Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
        /* In T4/T5, SMT contains 256 SMAC entries organized in
         * 128 rows of 2 entries each.
         * In T6, SMT contains 256 SMAC entries in 256 rows.
         * TODO: The below code needs to be updated when we add support
         * for 256 VFs.
         */
        if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
                return ((viid & 0x7f) << 1);
        else
                return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
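
/* Worked example: on T4/T5, a hypothetical VI id of 0x85 yields SMT index
 * ((0x85 & 0x7f) << 1) = 0x0a; on T6 the same VI id maps to row 0x05.
 */
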
/**
 *      cxgb4_port_chan - get the HW channel of a port
 *      @dev: the net device for the port
 *
 *      Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
        return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
        struct adapter *adap = netdev2adap(dev);
        u32 v1, v2, lp_count, hp_count;

        v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
        v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
        if (is_t4(adap->params.chip)) {
                lp_count = LP_COUNT_G(v1);
                hp_count = HP_COUNT_G(v1);
        } else {
                lp_count = LP_COUNT_T5_G(v1);
                hp_count = HP_COUNT_T5_G(v2);
        }
        return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *      cxgb4_port_viid - get the VI id of a port
 *      @dev: the net device for the port
 *
 *      Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
        return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *      cxgb4_port_idx - get the index of a port
 *      @dev: the net device for the port
 *
 *      Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
        return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6)
{
        struct adapter *adap = pci_get_drvdata(pdev);

        spin_lock(&adap->stats_lock);
        t4_tp_get_tcp_stats(adap, v4, v6);
        spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
                      const unsigned int *pgsz_order)
{
        struct adapter *adap = netdev2adap(dev);

        t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
        t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
                     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
                     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
        u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
        __be64 indices;
        int ret;

        spin_lock(&adap->win0_lock);
        ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
                           sizeof(indices), (__be32 *)&indices,
                           T4_MEMORY_READ);
        spin_unlock(&adap->win0_lock);
        if (!ret) {
                *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
                *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
        }
        return ret;
}
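
/* The 64-bit context word read above packs both indices: per the shifts and
 * masks, bits [40:25] hold the consumer index and bits [24:9] the producer
 * index.  For example, indices = 0x0000030000001200 would decode to
 * cidx = 0x0180 and pidx = 0x0009.
 */
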
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
                        u16 size)
{
        struct adapter *adap = netdev2adap(dev);
        u16 hw_pidx, hw_cidx;
        int ret;

        ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
        if (ret)
                return ret;

        if (pidx != hw_pidx) {
                u16 delta;
                u32 val;

                if (pidx >= hw_pidx)
                        delta = pidx - hw_pidx;
                else
                        delta = size - hw_pidx + pidx;

                if (is_t4(adap->params.chip))
                        val = PIDX_V(delta);
                else
                        val = PIDX_T5_V(delta);
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(qid) | val);
        }
        return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
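
/* Minimal sketch of the wrap-around doorbell delta used above (illustrative
 * helper, not part of the driver): with a ring of `size` entries, a driver
 * pidx that has wrapped past the hardware pidx is recovered modulo the ring
 * size.
 */
static inline u16 example_db_delta(u16 pidx, u16 hw_pidx, u16 size)
{
        return pidx >= hw_pidx ? pidx - hw_pidx : size - hw_pidx + pidx;
}
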
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
        struct adapter *adap;
        u32 offset, memtype, memaddr;
        u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
        u32 edc0_end, edc1_end, mc0_end, mc1_end;
        int ret;

        adap = netdev2adap(dev);

        offset = ((stag >> 8) * 32) + adap->vres.stag.start;

        /* Figure out where the offset lands in the Memory Type/Address scheme.
         * This code assumes that the memory is laid out starting at offset 0
         * with no breaks as:  EDC0, EDC1, MC0, MC1. All cards have both EDC0
         * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
         * MC0, and some have both MC0 and MC1.
         */
        size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
        edc0_size = EDRAM0_SIZE_G(size) << 20;
        size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
        edc1_size = EDRAM1_SIZE_G(size) << 20;
        size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
        mc0_size = EXT_MEM0_SIZE_G(size) << 20;

        edc0_end = edc0_size;
        edc1_end = edc0_end + edc1_size;
        mc0_end = edc1_end + mc0_size;

        if (offset < edc0_end) {
                memtype = MEM_EDC0;
                memaddr = offset;
        } else if (offset < edc1_end) {
                memtype = MEM_EDC1;
                memaddr = offset - edc0_end;
        } else {
                if (offset < mc0_end) {
                        memtype = MEM_MC0;
                        memaddr = offset - edc1_end;
                } else if (is_t5(adap->params.chip)) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
                        mc1_size = EXT_MEM1_SIZE_G(size) << 20;
                        mc1_end = mc0_end + mc1_size;
                        if (offset < mc1_end) {
                                memtype = MEM_MC1;
                                memaddr = offset - mc0_end;
                        } else {
                                /* offset beyond the end of any memory */
                                goto err;
                        }
                } else {
                        /* T4/T6 only has a single memory channel */
                        goto err;
                }
        }

        spin_lock(&adap->win0_lock);
        ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
        spin_unlock(&adap->win0_lock);
        return ret;

err:
        dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
                stag, offset);
        return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
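
/* Worked example of the stag-to-address mapping above: with a hypothetical
 * vres.stag.start of 0 and stag 0x1200, offset = (0x1200 >> 8) * 32 = 0x240,
 * which on a card whose EDC0 region covers that range resolves to MEM_EDC0
 * at address 0x240.
 */
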
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
        u32 hi, lo;
        struct adapter *adap;

        adap = netdev2adap(dev);
        lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
        hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

        return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
                         unsigned int qid,
                         enum cxgb4_bar2_qtype qtype,
                         int user,
                         u64 *pbar2_qoffset,
                         unsigned int *pbar2_qid)
{
        return t4_bar2_sge_qregs(netdev2adap(dev),
                                 qid,
                                 (qtype == CXGB4_BAR2_QTYPE_EGRESS
                                  ? T4_BAR2_QTYPE_EGRESS
                                  : T4_BAR2_QTYPE_INGRESS),
                                 user,
                                 pbar2_qoffset,
                                 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
        const struct device *parent;
        const struct net_device *netdev = neigh->dev;

        if (is_vlan_dev(netdev))
                netdev = vlan_dev_real_dev(netdev);
        parent = netdev->dev.parent;
        if (parent && parent->driver == &cxgb4_driver.driver)
                t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
                       void *data)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                check_neigh_update(data);
                break;
        case NETEVENT_REDIRECT:
        default:
                break;
        }
        return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
        .notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
        u32 v1, v2, lp_count, hp_count;

        do {
                v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
                v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
                if (is_t4(adap->params.chip)) {
                        lp_count = LP_COUNT_G(v1);
                        hp_count = HP_COUNT_G(v1);
                } else {
                        lp_count = LP_COUNT_T5_G(v1);
                        hp_count = HP_COUNT_T5_G(v2);
                }

                if (lp_count == 0 && hp_count == 0)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(usecs));
        } while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->db_lock, flags);
        q->db_disabled = 1;
        spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
        spin_lock_irq(&q->db_lock);
        if (q->db_pidx_inc) {
                /* Make sure that all writes to the TX descriptors
                 * are committed before we tell HW about them.
                 */
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
                q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
        spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                disable_txq_db(&adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];

                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                disable_txq_db(&txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                enable_txq_db(adap, &adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];

                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                enable_txq_db(adap, &txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
        enum cxgb4_uld type = CXGB4_ULD_RDMA;

        if (adap->uld && adap->uld[type].handle)
                adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, db_full_task);

        drain_db_fifo(adap, dbfifo_drain_delay);
        enable_dbs(adap);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
        else
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
        u16 hw_pidx, hw_cidx;
        int ret;

        spin_lock_irq(&q->db_lock);
        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
        if (ret)
                goto out;
        if (q->db_pidx != hw_pidx) {
                u16 delta;
                u32 val;

                if (q->db_pidx >= hw_pidx)
                        delta = q->db_pidx - hw_pidx;
                else
                        delta = q->size - hw_pidx + q->db_pidx;

                if (is_t4(adap->params.chip))
                        val = PIDX_V(delta);
                else
                        val = PIDX_T5_V(delta);
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(q->cntxt_id) | val);
        }
out:
        q->db_disabled = 0;
        q->db_pidx_inc = 0;
        spin_unlock_irq(&q->db_lock);
        if (ret)
                CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];

                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                sync_txq_pidx(adap, &txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, db_drop_task);

        if (is_t4(adap->params.chip)) {
                drain_db_fifo(adap, dbfifo_drain_delay);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
                drain_db_fifo(adap, dbfifo_drain_delay);
                recover_all_queues(adap);
                drain_db_fifo(adap, dbfifo_drain_delay);
                enable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
        } else if (is_t5(adap->params.chip)) {
                u32 dropped_db = t4_read_reg(adap, 0x010ac);
                u16 qid = (dropped_db >> 15) & 0x1ffff;
                u16 pidx_inc = dropped_db & 0x1fff;
                u64 bar2_qoffset;
                unsigned int bar2_qid;
                int ret;

                ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
                                        0, &bar2_qoffset, &bar2_qid);
                if (ret)
                        dev_err(adap->pdev_dev, "doorbell drop recovery: "
                                "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
                else
                        writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
                               adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

                /* Re-enable BAR2 WC */
                t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
        }

        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}

void t4_db_full(struct adapter *adap)
{
        if (is_t4(adap->params.chip)) {
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
                queue_work(adap->workq, &adap->db_full_task);
        }
}

void t4_db_dropped(struct adapter *adap)
{
        if (is_t4(adap->params.chip)) {
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
        }
        queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
        if (!netevent_registered) {
                register_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = true;
        }
}

static void detach_ulds(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        list_del(&adap->list_node);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld && adap->uld[i].handle) {
                        adap->uld[i].state_change(adap->uld[i].handle,
                                                  CXGB4_STATE_DETACH);
                        adap->uld[i].handle = NULL;
                }
        if (netevent_registered && list_empty(&adapter_list)) {
                unregister_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = false;
        }
        mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld && adap->uld[i].handle)
                        adap->uld[i].state_change(adap->uld[i].handle,
                                                  new_state);
        mutex_unlock(&uld_mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
                                   unsigned long event, void *data)
{
        struct inet6_ifaddr *ifa = data;
        struct net_device *event_dev = ifa->idev->dev;
        const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
        struct adapter *adap;
#endif
        if (is_vlan_dev(event_dev))
                event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
        if (event_dev->flags & IFF_MASTER) {
                list_for_each_entry(adap, &adapter_list, list_node) {
                        switch (event) {
                        case NETDEV_UP:
                                cxgb4_clip_get(adap->port[0],
                                               (const u32 *)ifa, 1);
                                break;
                        case NETDEV_DOWN:
                                cxgb4_clip_release(adap->port[0],
                                                   (const u32 *)ifa, 1);
                                break;
                        default:
                                break;
                        }
                }
                return NOTIFY_OK;
        }
#endif

        if (event_dev)
                parent = event_dev->dev.parent;

        if (parent && parent->driver == &cxgb4_driver.driver) {
                switch (event) {
                case NETDEV_UP:
                        cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
                        break;
                case NETDEV_DOWN:
                        cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
                        break;
                default:
                        break;
                }
        }
        return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
        .notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
        int i;
        struct net_device *dev;
        int ret;

        rcu_read_lock();

        for (i = 0; i < MAX_NPORTS; i++) {
                dev = adap->port[i];
                ret = 0;

                if (dev)
                        ret = cxgb4_update_root_dev_clip(dev);

                if (ret < 0)
                        break;
        }
        rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/*
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        err = setup_sge_queues(adap);
        if (err)
                goto out;
        err = setup_rss(adap);
        if (err)
                goto freeq;

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;
                err = request_msix_queue_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else {
                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
                                  adap->port[0]->name, adap);
                if (err)
                        goto irq_err;
        }

        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
        notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
        update_clip(adap);
#endif
        /* Initialize hash mac addr list */
        INIT_LIST_HEAD(&adap->mac_hlist);
 out:
        return err;
 irq_err:
        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
        t4_free_sge_resources(adap);
        goto out;
}

static void cxgb_down(struct adapter *adapter)
{
        cancel_work_sync(&adapter->tid_release_task);
        cancel_work_sync(&adapter->db_full_task);
        cancel_work_sync(&adapter->db_drop_task);
        adapter->tid_release_task_busy = false;
        adapter->tid_release_head = NULL;

        t4_sge_stop(adapter);
        t4_free_sge_resources(adapter);
        adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        netif_carrier_off(dev);

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgb_up(adapter);
                if (err < 0)
                        return err;
        }

        err = link_start(dev);
        if (!err)
                netif_tx_start_all_queues(dev);
        return err;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
        return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}

2285 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2286 __be32 sip, __be16 sport, __be16 vlan,
2287 unsigned int queue, unsigned char port, unsigned char mask)
2290 struct filter_entry *f;
2291 struct adapter *adap;
2295 adap = netdev2adap(dev);
2297 /* Adjust stid to correct filter index */
2298 stid -= adap->tids.sftid_base;
2299 stid += adap->tids.nftids;
2301 /* Check to make sure the filter requested is writable ...
2303 f = &adap->tids.ftid_tab[stid];
2304 ret = writable_filter(f);
	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}
	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
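/* Usage example (illustrative only; stid, sip, queue and port are assumed to
 * come from the caller's listening-server setup): an upper-layer driver
 * redirecting IPv4 SYNs for TCP port 80 to RX queue "queue" might do
 *
 *	err = cxgb4_create_server_filter(netdev, stid, sip, htons(80),
 *					 0, queue, port, ~0);
 *
 * and later tear the filter down with
 *
 *	err = cxgb4_remove_server_filter(netdev, stid, queue, false);
 */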
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently.
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);
2395 ns->tx_bytes = stats.tx_octets;
2396 ns->tx_packets = stats.tx_frames;
2397 ns->rx_bytes = stats.rx_octets;
2398 ns->rx_packets = stats.rx_frames;
2399 ns->multicast = stats.rx_mcast_frames;
2401 /* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
2405 ns->rx_crc_errors = stats.rx_fcs_err;
2406 ns->rx_frame_errors = stats.rx_symbol_err;
2407 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
2408 stats.rx_ovflow2 + stats.rx_ovflow3 +
2409 stats.rx_trunc0 + stats.rx_trunc1 +
2410 stats.rx_trunc2 + stats.rx_trunc3;
2411 ns->rx_missed_errors = 0;
2413 /* detailed tx_errors */
2414 ns->tx_aborted_errors = 0;
2415 ns->tx_carrier_errors = 0;
2416 ns->tx_fifo_errors = 0;
2417 ns->tx_heartbeat_errors = 0;
2418 ns->tx_window_errors = 0;
	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		switch (pi->tstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
		case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
		default:
			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
			return -ERANGE;
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
#ifdef CONFIG_PCI_IOV
static int dummy_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}
/* Fill MAC address that will be assigned by the FW */
static void fill_vf_station_mac_addr(struct adapter *adap)
{
	unsigned int i;
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	int err;
	u8 *na;
	u16 a, b;

	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (!err) {
		na = adap->params.vpd.na;
		for (i = 0; i < ETH_ALEN; i++)
			hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
				      hex2val(na[2 * i + 1]));
		a = (hw_addr[0] << 8) | hw_addr[1];
		b = (hw_addr[1] << 8) | hw_addr[2];
		a ^= b;
		a |= 0x0200;	/* locally assigned Ethernet MAC address */
		a &= ~0x0100;	/* not a multicast Ethernet MAC address */
		macaddr[0] = a >> 8;
		macaddr[1] = a & 0xff;

		for (i = 2; i < 5; i++)
			macaddr[i] = hw_addr[i + 1];

		for (i = 0; i < adap->num_vfs; i++) {
			macaddr[5] = adap->pf * 16 + i;
			ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
		}
	}
}
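/* Worked example (hypothetical VPD contents): an "na" string of
 * "000743AABBCC" parses to hw_addr[] = 00:07:43:aa:bb:cc above. Then
 * a = 0x0007 ^ 0x0743 = 0x0744, which becomes 0x0644 once the locally
 * administered bit (0x0200) is set and the multicast bit (0x0100) is
 * cleared. PF 4 with two VFs would therefore hand out 06:44:aa:bb:cc:40
 * and 06:44:aa:bb:cc:41.
 */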
static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}
static int cxgb_get_vf_config(struct net_device *dev,
			      int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
	return 0;
}
static int cxgb_get_phys_port_id(struct net_device *dev,
				 struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}
#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;
	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}
2657 /* Convert from Mbps to Kbps */
2658 req_rate = rate << 10;
2660 /* Max rate is 10 Gbps */
	if (req_rate >= SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Gbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000000);
		return -ERANGE;
	}
	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!req_rate)
		return 0;
2685 /* Fetch any available unused or matching scheduling class */
2686 memset(&p, 0, sizeof(p));
2687 p.type = SCHED_CLASS_TYPE_PACKET;
2688 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
2689 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
2690 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
2691 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
2692 p.u.params.channel = pi->tx_chan;
2693 p.u.params.class = SCHED_CLS_NONE;
2694 p.u.params.minrate = 0;
2695 p.u.params.maxrate = req_rate;
2696 p.u.params.weight = 0;
2697 p.u.params.pktsize = dev->mtu;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
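/* Usage sketch: this is the ndo_set_tx_maxrate hook, so "rate" arrives from
 * the stack in Mbps, e.g. after userspace runs
 *
 *	echo 100 > /sys/class/net/<dev>/queues/tx-0/tx_maxrate
 *
 * it lands here as rate == 100 for queue index 0 and, after the shift
 * above, as req_rate == 102400 Kbps.
 */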
static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			 struct tc_to_netdev *tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
	    tc->type == TC_SETUP_CLSU32) {
		switch (tc->cls_u32->command) {
		case TC_CLSU32_NEW_KNODE:
		case TC_CLSU32_REPLACE_KNODE:
			return cxgb4_config_knode(dev, proto, tc->cls_u32);
		case TC_CLSU32_DELETE_KNODE:
			return cxgb4_delete_knode(dev, proto, tc->cls_u32);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open             = dummy_open,
	.ndo_set_vf_mac       = cxgb_set_vf_mac,
	.ndo_get_vf_config    = cxgb_get_vf_config,
	.ndo_get_phys_port_id = cxgb_get_phys_port_id,
};
#endif
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->version, cxgb4_driver_version,
		sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo       = get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
{
	int port;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter. RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);
	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
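	/* The 0xE4 map is the identity mapping read two bits per queue:
	 * 0xE4 == 0b11100100, i.e. modulation queue 0 -> channel 0
	 * (bits 1:0 == 00), queue 1 -> channel 1 (bits 3:2 == 01),
	 * queue 2 -> channel 2 (bits 5:4 == 10) and queue 3 -> channel 3
	 * (bits 7:6 == 11).
	 */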
	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);
#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
2917 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2919 #define MAX_ATIDS 8192U
2922 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2924 * If the firmware we're dealing with has Configuration File support, then
2925 * we use that to perform all configuration
2929 * Tweak configuration based on module parameters, etc. Most of these have
2930 * defaults assigned to them by Firmware Configuration Files (if we're using
2931 * them) but need to be explicitly set if we're using hard-coded
2932 * initialization. But even in the case of using Firmware Configuration
2933 * Files, we'd like to expose the ability to change these via module
2934 * parameters so these are essentially common tweaks/settings for
2935 * Configuration Files and hard-coded initialization ...
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc. The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));
	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}

/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
2987 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
2988 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
2989 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
2991 offset = le24(phy_fw_data + 0x8) << 12;
2992 offset = le24(phy_fw_data + offset + 0xa);
2993 return be16(phy_fw_data + offset + 0x27e);
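/* Worked example (made-up header bytes): if the little-endian 24-bit value
 * at offset 0x8 is 0x1, the primary image starts at 0x1 << 12 == 0x1000; if
 * the 24-bit value at 0x1000 + 0xa is 0x2000, the version returned is the
 * big-endian 16-bit value at 0x2000 + 0x27e.
 */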
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};
static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
3032 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
3033 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
3034 * we return a negative error number. If we transfer new firmware we return 1
3035 * (from t4_load_phy_fw()). If we don't do anything we return 0.
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}
3052 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3053 * use that. The adapter firmware provides us with a memory buffer
3054 * where we can load a PHY firmware file from the host if we want to
3055 * override the PHY firmware File in flash.
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
3060 /* For adapters without FLASH attached to PHY for their
3061 * firmware, it's obviously a fatal error if we can't get the
3062 * firmware to the adapter. For adapters with PHY firmware
3063 * FLASH storage, it's worth a warning if we can't find the
3064 * PHY Firmware but we'll neuter the error ...
3066 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3067 "/lib/firmware/%s, error %d\n",
3068 phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with, on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}
		return ret;
	}
	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;
3092 if (phy_info->phy_fw_version)
3093 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3095 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3096 "Firmware /lib/firmware/%s, version %#x\n",
3097 phy_info->phy_fw_file, new_phy_fw_ver);
3100 release_firmware(phyf);
3106 * Attempt to initialize the adapter via a Firmware Configuration File.
3108 static int adap_init0_config(struct adapter *adapter, int reset)
3110 struct fw_caps_config_cmd caps_cmd;
3111 const struct firmware *cf;
3112 unsigned long mtype = 0, maddr = 0;
3113 u32 finiver, finicsum, cfcsum;
3115 int config_issued = 0;
3116 char *fw_config_file, fw_config_file_path[256];
3117 char *config_name = NULL;
	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}
3129 /* If this is a 10Gb/s-BT adapter make sure the chip-external
3130 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
3131 * to be performed after any global adapter RESET above since some
3132 * PHYs only have local RAM copies of the PHY firmware.
3134 if (is_10gbt_device(adapter->pdev->device)) {
3135 ret = adap_init0_phy(adapter);
3140 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3141 * then use that. Otherwise, use the configuration file stored
3142 * in the adapter flash ...
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}
	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];
3169 sprintf(fw_config_file_path,
3170 "/lib/firmware/%s", fw_config_file);
3171 config_name = fw_config_file_path;
		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
3176 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3177 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3178 ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
		if (ret == 0) {
			/*
			 * For t4_memory_rw() below addresses and
3183 * sizes have to be in terms of multiples of 4
3184 * bytes. So, if the Configuration File isn't
3185 * a multiple of 4 bytes in length we'll have
3186 * to write that out separately since we can't
3187 * guarantee that the bytes following the
3188 * residual byte in the buffer returned by
3189 * request_firmware() are zeroed out ...
3191 size_t resid = cf->size & 0x3;
3192 size_t size = cf->size & ~0x3;
3193 __be32 *data = (__be32 *)cf->data;
3195 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3196 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3198 spin_lock(&adapter->win0_lock);
3199 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3200 size, data, T4_MEMORY_WRITE);
			if (ret == 0 && resid != 0) {
				union {
					__be32 word;
					char buf[4];
				} last;
				int i;

				last.word = data[size >> 2];
				for (i = resid; i < 4; i++)
					last.buf[i] = 0;
				ret = t4_memory_rw(adapter, 0, mtype,
						   maddr + size,
						   4, &last.word,
						   T4_MEMORY_WRITE);
			}
			spin_unlock(&adapter->win0_lock);
3220 release_firmware(cf);
3226 * Issue a Capability Configuration command to the firmware to get it
3227 * to parse the Configuration File. We don't use t4_fw_config_file()
3228 * because we want the ability to modify various features after we've
3229 * processed the configuration file ...
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
3237 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3238 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3239 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3240 FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
3244 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3245 * Configuration File in FLASH), our last gasp effort is to use the
3246 * Firmware Configuration File which is embedded in the firmware. A
3247 * very few early versions of the firmware didn't have one embedded
3248 * but we can ignore those.
3250 if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3257 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3258 sizeof(caps_cmd), &caps_cmd);
3259 config_name = "Firmware Default";
3266 finiver = ntohl(caps_cmd.finiver);
3267 finicsum = ntohl(caps_cmd.finicsum);
3268 cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);
	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;
3288 * Tweak configuration based on system architecture, module
3291 ret = adap_init0_tweaks(adapter);
3296 * And finally tell the firmware to initialize itself using the
3297 * parameters from the Configuration File.
3299 ret = t4_fw_initialize(adapter, adapter->mbox);
3303 /* Emit Firmware Configuration File information and return
3306 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3307 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
3308 config_name, finiver, cfcsum);
3312 * Something bad happened. Return the error ... (If the "error"
3313 * is that there's no Configuration File on the adapter we don't
3314 * want to issue a warning since this is fairly common.)
3317 if (config_issued && ret != -ENOENT)
3318 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3323 static struct fw_info fw_info_array[] = {
3326 .fs_name = FW4_CFNAME,
3327 .fw_mod_name = FW4_FNAME,
3329 .chip = FW_HDR_CHIP_T4,
3330 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3331 .intfver_nic = FW_INTFVER(T4, NIC),
3332 .intfver_vnic = FW_INTFVER(T4, VNIC),
3333 .intfver_ri = FW_INTFVER(T4, RI),
3334 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3335 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3339 .fs_name = FW5_CFNAME,
3340 .fw_mod_name = FW5_FNAME,
3342 .chip = FW_HDR_CHIP_T5,
3343 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3344 .intfver_nic = FW_INTFVER(T5, NIC),
3345 .intfver_vnic = FW_INTFVER(T5, VNIC),
3346 .intfver_ri = FW_INTFVER(T5, RI),
3347 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3348 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3352 .fs_name = FW6_CFNAME,
3353 .fw_mod_name = FW6_FNAME,
3355 .chip = FW_HDR_CHIP_T6,
3356 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
3357 .intfver_nic = FW_INTFVER(T6, NIC),
3358 .intfver_vnic = FW_INTFVER(T6, VNIC),
3359 .intfver_ofld = FW_INTFVER(T6, OFLD),
3360 .intfver_ri = FW_INTFVER(T6, RI),
3361 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3362 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3363 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3364 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3370 static struct fw_info *find_fw_info(int chip)
3374 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3375 if (fw_info_array[i].chip == chip)
3376 return &fw_info_array[i];
3382 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;
3393 /* Grab Firmware Device Log parameters as early as possible so we have
3394 * access to it for debugging, etc.
3396 ret = t4_init_devlog_params(adap);
3400 /* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
3412 * If we're the Master PF Driver and the device is uninitialized,
3413 * then let's consider upgrading the firmware ... (We always want
3414 * to check the firmware version number in order to A. get it for
3415 * later reporting and B. to warn if the currently loaded firmware
3416 * is excessively mismatched relative to the driver.)
3418 t4_get_fw_version(adap, &adap->params.fw_vers);
3419 t4_get_bs_version(adap, &adap->params.bs_vers);
3420 t4_get_tp_version(adap, &adap->params.tp_vers);
3421 t4_get_exprom_version(adap, &adap->params.er_vers);
3423 ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
3427 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3428 struct fw_info *fw_info;
3429 struct fw_hdr *card_fw;
3430 const struct firmware *fw;
3431 const u8 *fw_data = NULL;
3432 unsigned int fw_size = 0;
3434 /* This is the firmware whose headers the driver was compiled
3437 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3438 if (fw_info == NULL) {
3439 dev_err(adap->pdev_dev,
3440 "unable to get firmware info for chip %d.\n",
3441 CHELSIO_CHIP_VERSION(adap->params.chip));
		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}
		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}
3475 * Grab VPD parameters. This should be done after we establish a
3476 * connection to the firmware since some of the VPD parameters
3477 * (notably the Core Clock frequency) are retrieved via requests to
3478 * the firmware. On the other hand, we need these fairly early on
3479 * so we do this right after getting ahold of the firmware.
3481 ret = t4_get_vpd_params(adap, &adap->params.vpd);
3486 * Find out what ports are available to us. Note that we need to do
3487 * this before calling adap_init0_no_config() since it needs nports
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;
3497 adap->params.nports = hweight32(port_vec);
3498 adap->params.portvec = port_vec;
	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
3503 if (state == DEV_STATE_INIT) {
3504 dev_info(adap->pdev_dev, "Coming up as %s: "\
3505 "Adapter already initialized\n",
3506 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3508 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3509 "Initializing adapter\n");
3511 /* Find out whether we're dealing with a version of the
3512 * firmware which has configuration file support.
3514 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3515 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3516 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3519 /* If the firmware doesn't support Configuration Files,
3523 dev_err(adap->pdev_dev, "firmware doesn't support "
3524 "Firmware Configuration Files\n");
3528 /* The firmware provides us with a memory buffer where we can
3529 * load a Configuration File from the host if we want to
3530 * override the Configuration File in flash.
3532 ret = adap_init0_config(adap, reset);
3533 if (ret == -ENOENT) {
3534 dev_err(adap->pdev_dev, "no Configuration File "
3535 "present on adapter.\n");
3539 dev_err(adap->pdev_dev, "could not initialize "
3540 "adapter, error %d\n", -ret);
3545 /* Give the SGE code a chance to pull in anything that it needs ...
3546 * Note that this must be called after we retrieve our VPD parameters
3547 * in order to know how to convert core ticks to seconds, etc.
3549 ret = t4_sge_init(adap);
3553 if (is_bypass_device(adap->pdev->device))
3554 adap->params.bypass = 1;
3557 * Grab some of our basic fundamental operating parameters.
3559 #define FW_PARAM_DEV(param) \
3560 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3561 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3563 #define FW_PARAM_PFVF(param) \
3564 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3565 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
3566 FW_PARAMS_PARAM_Y_V(0) | \
3567 FW_PARAMS_PARAM_Z_V(0)
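/* For example, FW_PARAM_PFVF(EQ_START) builds (conceptually) a parameter id
 * with mnemonic FW_PARAMS_MNEM_PFVF, parameter index
 * FW_PARAMS_PARAM_PFVF_EQ_START and zeroed Y/Z sub-indices; passed to
 * t4_query_params(), the firmware answers with the first egress queue id
 * this PF may use.
 */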
3569 params[0] = FW_PARAM_PFVF(EQ_START);
3570 params[1] = FW_PARAM_PFVF(L2T_START);
3571 params[2] = FW_PARAM_PFVF(L2T_END);
3572 params[3] = FW_PARAM_PFVF(FILTER_START);
3573 params[4] = FW_PARAM_PFVF(FILTER_END);
3574 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3575 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
3578 adap->sge.egr_start = val[0];
3579 adap->l2t_start = val[1];
3580 adap->l2t_end = val[2];
3581 adap->tids.ftid_base = val[3];
3582 adap->tids.nftids = val[4] - val[3] + 1;
3583 adap->sge.ingr_start = val[5];
3585 /* qids (ingress/egress) returned from firmware can be anywhere
3586 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
3587 * Hence driver needs to allocate memory for this range to
3588 * store the queue info. Get the highest IQFLINT/EQ index returned
3589 * in FW_EQ_*_CMD.alloc command.
3591 params[0] = FW_PARAM_PFVF(EQ_END);
3592 params[1] = FW_PARAM_PFVF(IQFLINT_END);
3593 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3596 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3597 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3599 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3600 sizeof(*adap->sge.egr_map), GFP_KERNEL);
3601 if (!adap->sge.egr_map) {
3606 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3607 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3608 if (!adap->sge.ingr_map) {
	/* Allocate the memory for the various egress queue bitmaps,
	 * i.e. starving_fl, txq_maperr and blocked_fl.
	 */
3616 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3617 sizeof(long), GFP_KERNEL);
3618 if (!adap->sge.starving_fl) {
3623 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3624 sizeof(long), GFP_KERNEL);
3625 if (!adap->sge.txq_maperr) {
3630 #ifdef CONFIG_DEBUG_FS
3631 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3632 sizeof(long), GFP_KERNEL);
3633 if (!adap->sge.blocked_fl) {
3639 params[0] = FW_PARAM_PFVF(CLIP_START);
3640 params[1] = FW_PARAM_PFVF(CLIP_END);
3641 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3644 adap->clipt_start = val[0];
3645 adap->clipt_end = val[1];
3647 /* We don't yet have a PARAMs calls to retrieve the number of Traffic
3648 * Classes supported by the hardware/firmware so we hard code it here
3651 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
3653 /* query params related to active filter region */
3654 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3655 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3656 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3657 /* If Active filter size is set we enable establishing
3658 * offload connection through firmware work request
3660 if ((val[0] != val[1]) && (ret >= 0)) {
3661 adap->flags |= FW_OFLD_CONN;
3662 adap->tids.aftid_base = val[0];
3663 adap->tids.aftid_end = val[1];
3666 /* If we're running on newer firmware, let it know that we're
3667 * prepared to deal with encapsulated CPL messages. Older
3668 * firmware won't understand this and we'll just get
3669 * unencapsulated messages ...
3671 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3673 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
3676 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
3677 * capability. Earlier versions of the firmware didn't have the
3678 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
3679 * permission to use ULPTX MEMWRITE DSGL.
3681 if (is_t4(adap->params.chip)) {
3682 adap->params.ulptx_memwrite_dsgl = false;
3684 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
3685 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3687 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
3690 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
3691 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
3692 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3694 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
3697 * Get device capabilities so we can determine what resources we need
3700 memset(&caps_cmd, 0, sizeof(caps_cmd));
3701 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3702 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3703 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3704 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3709 if (caps_cmd.ofldcaps) {
3710 /* query offload-related parameters */
3711 params[0] = FW_PARAM_DEV(NTID);
3712 params[1] = FW_PARAM_PFVF(SERVER_START);
3713 params[2] = FW_PARAM_PFVF(SERVER_END);
3714 params[3] = FW_PARAM_PFVF(TDDP_START);
3715 params[4] = FW_PARAM_PFVF(TDDP_END);
3716 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3717 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3721 adap->tids.ntids = val[0];
3722 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3723 adap->tids.stid_base = val[1];
3724 adap->tids.nstids = val[2] - val[1] + 1;
3726 * Setup server filter region. Divide the available filter
3727 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if workaround
		 * path is enabled.
		 * 1. For regular filters.
		 * 2. Server filter: These are special filters which are used
		 *    to redirect SYN packets to offload queue.
		 */
3734 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3735 adap->tids.sftid_base = adap->tids.ftid_base +
3736 DIV_ROUND_UP(adap->tids.nftids, 3);
3737 adap->tids.nsftids = adap->tids.nftids -
3738 DIV_ROUND_UP(adap->tids.nftids, 3);
3739 adap->tids.nftids = adap->tids.sftid_base -
3740 adap->tids.ftid_base;
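		/* Worked example (illustrative numbers): with nftids == 300
		 * the split above yields
		 *
		 *	sftid_base = ftid_base + DIV_ROUND_UP(300, 3)
		 *	           == ftid_base + 100
		 *	nsftids    = 300 - DIV_ROUND_UP(300, 3) == 200
		 *	nftids     = sftid_base - ftid_base     == 100
		 *
		 * i.e. regular filters keep the first third of the region
		 * and server filters take the remaining two thirds.
		 */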
3742 adap->vres.ddp.start = val[3];
3743 adap->vres.ddp.size = val[4] - val[3] + 1;
3744 adap->params.ofldq_wr_cred = val[5];
3746 adap->params.offload = 1;
3747 adap->num_ofld_uld += 1;
3749 if (caps_cmd.rdmacaps) {
3750 params[0] = FW_PARAM_PFVF(STAG_START);
3751 params[1] = FW_PARAM_PFVF(STAG_END);
3752 params[2] = FW_PARAM_PFVF(RQ_START);
3753 params[3] = FW_PARAM_PFVF(RQ_END);
3754 params[4] = FW_PARAM_PFVF(PBL_START);
3755 params[5] = FW_PARAM_PFVF(PBL_END);
3756 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3760 adap->vres.stag.start = val[0];
3761 adap->vres.stag.size = val[1] - val[0] + 1;
3762 adap->vres.rq.start = val[2];
3763 adap->vres.rq.size = val[3] - val[2] + 1;
3764 adap->vres.pbl.start = val[4];
3765 adap->vres.pbl.size = val[5] - val[4] + 1;
3767 params[0] = FW_PARAM_PFVF(SQRQ_START);
3768 params[1] = FW_PARAM_PFVF(SQRQ_END);
3769 params[2] = FW_PARAM_PFVF(CQ_START);
3770 params[3] = FW_PARAM_PFVF(CQ_END);
3771 params[4] = FW_PARAM_PFVF(OCQ_START);
3772 params[5] = FW_PARAM_PFVF(OCQ_END);
3773 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
3777 adap->vres.qp.start = val[0];
3778 adap->vres.qp.size = val[1] - val[0] + 1;
3779 adap->vres.cq.start = val[2];
3780 adap->vres.cq.size = val[3] - val[2] + 1;
3781 adap->vres.ocq.start = val[4];
3782 adap->vres.ocq.size = val[5] - val[4] + 1;
3784 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
3785 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
3786 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
3789 adap->params.max_ordird_qp = 8;
3790 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
3793 adap->params.max_ordird_qp = val[0];
3794 adap->params.max_ird_adapter = val[1];
3796 dev_info(adap->pdev_dev,
3797 "max_ordird_qp %d max_ird_adapter %d\n",
3798 adap->params.max_ordird_qp,
3799 adap->params.max_ird_adapter);
3800 adap->num_ofld_uld += 2;
3802 if (caps_cmd.iscsicaps) {
3803 params[0] = FW_PARAM_PFVF(ISCSI_START);
3804 params[1] = FW_PARAM_PFVF(ISCSI_END);
3805 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
3809 adap->vres.iscsi.start = val[0];
3810 adap->vres.iscsi.size = val[1] - val[0] + 1;
		/* LIO target and cxgb4i initiator */
3812 adap->num_ofld_uld += 2;
3814 if (caps_cmd.cryptocaps) {
3815 /* Should query params here...TODO */
3816 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
3817 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
3823 adap->vres.ncrypto_fc = val[0];
3825 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
3828 #undef FW_PARAM_PFVF
3831 /* The MTU/MSS Table is initialized by now, so load their values. If
3832 * we're initializing the adapter, then we'll make any modifications
3833 * we want to the MTU/MSS Table and also initialize the congestion
3836 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3837 if (state != DEV_STATE_INIT) {
3840 /* The default MTU Table contains values 1492 and 1500.
3841 * However, for TCP, it's better to have two values which are
3842 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
3843 * This allows us to have a TCP Data Payload which is a
3844 * multiple of 8 regardless of what combination of TCP Options
3845 * are in use (always a multiple of 4 bytes) which is
3846 * important for performance reasons. For instance, if no
3847 * options are in use, then we have a 20-byte IP header and a
3848 * 20-byte TCP header. In this case, a 1500-byte MSS would
3849 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
3850 * which is not a multiple of 8. So using an MSS of 1488 in
3851 * this case results in a TCP Data Payload of 1448 bytes which
3852 * is a multiple of 8. On the other hand, if 12-byte TCP Time
3853 * Stamps have been negotiated, then an MTU of 1500 bytes
3854 * results in a TCP Data Payload of 1448 bytes which, as
3855 * above, is a multiple of 8 bytes ...
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}
3863 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3864 adap->params.b_wnd);
3866 t4_init_sge_params(adap);
3867 adap->flags |= FW_OK;
3868 t4_init_tp_params(adap);
3872 * Something bad happened. If a command timed out or failed with EIO
3873 * FW does not operate within its spec or something catastrophic
3874 * happened to HW/FW, stop issuing commands.
3877 kfree(adap->sge.egr_map);
3878 kfree(adap->sge.ingr_map);
3879 kfree(adap->sge.starving_fl);
3880 kfree(adap->sge.txq_maperr);
3881 #ifdef CONFIG_DEBUG_FS
3882 kfree(adap->sge.blocked_fl);
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
3891 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3892 pci_channel_state_t state)
3895 struct adapter *adap = pci_get_drvdata(pdev);
3901 adap->flags &= ~FW_OK;
3902 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3903 spin_lock(&adap->stats_lock);
3904 for_each_port(adap, i) {
3905 struct net_device *dev = adap->port[i];
3907 netif_device_detach(dev);
3908 netif_carrier_off(dev);
3910 spin_unlock(&adap->stats_lock);
3911 disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
3915 if ((adap->flags & DEV_ENABLED)) {
3916 pci_disable_device(pdev);
3917 adap->flags &= ~DEV_ENABLED;
3919 out: return state == pci_channel_io_perm_failure ?
3920 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}
3935 if (!(adap->flags & DEV_ENABLED)) {
3936 if (pci_enable_device(pdev)) {
3937 dev_err(&pdev->dev, "Cannot reenable PCI "
3938 "device after reset\n");
3939 return PCI_ERS_RESULT_DISCONNECT;
3941 adap->flags |= DEV_ENABLED;
3944 pci_set_master(pdev);
3945 pci_restore_state(pdev);
3946 pci_save_state(pdev);
3947 pci_cleanup_aer_uncorrect_error_status(pdev);
3949 if (t4_wait_dev_ready(adap->regs) < 0)
3950 return PCI_ERS_RESULT_DISCONNECT;
3951 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
3952 return PCI_ERS_RESULT_DISCONNECT;
3953 adap->flags |= FW_OK;
3954 if (adap_init1(adap, &c))
3955 return PCI_ERS_RESULT_DISCONNECT;
3957 for_each_port(adap, i) {
3958 struct port_info *p = adap2pinfo(adap, i);
3960 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
3963 return PCI_ERS_RESULT_DISCONNECT;
3965 p->xact_addr_filt = -1;
3968 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3969 adap->params.b_wnd);
3972 return PCI_ERS_RESULT_DISCONNECT;
3973 return PCI_ERS_RESULT_RECOVERED;
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
3997 static const struct pci_error_handlers cxgb4_eeh = {
3998 .error_detected = eeh_err_detected,
3999 .slot_reset = eeh_slot_reset,
4000 .resume = eeh_resume,
/* Return true if the Link Configuration supports "High Speeds" (those
 * greater than 1Gb/s).
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{
4008 unsigned int speeds, high_speeds;
4010 speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
4011 high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
4013 return high_speeds != 0;
4017 * Perform default configuration of DMA queues depending on the number and type
4018 * of ports we found and the number of available CPUs. Most settings can be
4019 * modified by the admin prior to actual use.
4021 static void cfg_queues(struct adapter *adap)
4023 struct sge *s = &adap->sge;
4024 int i = 0, n10g = 0, qidx = 0;
4025 #ifndef CONFIG_CHELSIO_T4_DCB
4029 /* Reduce memory usage in kdump environment, disable all offload.
4031 if (is_kdump_kernel()) {
4032 adap->params.offload = 0;
4033 adap->params.crypto = 0;
	} else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4040 #ifdef CONFIG_CHELSIO_T4_DCB
4041 /* For Data Center Bridging support we need to be able to support up
4042 * to 8 Traffic Priorities; each of which will be assigned to its
4043 * own TX Queue in order to prevent Head-Of-Line Blocking.
4045 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4046 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4047 MAX_ETH_QSETS, adap->params.nports * 8);
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */
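	/* Worked example (assumed topology): a 2-port adapter with both
	 * ports at 10G and MAX_ETH_QSETS == 32 gives
	 * q10g = (32 - 0) / 2 == 16, which is then capped to
	 * netif_get_num_default_rss_queues() (8 by default), so each port
	 * ends up with 8 queue sets and qidx == 16 overall.
	 */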
4078 s->max_ethqsets = qidx; /* MSI-X may lower it later */
	if (is_uld(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to
		 * 1G, otherwise we divide all available queues amongst the
		 * channels capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
	}
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}
4101 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4102 s->ethtxq[i].q.size = 1024;
4104 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4105 s->ctrlq[i].q.size = 512;
4107 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4108 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
static int get_msix_info(struct adapter *adap)
{
	struct uld_msix_info *msix_info;
	unsigned int max_ingq = 0;

	if (is_offload(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
	if (is_pci_uld(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_uld;

	if (!max_ingq)
		return 0;

	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
						 sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap_ulds.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}
	spin_lock_init(&adap->msix_bmap_ulds.lock);
	adap->msix_info_ulds = msix_info;

	return 0;
}
static void free_msix_info(struct adapter *adap)
{
	if (!(adap->num_uld && adap->num_ofld_uld))
		return;

	kfree(adap->msix_info_ulds);
	kfree(adap->msix_bmap_ulds.msix_bmap);
}
4177 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4178 #define EXTRA_VECS 2
4180 static int enable_msix(struct adapter *adap)
4182 int ofld_need = 0, uld_need = 0;
4183 int i, j, want, need, allocated;
4184 struct sge *s = &adap->sge;
4185 unsigned int nchan = adap->params.nports;
4186 struct msix_entry *entries;
4187 int max_ingq = MAX_INGQ;
4189 if (is_pci_uld(adap))
4190 max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
4191 if (is_offload(adap))
4192 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
	entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
4200 adap->params.offload = 0;
4201 adap->params.crypto = 0;
4204 for (i = 0; i < max_ingq + 1; ++i)
4205 entries[i].entry = i;
4207 want = s->max_ethqsets + EXTRA_VECS;
4208 if (is_offload(adap)) {
4209 want += adap->num_ofld_uld * s->ofldqsets;
4210 ofld_need = adap->num_ofld_uld * nchan;
4212 if (is_pci_uld(adap)) {
4213 want += adap->num_uld * s->ofldqsets;
4214 uld_need = adap->num_uld * nchan;
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
4224 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}
4232 /* Distribute available vectors to the various queue groups.
4233 * Every group gets its minimum requirement and NIC gets top
4234 * priority for leftovers.
4236 i = allocated - EXTRA_VECS - ofld_need - uld_need;
4237 if (i < s->max_ethqsets) {
4238 s->max_ethqsets = i;
4239 if (i < s->ethqsets)
4240 reduce_ethqs(adap, i);
4243 if (allocated < want)
4244 s->nqs_per_uld = nchan;
4246 s->nqs_per_uld = s->ofldqsets;
4249 for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
4250 adap->msix_info[i].vec = entries[i].vector;
4252 for (j = 0 ; i < allocated; ++i, j++) {
4253 adap->msix_info_ulds[j].vec = entries[i].vector;
4254 adap->msix_info_ulds[j].idx = i;
4256 adap->msix_bmap_ulds.mapsize = j;
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d per uld %d\n",
		 allocated, s->max_ethqsets, s->nqs_per_uld);

	kfree(entries);
	return 0;
}
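/* Worked example (assumed 4-port NIC-only adapter, so ofld_need and
 * uld_need are 0): with max_ethqsets == 32, want = 32 + EXTRA_VECS == 34
 * and need = 4 + EXTRA_VECS == 6 (non-DCB build), so
 * pci_enable_msix_range() may return anywhere from 6 to 34 vectors; if it
 * returns fewer than 34, the Ethernet queue sets are trimmed via
 * reduce_ethqs() above.
 */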
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}
4287 static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
4288 enum pci_bus_speed *speed,
4289 enum pcie_link_width *width)
4291 u32 lnkcap1, lnkcap2;
4294 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
4296 *speed = PCI_SPEED_UNKNOWN;
4297 *width = PCIE_LNK_WIDTH_UNKNOWN;
4299 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
4301 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
4303 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
4304 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
4305 *speed = PCIE_SPEED_8_0GT;
4306 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
4307 *speed = PCIE_SPEED_5_0GT;
4308 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
4309 *speed = PCIE_SPEED_2_5GT;
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return err1 ? err1 : err2 ? err2 : -EINVAL;

	return 0;
}
4326 static void cxgb4_check_pcie_caps(struct adapter *adap)
4328 enum pcie_link_width width, width_cap;
4329 enum pci_bus_speed speed, speed_cap;
#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")
4337 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
4338 dev_warn(adap->pdev_dev,
4339 "Unable to determine PCIe device BW capabilities\n");
4343 if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
4344 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
4345 dev_warn(adap->pdev_dev,
4346 "Unable to determine PCI Express bandwidth.\n");
4350 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
4351 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
4354 if (speed < speed_cap || width < width_cap)
4355 dev_info(adap->pdev_dev,
4356 "A slot with more lanes and/or higher speed is "
4357 "suggested for optimal performance.\n");
4360 /* Dump basic information about the adapter */
4361 static void print_adapter_info(struct adapter *adapter)
4363 /* Device information */
4364 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
4365 adapter->params.vpd.id,
4366 CHELSIO_CHIP_RELEASE(adapter->params.chip));
4367 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
4368 adapter->params.vpd.sn, adapter->params.vpd.pn);
4370 /* Firmware Version */
4371 if (!adapter->params.fw_vers)
4372 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
4374 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
4375 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
4376 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
4377 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
4378 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
4380 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
4381 * Firmware, so dev_info() is more appropriate here.)
4383 if (!adapter->params.bs_vers)
4384 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
4386 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
4387 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
4388 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
4389 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
4390 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
4392 /* TP Microcode Version */
4393 if (!adapter->params.tp_vers)
4394 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
4396 dev_info(adapter->pdev_dev,
4397 "TP Microcode version: %u.%u.%u.%u\n",
4398 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
4399 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
4400 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
4401 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
4403 /* Expansion ROM version */
4404 if (!adapter->params.er_vers)
4405 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
4407 dev_info(adapter->pdev_dev,
4408 "Expansion ROM version: %u.%u.%u.%u\n",
4409 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
4410 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
4411 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
4412 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
4414 /* Software/Hardware configuration */
4415 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
4416 is_offload(adapter) ? "R" : "",
4417 ((adapter->flags & USING_MSIX) ? "MSI-X" :
4418 (adapter->flags & USING_MSI) ? "MSI" : ""),
4419 is_offload(adapter) ? "Offload" : "non-Offload");
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";
4437 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4438 bufp += sprintf(bufp, "100M/");
4439 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4440 bufp += sprintf(bufp, "1G/");
4441 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4442 bufp += sprintf(bufp, "10G/");
4443 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
4444 bufp += sprintf(bufp, "25G/");
4445 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4446 bufp += sprintf(bufp, "40G/");
4447 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
4448 bufp += sprintf(bufp, "100G/");
4451 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
4453 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
4454 dev->name, adap->params.vpd.id, adap->name, buf);
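
/* Set the Relaxed Ordering Enable bit in the PCIe Device Control register
 * so that TLPs issued by the adapter may carry the Relaxed Ordering
 * attribute, which can improve DMA throughput on some platforms.
 */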
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_cleanup_sched(adapter);
	t4_free_mem(adapter->tids.tid_tab);
	cxgb4_cleanup_tc_u32(adapter);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}
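
/* Feature sets applied to each port's net_device in init_one(): TSO_FLAGS
 * feeds netdev->hw_features and VLAN_FEAT feeds netdev->vlan_features,
 * while SEGMENT_SIZE is the 128-byte write-combining segment size used to
 * sanity-check the SGE egress queues-per-page setting on T5 and later.
 */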
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
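
/* Decode the chip generation from the top nibble of the PCI device ID
 * (0x4xxx = T4, 0x5xxx = T5, 0x6xxx = T6) and combine it with the PL
 * revision into the CHELSIO_CHIP_CODE() used throughout the driver.
 */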
static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);

	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}
#ifdef CONFIG_PCI_IOV
static void dummy_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
	dev->destructor = free_netdev;
}
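
/* Allocate and register the "mgmtpf<adap_idx><pf>" management net_device.
 * It carries no traffic (hence dummy_setup()); it exists so the
 * administrator has an interface against which to configure the PF's
 * Virtual Functions, e.g. (illustrative) "ip link set mgmtpf10 vf 0 ...".
 */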
static int config_mgmt_dev(struct pci_dev *pdev)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	struct net_device *netdev;
	struct port_info *pi;
	char name[IFNAMSIZ];
	int err;

	snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
	netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
			      dummy_setup);
	if (!netdev)
		return -ENOMEM;

	pi = netdev_priv(netdev);
	pi->adapter = adap;
	pi->port_id = adap->pf % adap->params.nports;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	adap->port[0] = netdev;

	err = register_netdev(adap->port[0]);
	if (err) {
		pr_info("Unable to register VF mgmt netdev %s\n", name);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;
	}
	return err;
}
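
/* PCI core callback behind the sriov_numvfs sysfs attribute. Enables
 * num_vfs Virtual Functions, or disables SR-IOV entirely when num_vfs is
 * zero, provided this driver instance is the firmware MASTER and no VF is
 * currently assigned to a guest.
 */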
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if cxgb4 is the MASTER and fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F) ||
	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
		dev_warn(&pdev->dev,
			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
		return num_vfs;
	}

	/* Disable SRIOV when zero is passed.
	 * One needs to disable SRIOV before modifying it, else
	 * stack throws the below warning:
	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
	 */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		if (adap->port[0]) {
			unregister_netdev(adap->port[0]);
			adap->port[0] = NULL;
		}
		/* free VF resources */
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		adap->num_vfs = 0;
		return num_vfs;
	}

	if (num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err)
			return err;

		adap->num_vfs = num_vfs;
		err = config_mgmt_dev(pdev);
		if (err)
			return err;
	}

	adap->vfinfo = kcalloc(adap->num_vfs,
			       sizeof(struct vf_info), GFP_KERNEL);
	if (adap->vfinfo)
		fill_vf_station_mac_addr(adap);
	return num_vfs;
}
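
/* For reference, SR-IOV is driven from sysfs; e.g. for a PF at the
 * (illustrative) PCI address 0000:02:00.0:
 *
 *	echo 4 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs	# enable 4 VFs
 *	echo 0 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs	# disable SR-IOV
 *
 * Each such write reaches cxgb4_iov_configure() above.
 */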
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	struct net_device *netdev;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;
	static int adap_idx = 1;
#ifdef CONFIG_PCI_IOV
	u32 v, port_vec;
#endif

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
#ifndef CONFIG_PCI_IOV
		iounmap(regs);
#endif
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);
	spin_lock_init(&adapter->mbox_lock);

	INIT_LIST_HEAD(&adapter->mlist.list);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;
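
	/* On T5 and later chips BAR2 holds the SGE doorbells and is mapped
	 * write-combined so that doorbell writes may be coalesced into
	 * 128-byte segments; the check below ensures the egress
	 * queues-per-page setting is compatible with that segment size.
	 */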
	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
		 * the queue is less than the number of segments that can be
		 * accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}
	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));
	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}
	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}
	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");
	}
	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);
	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}
	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	print_adapter_info(adapter);
	setup_fw_sge_queues(adapter);
	return 0;
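
	/* PFs other than 4 arrive here via the "goto sriov" above: allocate
	 * just enough adapter state to manage SR-IOV Virtual Functions and
	 * query the port count from firmware, without bringing up any ports.
	 */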
sriov:
#ifdef CONFIG_PCI_IOV
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto free_pci_region;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->regs = regs;
	adapter->adap_idx = adap_idx;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);

	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
			      &v, &port_vec);
	if (err < 0) {
		dev_err(adapter->pdev_dev, "Could not fetch port params\n");
		goto free_adapter;
	}

	adapter->params.nports = hweight32(port_vec);
	pci_set_drvdata(pdev, adapter);
	return 0;

free_adapter:
	kfree(adapter);
free_pci_region:
	iounmap(regs);
	pci_disable_sriov(pdev);
	pci_release_regions(pdev);
	return err;
#else
	return 0;
#endif
 out_free_dev:
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
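
/* Undo init_one(). PF4 gets the full teardown; for the other PFs (the
 * SR-IOV management path) only the management netdev, the VF bookkeeping
 * and the PCI state need to be released.
 */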
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter->mbox_log);
		synchronize_rcu();
		kfree(adapter);
	}
#ifdef CONFIG_PCI_IOV
	else {
		if (adapter->port[0])
			unregister_netdev(adapter->port[0]);
		iounmap(adapter->regs);
		kfree(adapter->vfinfo);
		kfree(adapter);
		pci_disable_sriov(pdev);
		pci_release_regions(pdev);
	}
#endif
}
5152 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
5153 * delivery. This is essentially a stripped down version of the PCI remove()
5154 * function where we do the minimal amount of work necessary to shutdown any
5157 static void shutdown_one(struct pci_dev *pdev)
5159 struct adapter *adapter = pci_get_drvdata(pdev);
5161 /* As with remove_one() above (see extended comment), we only want do
5162 * do cleanup on PCI Devices which went all the way through init_one()
5166 pci_release_regions(pdev);
5170 if (adapter->pf == 4) {
5173 for_each_port(adapter, i)
5174 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5175 cxgb_close(adapter->port[i]);
5177 t4_uld_clean_up(adapter);
5178 disable_interrupts(adapter);
5179 disable_msi(adapter);
5181 t4_sge_stop(adapter);
5182 if (adapter->flags & FW_OK)
5183 t4_fw_bye(adapter, adapter->mbox);
5185 #ifdef CONFIG_PCI_IOV
5187 if (adapter->port[0])
5188 unregister_netdev(adapter->port[0]);
5189 iounmap(adapter->regs);
5190 kfree(adapter->vfinfo);
5192 pci_disable_sriov(pdev);
5193 pci_release_regions(pdev);
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);