/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>
#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U
/*
 * Physical Function provisioning constants.
 */
enum {
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,
};
#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress
 * Queues with Interrupt capability to serve as the VF's Firmware
 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
 * neither will have Free Lists associated with them.  For each
 * Ethernet/Control Egress Queue and for each Free List, we need an
 * Egress Context.
 */
enum {
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PF's access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        while (1) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));

                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
#endif
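/*
 * Illustrative example (not from the original source): the expression
 * portvec ^ (portvec & (portvec-1)) isolates the lowest set bit.  With
 * portvec = 0b1101 (ports 0, 2 and 3 active), pf = 4 and nports = 3 give
 * portn = 1; the first iteration isolates bit 0 (pmask = 0b0001) and
 * decrements portn, and the second returns pmask = 0b0100, i.e. port 2.
 */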
enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5014, 4),
        CH_DEVICE(0x5015, 4),
        CH_DEVICE(0x5080, 4),
        CH_DEVICE(0x5081, 4),
        CH_DEVICE(0x5082, 4),
        CH_DEVICE(0x5083, 4),
        CH_DEVICE(0x5084, 4),
        CH_DEVICE(0x5085, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        CH_DEVICE(0x5414, 4),
        CH_DEVICE(0x5415, 4),
        CH_DEVICE(0x5480, 4),
        CH_DEVICE(0x5481, 4),
        CH_DEVICE(0x5482, 4),
        CH_DEVICE(0x5483, 4),
        CH_DEVICE(0x5484, 4),
        CH_DEVICE(0x5485, 4),
        { 0, }
};
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
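/*
 * For example (illustrative usage only), to restrict the driver to MSI or
 * INTx at load time:
 *
 *      modprobe cxgb4 msi=1
 */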
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");
static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
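/*
 * Worked example of the alignment argument above: with a DMA offset of 2 the
 * 14-byte Ethernet header occupies bytes 2..15, so the IP header begins at
 * byte 16, a 4-byte boundary.  With an offset of 0 the IP header would begin
 * at byte 14 and every 4-byte IP field would be misaligned.
 */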
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *     [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
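/*
 * Sanity check on the default above (illustrative arithmetic): the selected
 * fields consume 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, which fits within the
 * 36-bit compressed filter tuple budget described above.
 */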
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 10000:
                        s = "10Gbps";
                        break;
                case 1000:
                        s = "1000Mbps";
                        break;
                case 100:
                        s = "100Mbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
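/* Note on the flow above: addresses are handed to t4_alloc_mac_filt() in
 * batches of up to ARRAY_SIZE(addr); addresses that cannot be given exact
 * (MPS TCAM) entries are reported back through the uhash/mhash bit vectors,
 * which t4_set_addr_hash() then programs into the hash filter as a fallback.
 */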
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
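/* Illustrative arithmetic: the threshold is expressed in units of 64 FIFO
 * entries, so the default of 10 corresponds to the 640-entry mark noted
 * above.
 */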
/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

static struct workqueue_struct *workq;
/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter have loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}
/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}
static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}
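/* Illustrative example (hypothetical values): for a port with
 * first_qset = 8 and @queues holding { 0, 1, 0, 1, ... }, the loop above
 * writes the absolute SGE ids of ethrxq[8] and ethrxq[9] alternately into
 * the VI's RSS slots, so hashed flows spread across both queues.
 */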
/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}
/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_rdmaciq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmaciq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_ciq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                        f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}
static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
        /* ... (leading Tx counters elided) ... */
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        /* ... */
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",
        /* ... */
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        /* ... */
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",
        /* ... */
        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",
        /* ... */
        "WriteCoalSuccess   ",
        "WriteCoalFail      ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->params.chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                        "%u.%u.%u.%u, TP %u.%u.%u.%u",
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 val1, val2;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->params.chip)) {
                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
                *data = val1 - val2;
                data++;
                *data = val2;
        } else {
                memset(data, 0, 2 * sizeof(u64));
        }
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return CHELSIO_CHIP_VERSION(ap->params.chip) |
                (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
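/* Illustrative example: assuming a chip version of 5 and revision 1, the
 * scheme above yields 5 | (1 << 10) | (1 << 16) = 0x10405.
 */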
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int t4_reg_ranges[] = {
                /* ... (T4 register address ranges elided) ... */
        };

        static const unsigned int t5_reg_ranges[] = {
                /* ... (T5 register address ranges elided) ... */
        };

        int i;
        struct adapter *ap = netdev2adap(dev);
        static const unsigned int *reg_ranges;
        int arr_size = 0, buf_size = 0;

        if (is_t4(ap->params.chip)) {
                reg_ranges = &t4_reg_ranges[0];
                arr_size = ARRAY_SIZE(t4_reg_ranges);
                buf_size = T4_REGMAP_SIZE;
        } else {
                reg_ranges = &t5_reg_ranges[0];
                arr_size = ARRAY_SIZE(t5_reg_ranges);
                buf_size = T5_REGMAP_SIZE;
        }

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, buf_size);
        for (i = 0; i < arr_size; i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
}
static int identify_port(struct net_device *dev,
                         enum ethtool_phys_id_state state)
{
        unsigned int val;
        struct adapter *adap = netdev2adap(dev);

        if (state == ETHTOOL_ID_ACTIVE)
                val = 0xffff;
        else if (state == ETHTOOL_ID_INACTIVE)
                val = 0;
        else
                return -EINVAL;

        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
            type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_BP_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
        else if (type == FW_PORT_TYPE_BP4_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;
        else if (type == FW_PORT_TYPE_BP40_BA)
                v |= SUPPORTED_40000baseSR4_Full;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        if (caps & ADVERTISED_40000baseSR4_Full)
                v |= FW_PORT_CAP_SPEED_40G;
        return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XFI ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_SFP ||
                 p->port_type == FW_PORT_TYPE_QSFP_10G ||
                 p->port_type == FW_PORT_TYPE_QSFP) {
                if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
                    p->mod_type == FW_PORT_MOD_TYPE_SR ||
                    p->mod_type == FW_PORT_MOD_TYPE_ER ||
                    p->mod_type == FW_PORT_MOD_TYPE_LRM)
                        cmd->port = PORT_FIBRE;
                else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                         p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
                        cmd->port = PORT_OTHER;
        } else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        ethtool_cmd_speed_set(cmd,
                              netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
static unsigned int speed_to_caps(int speed)
{
        if (speed == 100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == 1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == 10000)
                return FW_PORT_CAP_SPEED_10G;
        if (speed == 40000)
                return FW_PORT_CAP_SPEED_40G;
        return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;
        u32 speed = ethtool_cmd_speed(cmd);

        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(speed);

                if (!(lc->supported & cap) ||
                    (speed == 1000) ||
                    (speed == 10000) ||
                    (speed == 40000))
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}
static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        e->rx_jumbo_max_pending = 0;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        e->rx_jumbo_pending = 0;
        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < pi->nqsets; ++i) {
                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
        }
        return 0;
}
static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
                               const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
 *      set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *      @q: the Rx queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *      one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rspq_intr_params(struct sge_rspq *q,
                                unsigned int us, unsigned int cnt)
{
        struct adapter *adap = q->adap;

        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
                                            &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
        return 0;
}
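/* Illustrative note on the encoding above: the timer index occupies the bits
 * above bit 0 (qtimer_val() recovers it with intr_params >> 1) while the
 * counter-enable flag lives in bit 0, so e.g. timer index 2 with a non-zero
 * packet count yields QINTR_TIMER_IDX(2) | QINTR_CNT_EN.
 */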
/**
 *      set_rx_intr_params - set a net device's RX interrupt holdoff parameters
 *      @dev: the network device
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
                              unsigned int us, unsigned int cnt)
{
        int i, err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

        for (i = 0; i < pi->nqsets; i++, q++) {
                err = set_rspq_intr_params(&q->rspq, us, cnt);
                if (err)
                        return err;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        return set_rx_intr_params(dev, c->rx_coalesce_usecs,
                                  c->rx_max_coalesced_frames);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        return 0;
}
/**
 *      eeprom_ptov - translate a physical EEPROM address to virtual
 *      @phys_addr: the physical EEPROM address
 *      @fn: the PCI function number
 *      @sz: size of function-specific area
 *
 *      Translate a physical EEPROM address to virtual.  The first 1K is
 *      accessed through virtual addresses starting at 31K, the rest is
 *      accessed through virtual addresses starting at 0.
 *
 *      The mapping is as follows:
 *      [0..1K) -> [31K..32K)
 *      [1K..1K+A) -> [31K-A..31K)
 *      [1K+A..ES) -> [0..ES-A-1K)
 *
 *      where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return 31744 - fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        return -EINVAL;
}
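/* Worked example of the mapping above (hypothetical numbers): with @fn = 1
 * and @sz = 1K, A = 1K.  Physical 0x200 falls in [0..1K) and maps to
 * 31K + 0x200; physical 1K + 0x100 falls in the function-private window and
 * maps to 31K - 1K + 0x100; anything from 2K up to EEPROMSIZE maps down to
 * start at virtual 0.
 */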
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
        return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = netdev2adap(dev);

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = netdev2adap(dev);

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (adapter->fn > 0) {
                u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

                if (aligned_offset < start ||
                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
                        return -EPERM;
        }

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                /*
                 * RMW possibly needed for first or last words.
                 */
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != data)
                kfree(buf);
        return err;
}
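/* Illustrative RMW example: a write of len = 3 at offset = 2 gives
 * aligned_offset = 0 and aligned_len = (3 + 2 + 3) & ~3 = 8, so the code
 * above reads the first and last 32-bit words, splices the 3 payload bytes
 * in at byte offset 2, and writes both words back.
 */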
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
        int ret;
        const struct firmware *fw;
        struct adapter *adap = netdev2adap(netdev);

        ef->data[sizeof(ef->data) - 1] = '\0';
        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
        if (ret < 0)
                return ret;

        ret = t4_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
        if (!ret)
                dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
        return ret;
}
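/* This hook backs the ethtool flash operation; e.g. (illustrative usage):
 *
 *      ethtool -f eth0 cxgb4/t4fw.bin
 *
 * has the kernel firmware loader fetch that file (typically from
 * /lib/firmware) and hands the image to t4_load_fw() above.
 */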
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = WAKE_BCAST | WAKE_MAGIC;
        wol->wolopts = netdev2adap(dev)->wol;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        int err = 0;
        struct port_info *pi = netdev_priv(dev);

        if (wol->wolopts & ~WOL_SUPPORTED)
                return -EINVAL;
        t4_wol_magic_enable(pi->adapter, pi->tx_chan,
                            (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
        if (wol->wolopts & WAKE_BCAST) {
                err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
                                        ~0ULL, 0, false);
                if (!err)
                        err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
                                                ~6ULL, ~0ULL, BCAST_CRC, true);
        } else
                t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
        return err;
}
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
        const struct port_info *pi = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
        int err;

        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
                return 0;

        err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
                            -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (unlikely(err))
                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
        return err;
}
static u32 get_rss_table_size(struct net_device *dev)
{
        const struct port_info *pi = netdev_priv(dev);

        return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
{
        const struct port_info *pi = netdev_priv(dev);
        unsigned int n = pi->rss_size;

        while (n--)
                p[n] = pi->rss[n];
        return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
{
        unsigned int i;
        struct port_info *pi = netdev_priv(dev);

        for (i = 0; i < pi->rss_size; i++)
                pi->rss[i] = p[i];
        if (pi->adapter->flags & FULL_INIT_DONE)
                return write_rss(pi, pi->rss);
        return 0;
}
2810 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2813 const struct port_info *pi = netdev_priv(dev);
2815 switch (info->cmd) {
2816 case ETHTOOL_GRXFH: {
2817 unsigned int v = pi->rss_mode;
2820 switch (info->flow_type) {
2821 case TCP_V4_FLOW:
2822 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2823 info->data = RXH_IP_SRC | RXH_IP_DST |
2824 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2825 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2826 info->data = RXH_IP_SRC | RXH_IP_DST;
2827 break;
2828 case UDP_V4_FLOW:
2829 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2830 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2831 info->data = RXH_IP_SRC | RXH_IP_DST |
2832 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2833 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2834 info->data = RXH_IP_SRC | RXH_IP_DST;
2835 break;
2836 case SCTP_V4_FLOW:
2837 case AH_ESP_V4_FLOW:
2838 case IPV4_FLOW:
2839 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2840 info->data = RXH_IP_SRC | RXH_IP_DST;
2841 break;
2842 case TCP_V6_FLOW:
2843 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2844 info->data = RXH_IP_SRC | RXH_IP_DST |
2845 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2846 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2847 info->data = RXH_IP_SRC | RXH_IP_DST;
2848 break;
2849 case UDP_V6_FLOW:
2850 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2851 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2852 info->data = RXH_IP_SRC | RXH_IP_DST |
2853 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2854 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2855 info->data = RXH_IP_SRC | RXH_IP_DST;
2856 break;
2857 case SCTP_V6_FLOW:
2858 case AH_ESP_V6_FLOW:
2859 case IPV6_FLOW:
2860 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2861 info->data = RXH_IP_SRC | RXH_IP_DST;
2862 break;
2864 return 0;
2866 case ETHTOOL_GRXRINGS:
2867 info->data = pi->nqsets;
2868 return 0;
2870 return -EOPNOTSUPP;
2873 static const struct ethtool_ops cxgb_ethtool_ops = {
2874 .get_settings = get_settings,
2875 .set_settings = set_settings,
2876 .get_drvinfo = get_drvinfo,
2877 .get_msglevel = get_msglevel,
2878 .set_msglevel = set_msglevel,
2879 .get_ringparam = get_sge_param,
2880 .set_ringparam = set_sge_param,
2881 .get_coalesce = get_coalesce,
2882 .set_coalesce = set_coalesce,
2883 .get_eeprom_len = get_eeprom_len,
2884 .get_eeprom = get_eeprom,
2885 .set_eeprom = set_eeprom,
2886 .get_pauseparam = get_pauseparam,
2887 .set_pauseparam = set_pauseparam,
2888 .get_link = ethtool_op_get_link,
2889 .get_strings = get_strings,
2890 .set_phys_id = identify_port,
2891 .nway_reset = restart_autoneg,
2892 .get_sset_count = get_sset_count,
2893 .get_ethtool_stats = get_stats,
2894 .get_regs_len = get_regs_len,
2895 .get_regs = get_regs,
2896 .get_wol = get_wol,
2897 .set_wol = set_wol,
2898 .get_rxnfc = get_rxnfc,
2899 .get_rxfh_indir_size = get_rss_table_size,
2900 .get_rxfh = get_rss_table,
2901 .set_rxfh = set_rss_table,
2902 .flash_device = set_flash,
2908 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2912 loff_t avail = file_inode(file)->i_size;
2913 unsigned int mem = (uintptr_t)file->private_data & 3;
2914 struct adapter *adap = file->private_data - mem;
2920 if (count > avail - pos)
2921 count = avail - pos;
2928 if ((mem == MEM_MC) || (mem == MEM_MC1))
2929 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2931 ret = t4_edc_read(adap, mem, pos, data, NULL);
2935 ofst = pos % sizeof(data);
2936 len = min(count, sizeof(data) - ofst);
2937 if (copy_to_user(buf, (u8 *)data + ofst, len))
2944 count = pos - *ppos;
2945 *ppos = pos;
2946 return count;
2949 static const struct file_operations mem_debugfs_fops = {
2950 .owner = THIS_MODULE,
2951 .open = simple_open,
2952 .read = mem_read,
2953 .llseek = default_llseek,
2956 static void add_debugfs_mem(struct adapter *adap, const char *name,
2957 unsigned int idx, unsigned int size_mb)
2961 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2962 (void *)adap + idx, &mem_debugfs_fops);
2963 if (de && de->d_inode)
2964 de->d_inode->i_size = size_mb << 20;
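/* Added commentary: the node created above encodes the memory index in the
 * low bits of the private_data pointer ((void *)adap + idx), and mem_read()
 * recovers it with "& 3" before subtracting it back out to get the adapter
 * pointer. This relies on struct adapter being at least 4-byte aligned,
 * which any dynamically allocated structure is.
 */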
2967 static int setup_debugfs(struct adapter *adap)
2972 if (IS_ERR_OR_NULL(adap->debugfs_root))
2973 return -1;
2975 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2976 if (i & EDRAM0_ENABLE) {
2977 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2978 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2980 if (i & EDRAM1_ENABLE) {
2981 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2982 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2984 if (is_t4(adap->params.chip)) {
2985 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2986 if (i & EXT_MEM_ENABLE)
2987 add_debugfs_mem(adap, "mc", MEM_MC,
2988 EXT_MEM_SIZE_GET(size));
2990 if (i & EXT_MEM_ENABLE) {
2991 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2992 add_debugfs_mem(adap, "mc0", MEM_MC0,
2993 EXT_MEM_SIZE_GET(size));
2995 if (i & EXT_MEM1_ENABLE) {
2996 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2997 add_debugfs_mem(adap, "mc1", MEM_MC1,
2998 EXT_MEM_SIZE_GET(size));
3002 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3008 * upper-layer driver support
3012 * Allocate an active-open TID and set it to the supplied value.
3014 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3018 spin_lock_bh(&t->atid_lock);
3020 union aopen_entry *p = t->afree;
3022 atid = (p - t->atid_tab) + t->atid_base;
3027 spin_unlock_bh(&t->atid_lock);
3028 return atid;
3030 EXPORT_SYMBOL(cxgb4_alloc_atid);
3033 * Release an active-open TID.
3035 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3037 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3039 spin_lock_bh(&t->atid_lock);
3040 p->next = t->afree;
3041 t->afree = p;
3042 t->atids_in_use--;
3043 spin_unlock_bh(&t->atid_lock);
3045 EXPORT_SYMBOL(cxgb4_free_atid);
3048 * Allocate a server TID and set it to the supplied value.
3050 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3054 spin_lock_bh(&t->stid_lock);
3055 if (family == PF_INET) {
3056 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3057 if (stid < t->nstids)
3058 __set_bit(stid, t->stid_bmap);
3059 else
3060 stid = -1;
3061 } else {
3062 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3063 if (stid < 0)
3064 stid = -1;
3065 }
3066 if (stid >= 0) {
3067 t->stid_tab[stid].data = data;
3068 stid += t->stid_base;
3069 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3070 * This is equivalent to 4 TIDs. With CLIP enabled it
3071 * needs 2 TIDs.
3072 */
3073 if (family == PF_INET)
3074 t->stids_in_use++;
3075 else
3076 t->stids_in_use += 4;
3078 spin_unlock_bh(&t->stid_lock);
3079 return stid;
3081 EXPORT_SYMBOL(cxgb4_alloc_stid);
3083 /* Allocate a server filter TID and set it to the supplied value.
3085 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3089 spin_lock_bh(&t->stid_lock);
3090 if (family == PF_INET) {
3091 stid = find_next_zero_bit(t->stid_bmap,
3092 t->nstids + t->nsftids, t->nstids);
3093 if (stid < (t->nstids + t->nsftids))
3094 __set_bit(stid, t->stid_bmap);
3101 t->stid_tab[stid].data = data;
3103 stid += t->sftid_base;
3106 spin_unlock_bh(&t->stid_lock);
3107 return stid;
3109 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3111 /* Release a server TID.
3113 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3115 /* Is it a server filter TID? */
3116 if (t->nsftids && (stid >= t->sftid_base)) {
3117 stid -= t->sftid_base;
3120 stid -= t->stid_base;
3123 spin_lock_bh(&t->stid_lock);
3124 if (family == PF_INET)
3125 __clear_bit(stid, t->stid_bmap);
3126 else
3127 bitmap_release_region(t->stid_bmap, stid, 2);
3128 t->stid_tab[stid].data = NULL;
3129 if (family == PF_INET)
3130 t->stids_in_use--;
3131 else
3132 t->stids_in_use -= 4;
3133 spin_unlock_bh(&t->stid_lock);
3135 EXPORT_SYMBOL(cxgb4_free_stid);
3138 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3140 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3143 struct cpl_tid_release *req;
3145 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3146 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3147 INIT_TP_WR(req, tid);
3148 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3152 * Queue a TID release request and if necessary schedule a work queue to
3153 * process it.
3154 */
3155 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3158 void **p = &t->tid_tab[tid];
3159 struct adapter *adap = container_of(t, struct adapter, tids);
3161 spin_lock_bh(&adap->tid_release_lock);
3162 *p = adap->tid_release_head;
3163 /* Low 2 bits encode the Tx channel number */
3164 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3165 if (!adap->tid_release_task_busy) {
3166 adap->tid_release_task_busy = true;
3167 queue_work(workq, &adap->tid_release_task);
3169 spin_unlock_bh(&adap->tid_release_lock);
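/*
 * Illustrative sketch (not driver code) of the pointer tagging used by
 * cxgb4_queue_tid_release() above: entries of tid_tab are pointer-aligned,
 * so the low two bits of a &tid_tab[tid] address are free to carry the
 * Tx channel number (0-3).
 */
static inline void **tid_entry_tag(void **p, unsigned int chan)
{
	return (void **)((uintptr_t)p | (chan & 3));	/* stash the channel */
}

static inline unsigned int tid_entry_chan(void **tagged)
{
	return (uintptr_t)tagged & 3;			/* recover the channel */
}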
3173 * Process the list of pending TID release requests.
3175 static void process_tid_release_list(struct work_struct *work)
3177 struct sk_buff *skb;
3178 struct adapter *adap;
3180 adap = container_of(work, struct adapter, tid_release_task);
3182 spin_lock_bh(&adap->tid_release_lock);
3183 while (adap->tid_release_head) {
3184 void **p = adap->tid_release_head;
3185 unsigned int chan = (uintptr_t)p & 3;
3186 p = (void *)p - chan;
3188 adap->tid_release_head = *p;
3189 *p = NULL;
3190 spin_unlock_bh(&adap->tid_release_lock);
3192 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3193 GFP_KERNEL)))
3194 schedule_timeout_uninterruptible(1);
3196 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3197 t4_ofld_send(adap, skb);
3198 spin_lock_bh(&adap->tid_release_lock);
3200 adap->tid_release_task_busy = false;
3201 spin_unlock_bh(&adap->tid_release_lock);
3205 * Release a TID and inform HW. If we are unable to allocate the release
3206 * message we defer to a work queue.
3208 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3210 void *old;
3211 struct sk_buff *skb;
3212 struct adapter *adap = container_of(t, struct adapter, tids);
3214 old = t->tid_tab[tid];
3215 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3216 if (likely(skb)) {
3217 t->tid_tab[tid] = NULL;
3218 mk_tid_release(skb, chan, tid);
3219 t4_ofld_send(adap, skb);
3220 } else
3221 cxgb4_queue_tid_release(t, chan, tid);
3222 if (old)
3223 atomic_dec(&t->tids_in_use);
3225 EXPORT_SYMBOL(cxgb4_remove_tid);
3228 * Allocate and initialize the TID tables. Returns 0 on success.
3230 static int tid_init(struct tid_info *t)
3233 unsigned int stid_bmap_size;
3234 unsigned int natids = t->natids;
3235 struct adapter *adap = container_of(t, struct adapter, tids);
3237 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3238 size = t->ntids * sizeof(*t->tid_tab) +
3239 natids * sizeof(*t->atid_tab) +
3240 t->nstids * sizeof(*t->stid_tab) +
3241 t->nsftids * sizeof(*t->stid_tab) +
3242 stid_bmap_size * sizeof(long) +
3243 t->nftids * sizeof(*t->ftid_tab) +
3244 t->nsftids * sizeof(*t->ftid_tab);
3246 t->tid_tab = t4_alloc_mem(size);
3250 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3251 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3252 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3253 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
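/* Added commentary: layout of the single t4_alloc_mem() block carved up
 * above, in allocation order (sizes in array elements):
 *
 *   tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *   stid_bmap[BITS_TO_LONGS(nstids + nsftids)] | ftid_tab[nftids + nsftids]
 */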
3254 spin_lock_init(&t->stid_lock);
3255 spin_lock_init(&t->atid_lock);
3257 t->stids_in_use = 0;
3259 t->atids_in_use = 0;
3260 atomic_set(&t->tids_in_use, 0);
3262 /* Set up the free list for atid_tab and clear the stid bitmap. */
3263 if (natids) {
3264 while (--natids)
3265 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3266 t->afree = t->atid_tab;
3268 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3269 /* Reserve stid 0 for T4/T5 adapters */
3270 if (!t->stid_base &&
3271 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3272 __set_bit(0, t->stid_bmap);
3277 static int cxgb4_clip_get(const struct net_device *dev,
3278 const struct in6_addr *lip)
3280 struct adapter *adap;
3281 struct fw_clip_cmd c;
3283 adap = netdev2adap(dev);
3284 memset(&c, 0, sizeof(c));
3285 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3286 FW_CMD_REQUEST | FW_CMD_WRITE);
3287 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3288 c.ip_hi = *(__be64 *)(lip->s6_addr);
3289 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3290 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3293 static int cxgb4_clip_release(const struct net_device *dev,
3294 const struct in6_addr *lip)
3296 struct adapter *adap;
3297 struct fw_clip_cmd c;
3299 adap = netdev2adap(dev);
3300 memset(&c, 0, sizeof(c));
3301 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3302 FW_CMD_REQUEST | FW_CMD_READ);
3303 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3304 c.ip_hi = *(__be64 *)(lip->s6_addr);
3305 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3306 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
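/*
 * The two CLIP helpers above differ only in the alloc/free flag; a shared
 * core could look like this hypothetical refactor (a sketch, not driver
 * code), built entirely from the same calls the helpers already use:
 */
static int clip_cmd(struct adapter *adap, const struct in6_addr *lip,
		    bool alloc)
{
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | FW_CMD_REQUEST |
			      (alloc ? FW_CMD_WRITE : FW_CMD_READ));
	c.alloc_to_len16 = htonl((alloc ? F_FW_CLIP_CMD_ALLOC :
					  F_FW_CLIP_CMD_FREE) | FW_LEN16(c));
	c.ip_hi = *(__be64 *)(lip->s6_addr);
	c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}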
3310 * cxgb4_create_server - create an IP server
3311 * @dev: the device
3312 * @stid: the server TID
3313 * @sip: local IP address to bind server to
3314 * @sport: the server's TCP port
3315 * @queue: queue to direct messages from this server to
3317 * Create an IP server for the given port and address.
3318 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3320 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3321 __be32 sip, __be16 sport, __be16 vlan,
3325 struct sk_buff *skb;
3326 struct adapter *adap;
3327 struct cpl_pass_open_req *req;
3330 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3334 adap = netdev2adap(dev);
3335 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3337 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3338 req->local_port = sport;
3339 req->peer_port = htons(0);
3340 req->local_ip = sip;
3341 req->peer_ip = htonl(0);
3342 chan = rxq_to_chan(&adap->sge, queue);
3343 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3344 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3345 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3346 ret = t4_mgmt_tx(adap, skb);
3347 return net_xmit_eval(ret);
3349 EXPORT_SYMBOL(cxgb4_create_server);
3351 /* cxgb4_create_server6 - create an IPv6 server
3352 * @dev: the device
3353 * @stid: the server TID
3354 * @sip: local IPv6 address to bind server to
3355 * @sport: the server's TCP port
3356 * @queue: queue to direct messages from this server to
3358 * Create an IPv6 server for the given port and address.
3359 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3361 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3362 const struct in6_addr *sip, __be16 sport,
3366 struct sk_buff *skb;
3367 struct adapter *adap;
3368 struct cpl_pass_open_req6 *req;
3371 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3375 adap = netdev2adap(dev);
3376 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3378 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3379 req->local_port = sport;
3380 req->peer_port = htons(0);
3381 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3382 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3383 req->peer_ip_hi = cpu_to_be64(0);
3384 req->peer_ip_lo = cpu_to_be64(0);
3385 chan = rxq_to_chan(&adap->sge, queue);
3386 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3387 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3388 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3389 ret = t4_mgmt_tx(adap, skb);
3390 return net_xmit_eval(ret);
3392 EXPORT_SYMBOL(cxgb4_create_server6);
3394 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3395 unsigned int queue, bool ipv6)
3397 struct sk_buff *skb;
3398 struct adapter *adap;
3399 struct cpl_close_listsvr_req *req;
3402 adap = netdev2adap(dev);
3404 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3408 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3410 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3411 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3412 LISTSVR_IPV6(0)) | QUEUENO(queue));
3413 ret = t4_mgmt_tx(adap, skb);
3414 return net_xmit_eval(ret);
3416 EXPORT_SYMBOL(cxgb4_remove_server);
3419 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3420 * @mtus: the HW MTU table
3421 * @mtu: the target MTU
3422 * @idx: index of selected entry in the MTU table
3424 * Returns the index and the value in the HW MTU table that is closest to
3425 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3426 * table, in which case that smallest available value is selected.
3428 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3433 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3434 ++i;
3435 if (idx)
3436 *idx = i;
3437 return mtus[i];
3439 EXPORT_SYMBOL(cxgb4_best_mtu);
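/*
 * Hedged usage sketch (not driver code): pick the HW MTU table entry for a
 * 1500-byte netdev MTU. The function and table pointer are the driver's
 * own; the 1500 value and helper name are illustrative assumptions.
 */
static unsigned int example_pick_mtu(const struct adapter *adap)
{
	unsigned int idx;
	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1500, &idx);

	/* mtu is the largest table entry <= 1500 (or the smallest entry if
	 * 1500 is below the whole table); idx is its index in the table.
	 */
	return mtu;
}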
3442 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3443 * @mtus: the HW MTU table
3444 * @header_size: Header Size
3445 * @data_size_max: maximum Data Segment Size
3446 * @data_size_align: desired Data Segment Size Alignment (2^N)
3447 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3449 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3450 * MTU Table based solely on a Maximum MTU parameter, we break that
3451 * parameter up into a Header Size and Maximum Data Segment Size, and
3452 * provide a desired Data Segment Size Alignment. If we find an MTU in
3453 * the Hardware MTU Table which will result in a Data Segment Size with
3454 * the requested alignment _and_ that MTU isn't "too far" from the
3455 * closest MTU, then we'll return that rather than the closest MTU.
3457 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3458 unsigned short header_size,
3459 unsigned short data_size_max,
3460 unsigned short data_size_align,
3461 unsigned int *mtu_idxp)
3463 unsigned short max_mtu = header_size + data_size_max;
3464 unsigned short data_size_align_mask = data_size_align - 1;
3465 int mtu_idx, aligned_mtu_idx;
3467 /* Scan the MTU Table till we find an MTU which is larger than our
3468 * Maximum MTU or we reach the end of the table. Along the way,
3469 * record the last MTU found, if any, which will result in a Data
3470 * Segment Length matching the requested alignment.
3472 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3473 unsigned short data_size = mtus[mtu_idx] - header_size;
3475 /* If this MTU minus the Header Size would result in a
3476 * Data Segment Size of the desired alignment, remember it.
3478 if ((data_size & data_size_align_mask) == 0)
3479 aligned_mtu_idx = mtu_idx;
3481 /* If we're not at the end of the Hardware MTU Table and the
3482 * next element is larger than our Maximum MTU, drop out of
3485 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3489 /* If we fell out of the loop because we ran to the end of the table,
3490 * then we just have to use the last [largest] entry.
3492 if (mtu_idx == NMTUS)
3495 /* If we found an MTU which resulted in the requested Data Segment
3496 * Length alignment and that's "not far" from the largest MTU which is
3497 * less than or equal to the maximum MTU, then use that.
3499 if (aligned_mtu_idx >= 0 &&
3500 mtu_idx - aligned_mtu_idx <= 1)
3501 mtu_idx = aligned_mtu_idx;
3503 /* If the caller has passed in an MTU Index pointer, pass the
3504 * MTU Index back. Return the MTU value.
3507 *mtu_idxp = mtu_idx;
3508 return mtus[mtu_idx];
3510 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
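/*
 * Hedged usage sketch (not driver code): ask for an MTU whose data segment
 * is 8-byte aligned, given 40 bytes of IPv4+TCP headers. The specific
 * numbers and helper name here are illustrative assumptions only.
 */
static unsigned int example_aligned_mtu(const struct adapter *adap)
{
	unsigned int idx;

	return cxgb4_best_aligned_mtu(adap->params.mtus,
				      40,	/* header size */
				      8960,	/* max data segment size */
				      8,	/* desired alignment (2^N) */
				      &idx);
}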
3513 * cxgb4_port_chan - get the HW channel of a port
3514 * @dev: the net device for the port
3516 * Return the HW Tx channel of the given port.
3518 unsigned int cxgb4_port_chan(const struct net_device *dev)
3520 return netdev2pinfo(dev)->tx_chan;
3522 EXPORT_SYMBOL(cxgb4_port_chan);
3524 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3526 struct adapter *adap = netdev2adap(dev);
3527 u32 v1, v2, lp_count, hp_count;
3529 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3530 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3531 if (is_t4(adap->params.chip)) {
3532 lp_count = G_LP_COUNT(v1);
3533 hp_count = G_HP_COUNT(v1);
3535 lp_count = G_LP_COUNT_T5(v1);
3536 hp_count = G_HP_COUNT_T5(v2);
3538 return lpfifo ? lp_count : hp_count;
3540 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3543 * cxgb4_port_viid - get the VI id of a port
3544 * @dev: the net device for the port
3546 * Return the VI id of the given port.
3548 unsigned int cxgb4_port_viid(const struct net_device *dev)
3550 return netdev2pinfo(dev)->viid;
3552 EXPORT_SYMBOL(cxgb4_port_viid);
3555 * cxgb4_port_idx - get the index of a port
3556 * @dev: the net device for the port
3558 * Return the index of the given port.
3560 unsigned int cxgb4_port_idx(const struct net_device *dev)
3562 return netdev2pinfo(dev)->port_id;
3564 EXPORT_SYMBOL(cxgb4_port_idx);
3566 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3567 struct tp_tcp_stats *v6)
3569 struct adapter *adap = pci_get_drvdata(pdev);
3571 spin_lock(&adap->stats_lock);
3572 t4_tp_get_tcp_stats(adap, v4, v6);
3573 spin_unlock(&adap->stats_lock);
3575 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3577 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3578 const unsigned int *pgsz_order)
3580 struct adapter *adap = netdev2adap(dev);
3582 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3583 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3584 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3585 HPZ3(pgsz_order[3]));
3587 EXPORT_SYMBOL(cxgb4_iscsi_init);
3589 int cxgb4_flush_eq_cache(struct net_device *dev)
3591 struct adapter *adap = netdev2adap(dev);
3594 ret = t4_fwaddrspace_write(adap, adap->mbox,
3595 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3598 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3600 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3602 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3606 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3608 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3609 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
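/* Added commentary: the shifts above assume the 64-bit big-endian word
 * fetched at DBQ_CTXT_BADDR + 24 * qid + 8 carries the consumer index in
 * bits [40:25] and the producer index in bits [24:9] of the EQ context.
 */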
3614 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3617 struct adapter *adap = netdev2adap(dev);
3618 u16 hw_pidx, hw_cidx;
3621 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3625 if (pidx != hw_pidx) {
3628 if (pidx >= hw_pidx)
3629 delta = pidx - hw_pidx;
3630 else
3631 delta = size - hw_pidx + pidx;
3633 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3634 QID(qid) | PIDX(delta));
3639 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
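/*
 * Illustrative helper (not driver code) for the wrap-aware doorbell delta
 * computed above: how far the hardware producer index must advance to catch
 * up with the software index in a ring of "size" descriptors.
 */
static inline u16 eq_ring_delta(u16 sw_pidx, u16 hw_pidx, u16 size)
{
	return sw_pidx >= hw_pidx ? sw_pidx - hw_pidx
				  : size - hw_pidx + sw_pidx;
}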
3641 void cxgb4_disable_db_coalescing(struct net_device *dev)
3643 struct adapter *adap;
3645 adap = netdev2adap(dev);
3646 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3649 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3651 void cxgb4_enable_db_coalescing(struct net_device *dev)
3653 struct adapter *adap;
3655 adap = netdev2adap(dev);
3656 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3658 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3660 static struct pci_driver cxgb4_driver;
3662 static void check_neigh_update(struct neighbour *neigh)
3664 const struct device *parent;
3665 const struct net_device *netdev = neigh->dev;
3667 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3668 netdev = vlan_dev_real_dev(netdev);
3669 parent = netdev->dev.parent;
3670 if (parent && parent->driver == &cxgb4_driver.driver)
3671 t4_l2t_update(dev_get_drvdata(parent), neigh);
3674 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3675 void *data)
3677 switch (event) {
3678 case NETEVENT_NEIGH_UPDATE:
3679 check_neigh_update(data);
3680 break;
3681 case NETEVENT_REDIRECT:
3682 default:
3683 break;
3685 return NOTIFY_DONE;
3688 static bool netevent_registered;
3689 static struct notifier_block cxgb4_netevent_nb = {
3690 .notifier_call = netevent_cb
3693 static void drain_db_fifo(struct adapter *adap, int usecs)
3695 u32 v1, v2, lp_count, hp_count;
3698 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3699 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3700 if (is_t4(adap->params.chip)) {
3701 lp_count = G_LP_COUNT(v1);
3702 hp_count = G_HP_COUNT(v1);
3704 lp_count = G_LP_COUNT_T5(v1);
3705 hp_count = G_HP_COUNT_T5(v2);
3708 if (lp_count == 0 && hp_count == 0)
3709 break;
3710 set_current_state(TASK_UNINTERRUPTIBLE);
3711 schedule_timeout(usecs_to_jiffies(usecs));
3715 static void disable_txq_db(struct sge_txq *q)
3717 unsigned long flags;
3719 spin_lock_irqsave(&q->db_lock, flags);
3721 spin_unlock_irqrestore(&q->db_lock, flags);
3724 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3726 spin_lock_irq(&q->db_lock);
3727 if (q->db_pidx_inc) {
3728 /* Make sure that all writes to the TX descriptors
3729 * are committed before we tell HW about them.
3732 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3733 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3737 spin_unlock_irq(&q->db_lock);
3740 static void disable_dbs(struct adapter *adap)
3744 for_each_ethrxq(&adap->sge, i)
3745 disable_txq_db(&adap->sge.ethtxq[i].q);
3746 for_each_ofldrxq(&adap->sge, i)
3747 disable_txq_db(&adap->sge.ofldtxq[i].q);
3748 for_each_port(adap, i)
3749 disable_txq_db(&adap->sge.ctrlq[i].q);
3752 static void enable_dbs(struct adapter *adap)
3756 for_each_ethrxq(&adap->sge, i)
3757 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3758 for_each_ofldrxq(&adap->sge, i)
3759 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3760 for_each_port(adap, i)
3761 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3764 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3766 if (adap->uld_handle[CXGB4_ULD_RDMA])
3767 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3771 static void process_db_full(struct work_struct *work)
3773 struct adapter *adap;
3775 adap = container_of(work, struct adapter, db_full_task);
3777 drain_db_fifo(adap, dbfifo_drain_delay);
3779 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3780 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3781 DBFIFO_HP_INT | DBFIFO_LP_INT,
3782 DBFIFO_HP_INT | DBFIFO_LP_INT);
3785 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3787 u16 hw_pidx, hw_cidx;
3790 spin_lock_irq(&q->db_lock);
3791 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3794 if (q->db_pidx != hw_pidx) {
3797 if (q->db_pidx >= hw_pidx)
3798 delta = q->db_pidx - hw_pidx;
3799 else
3800 delta = q->size - hw_pidx + q->db_pidx;
3802 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3803 QID(q->cntxt_id) | PIDX(delta));
3808 spin_unlock_irq(&q->db_lock);
3810 CH_WARN(adap, "DB drop recovery failed.\n");
3812 static void recover_all_queues(struct adapter *adap)
3816 for_each_ethrxq(&adap->sge, i)
3817 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3818 for_each_ofldrxq(&adap->sge, i)
3819 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3820 for_each_port(adap, i)
3821 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3824 static void process_db_drop(struct work_struct *work)
3826 struct adapter *adap;
3828 adap = container_of(work, struct adapter, db_drop_task);
3830 if (is_t4(adap->params.chip)) {
3831 drain_db_fifo(adap, dbfifo_drain_delay);
3832 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3833 drain_db_fifo(adap, dbfifo_drain_delay);
3834 recover_all_queues(adap);
3835 drain_db_fifo(adap, dbfifo_drain_delay);
3837 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3839 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3840 u16 qid = (dropped_db >> 15) & 0x1ffff;
3841 u16 pidx_inc = dropped_db & 0x1fff;
3843 unsigned short udb_density;
3844 unsigned long qpshift;
3848 dev_warn(adap->pdev_dev,
3849 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3851 (dropped_db >> 14) & 1,
3852 (dropped_db >> 13) & 1,
3855 drain_db_fifo(adap, 1);
3857 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3858 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3859 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3860 qpshift = PAGE_SHIFT - ilog2(udb_density);
3861 udb = qid << qpshift;
3863 page = udb / PAGE_SIZE;
3864 udb += (qid - (page * udb_density)) * 128;
3866 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3868 /* Re-enable BAR2 WC */
3869 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3872 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3875 void t4_db_full(struct adapter *adap)
3877 if (is_t4(adap->params.chip)) {
3879 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3880 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3881 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3882 queue_work(workq, &adap->db_full_task);
3886 void t4_db_dropped(struct adapter *adap)
3888 if (is_t4(adap->params.chip)) {
3890 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3892 queue_work(workq, &adap->db_drop_task);
3895 static void uld_attach(struct adapter *adap, unsigned int uld)
3898 struct cxgb4_lld_info lli;
3901 lli.pdev = adap->pdev;
3902 lli.l2t = adap->l2t;
3903 lli.tids = &adap->tids;
3904 lli.ports = adap->port;
3905 lli.vr = &adap->vres;
3906 lli.mtus = adap->params.mtus;
3907 if (uld == CXGB4_ULD_RDMA) {
3908 lli.rxq_ids = adap->sge.rdma_rxq;
3909 lli.ciq_ids = adap->sge.rdma_ciq;
3910 lli.nrxq = adap->sge.rdmaqs;
3911 lli.nciq = adap->sge.rdmaciqs;
3912 } else if (uld == CXGB4_ULD_ISCSI) {
3913 lli.rxq_ids = adap->sge.ofld_rxq;
3914 lli.nrxq = adap->sge.ofldqsets;
3916 lli.ntxq = adap->sge.ofldqsets;
3917 lli.nchan = adap->params.nports;
3918 lli.nports = adap->params.nports;
3919 lli.wr_cred = adap->params.ofldq_wr_cred;
3920 lli.adapter_type = adap->params.chip;
3921 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3922 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3923 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3925 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3926 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3928 lli.filt_mode = adap->params.tp.vlan_pri_map;
3929 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3930 for (i = 0; i < NCHAN; i++)
3932 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3933 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3934 lli.fw_vers = adap->params.fw_vers;
3935 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3936 lli.sge_pktshift = adap->sge.pktshift;
3937 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3938 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
3940 handle = ulds[uld].add(&lli);
3941 if (IS_ERR(handle)) {
3942 dev_warn(adap->pdev_dev,
3943 "could not attach to the %s driver, error %ld\n",
3944 uld_str[uld], PTR_ERR(handle));
3948 adap->uld_handle[uld] = handle;
3950 if (!netevent_registered) {
3951 register_netevent_notifier(&cxgb4_netevent_nb);
3952 netevent_registered = true;
3955 if (adap->flags & FULL_INIT_DONE)
3956 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3959 static void attach_ulds(struct adapter *adap)
3963 spin_lock(&adap_rcu_lock);
3964 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3965 spin_unlock(&adap_rcu_lock);
3967 mutex_lock(&uld_mutex);
3968 list_add_tail(&adap->list_node, &adapter_list);
3969 for (i = 0; i < CXGB4_ULD_MAX; i++)
3970 if (ulds[i].add)
3971 uld_attach(adap, i);
3972 mutex_unlock(&uld_mutex);
3975 static void detach_ulds(struct adapter *adap)
3979 mutex_lock(&uld_mutex);
3980 list_del(&adap->list_node);
3981 for (i = 0; i < CXGB4_ULD_MAX; i++)
3982 if (adap->uld_handle[i]) {
3983 ulds[i].state_change(adap->uld_handle[i],
3984 CXGB4_STATE_DETACH);
3985 adap->uld_handle[i] = NULL;
3987 if (netevent_registered && list_empty(&adapter_list)) {
3988 unregister_netevent_notifier(&cxgb4_netevent_nb);
3989 netevent_registered = false;
3991 mutex_unlock(&uld_mutex);
3993 spin_lock(&adap_rcu_lock);
3994 list_del_rcu(&adap->rcu_node);
3995 spin_unlock(&adap_rcu_lock);
3998 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4002 mutex_lock(&uld_mutex);
4003 for (i = 0; i < CXGB4_ULD_MAX; i++)
4004 if (adap->uld_handle[i])
4005 ulds[i].state_change(adap->uld_handle[i], new_state);
4006 mutex_unlock(&uld_mutex);
4010 * cxgb4_register_uld - register an upper-layer driver
4011 * @type: the ULD type
4012 * @p: the ULD methods
4014 * Registers an upper-layer driver with this driver and notifies the ULD
4015 * about any presently available devices that support its type. Returns
4016 * %-EBUSY if a ULD of the same type is already registered.
4018 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4021 struct adapter *adap;
4023 if (type >= CXGB4_ULD_MAX)
4024 return -EINVAL;
4025 mutex_lock(&uld_mutex);
4026 if (ulds[type].add) {
4027 ret = -EBUSY;
4028 goto out;
4029 }
4030 ulds[type] = *p;
4031 list_for_each_entry(adap, &adapter_list, list_node)
4032 uld_attach(adap, type);
4033 out: mutex_unlock(&uld_mutex);
4034 return ret;
4036 EXPORT_SYMBOL(cxgb4_register_uld);
4039 * cxgb4_unregister_uld - unregister an upper-layer driver
4040 * @type: the ULD type
4042 * Unregisters an existing upper-layer driver.
4044 int cxgb4_unregister_uld(enum cxgb4_uld type)
4046 struct adapter *adap;
4048 if (type >= CXGB4_ULD_MAX)
4049 return -EINVAL;
4050 mutex_lock(&uld_mutex);
4051 list_for_each_entry(adap, &adapter_list, list_node)
4052 adap->uld_handle[type] = NULL;
4053 ulds[type].add = NULL;
4054 mutex_unlock(&uld_mutex);
4055 return 0;
4057 EXPORT_SYMBOL(cxgb4_unregister_uld);
4059 /* Check if the netdev on which the event occurred belongs to us or not.
4060 * Return true if it belongs, otherwise false.
4061 * Called with rcu_read_lock() held.
4063 static bool cxgb4_netdev(const struct net_device *netdev)
4065 struct adapter *adap;
4068 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4069 for (i = 0; i < MAX_NPORTS; i++)
4070 if (adap->port[i] == netdev)
4075 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4076 unsigned long event)
4078 int ret = NOTIFY_DONE;
4081 if (cxgb4_netdev(event_dev)) {
4084 ret = cxgb4_clip_get(event_dev,
4085 (const struct in6_addr *)ifa->addr.s6_addr);
4093 cxgb4_clip_release(event_dev,
4094 (const struct in6_addr *)ifa->addr.s6_addr);
4105 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4106 unsigned long event, void *data)
4108 struct inet6_ifaddr *ifa = data;
4109 struct net_device *event_dev;
4110 int ret = NOTIFY_DONE;
4111 struct bonding *bond = netdev_priv(ifa->idev->dev);
4112 struct list_head *iter;
4113 struct slave *slave;
4114 struct pci_dev *first_pdev = NULL;
4116 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4117 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4118 ret = clip_add(event_dev, ifa, event);
4119 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4120 /* It is possible that two different adapters are bonded in one
4121 * bond. We need to find each such adapter and add the CLIP
4122 * entry to all of them, but only once per adapter.
4124 read_lock(&bond->lock);
4125 bond_for_each_slave(bond, slave, iter) {
4127 ret = clip_add(slave->dev, ifa, event);
4128 /* Only initialize first_pdev if clip_add succeeded,
4129 * since that means the device is ours.
4131 if (ret == NOTIFY_OK)
4132 first_pdev = to_pci_dev(
4133 slave->dev->dev.parent);
4134 } else if (first_pdev !=
4135 to_pci_dev(slave->dev->dev.parent))
4136 ret = clip_add(slave->dev, ifa, event);
4138 read_unlock(&bond->lock);
4140 ret = clip_add(ifa->idev->dev, ifa, event);
4145 static struct notifier_block cxgb4_inet6addr_notifier = {
4146 .notifier_call = cxgb4_inet6addr_handler
4149 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4150 * a physical device.
4151 * The physical device reference is needed to send the actual CLIP command.
4153 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4155 struct inet6_dev *idev = NULL;
4156 struct inet6_ifaddr *ifa;
4159 idev = __in6_dev_get(root_dev);
4163 read_lock_bh(&idev->lock);
4164 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4165 ret = cxgb4_clip_get(dev,
4166 (const struct in6_addr *)ifa->addr.s6_addr);
4170 read_unlock_bh(&idev->lock);
4175 static int update_root_dev_clip(struct net_device *dev)
4177 struct net_device *root_dev = NULL;
4180 /* First populate the real net device's IPv6 addresses */
4181 ret = update_dev_clip(dev, dev);
4185 /* Parse all bond and vlan devices layered on top of the physical dev */
4186 for (i = 0; i < VLAN_N_VID; i++) {
4187 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4191 ret = update_dev_clip(root_dev, dev);
4198 static void update_clip(const struct adapter *adap)
4201 struct net_device *dev;
4206 for (i = 0; i < MAX_NPORTS; i++) {
4207 dev = adap->port[i];
4211 ret = update_root_dev_clip(dev);
4220 * cxgb_up - enable the adapter
4221 * @adap: adapter being enabled
4223 * Called when the first port is enabled, this function performs the
4224 * actions necessary to make an adapter operational, such as completing
4225 * the initialization of HW modules, and enabling interrupts.
4227 * Must be called with the rtnl lock held.
4229 static int cxgb_up(struct adapter *adap)
4233 err = setup_sge_queues(adap);
4236 err = setup_rss(adap);
4240 if (adap->flags & USING_MSIX) {
4241 name_msix_vecs(adap);
4242 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4243 adap->msix_info[0].desc, adap);
4247 err = request_msix_queue_irqs(adap);
4249 free_irq(adap->msix_info[0].vec, adap);
4253 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4254 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4255 adap->port[0]->name, adap);
4261 t4_intr_enable(adap);
4262 adap->flags |= FULL_INIT_DONE;
4263 notify_ulds(adap, CXGB4_STATE_UP);
4268 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4270 t4_free_sge_resources(adap);
4274 static void cxgb_down(struct adapter *adapter)
4276 t4_intr_disable(adapter);
4277 cancel_work_sync(&adapter->tid_release_task);
4278 cancel_work_sync(&adapter->db_full_task);
4279 cancel_work_sync(&adapter->db_drop_task);
4280 adapter->tid_release_task_busy = false;
4281 adapter->tid_release_head = NULL;
4283 if (adapter->flags & USING_MSIX) {
4284 free_msix_queue_irqs(adapter);
4285 free_irq(adapter->msix_info[0].vec, adapter);
4287 free_irq(adapter->pdev->irq, adapter);
4288 quiesce_rx(adapter);
4289 t4_sge_stop(adapter);
4290 t4_free_sge_resources(adapter);
4291 adapter->flags &= ~FULL_INIT_DONE;
4295 * net_device operations
4297 static int cxgb_open(struct net_device *dev)
4300 struct port_info *pi = netdev_priv(dev);
4301 struct adapter *adapter = pi->adapter;
4303 netif_carrier_off(dev);
4305 if (!(adapter->flags & FULL_INIT_DONE)) {
4306 err = cxgb_up(adapter);
4311 err = link_start(dev);
4313 netif_tx_start_all_queues(dev);
4317 static int cxgb_close(struct net_device *dev)
4319 struct port_info *pi = netdev_priv(dev);
4320 struct adapter *adapter = pi->adapter;
4322 netif_tx_stop_all_queues(dev);
4323 netif_carrier_off(dev);
4324 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4327 /* Return an error number if the indicated filter isn't writable ...
4329 static int writable_filter(struct filter_entry *f)
4331 if (f->locked)
4332 return -EPERM;
4333 if (f->pending)
4334 return -EBUSY;
4336 return 0;
4339 /* Delete the filter at the specified index (if valid). This checks for all
4340 * the common problems with doing this, like the filter being locked or
4341 * currently pending in another operation.
4343 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4345 struct filter_entry *f;
4348 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4349 return -EINVAL;
4351 f = &adapter->tids.ftid_tab[fidx];
4352 ret = writable_filter(f);
4353 if (ret)
4354 return ret;
4355 if (f->valid)
4356 return del_filter_wr(adapter, fidx);
4358 return 0;
4361 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4362 __be32 sip, __be16 sport, __be16 vlan,
4363 unsigned int queue, unsigned char port, unsigned char mask)
4366 struct filter_entry *f;
4367 struct adapter *adap;
4371 adap = netdev2adap(dev);
4373 /* Adjust stid to correct filter index */
4374 stid -= adap->tids.sftid_base;
4375 stid += adap->tids.nftids;
4377 /* Check to make sure the filter requested is writable ...
4379 f = &adap->tids.ftid_tab[stid];
4380 ret = writable_filter(f);
4384 /* Clear out any old resources being used by the filter before
4385 * we start constructing the new filter.
4388 clear_filter(adap, f);
4390 /* Clear out filter specifications */
4391 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4392 f->fs.val.lport = cpu_to_be16(sport);
4393 f->fs.mask.lport = ~0;
4395 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4396 for (i = 0; i < 4; i++) {
4397 f->fs.val.lip[i] = val[i];
4398 f->fs.mask.lip[i] = ~0;
4400 if (adap->params.tp.vlan_pri_map & F_PORT) {
4401 f->fs.val.iport = port;
4402 f->fs.mask.iport = mask;
4406 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4407 f->fs.val.proto = IPPROTO_TCP;
4408 f->fs.mask.proto = ~0;
4413 /* Mark filter as locked */
4417 ret = set_filter_wr(adap, stid);
4419 clear_filter(adap, f);
4425 EXPORT_SYMBOL(cxgb4_create_server_filter);
4427 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4428 unsigned int queue, bool ipv6)
4431 struct filter_entry *f;
4432 struct adapter *adap;
4434 adap = netdev2adap(dev);
4436 /* Adjust stid to correct filter index */
4437 stid -= adap->tids.sftid_base;
4438 stid += adap->tids.nftids;
4440 f = &adap->tids.ftid_tab[stid];
4441 /* Unlock the filter */
4444 ret = delete_filter(adap, stid);
4450 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4452 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4453 struct rtnl_link_stats64 *ns)
4455 struct port_stats stats;
4456 struct port_info *p = netdev_priv(dev);
4457 struct adapter *adapter = p->adapter;
4459 /* Block retrieving statistics during EEH error
4460 * recovery. Otherwise, the recovery might fail
4461 * and the PCI device will be removed permanently
4463 spin_lock(&adapter->stats_lock);
4464 if (!netif_device_present(dev)) {
4465 spin_unlock(&adapter->stats_lock);
4468 t4_get_port_stats(adapter, p->tx_chan, &stats);
4469 spin_unlock(&adapter->stats_lock);
4471 ns->tx_bytes = stats.tx_octets;
4472 ns->tx_packets = stats.tx_frames;
4473 ns->rx_bytes = stats.rx_octets;
4474 ns->rx_packets = stats.rx_frames;
4475 ns->multicast = stats.rx_mcast_frames;
4477 /* detailed rx_errors */
4478 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4480 ns->rx_over_errors = 0;
4481 ns->rx_crc_errors = stats.rx_fcs_err;
4482 ns->rx_frame_errors = stats.rx_symbol_err;
4483 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4484 stats.rx_ovflow2 + stats.rx_ovflow3 +
4485 stats.rx_trunc0 + stats.rx_trunc1 +
4486 stats.rx_trunc2 + stats.rx_trunc3;
4487 ns->rx_missed_errors = 0;
4489 /* detailed tx_errors */
4490 ns->tx_aborted_errors = 0;
4491 ns->tx_carrier_errors = 0;
4492 ns->tx_fifo_errors = 0;
4493 ns->tx_heartbeat_errors = 0;
4494 ns->tx_window_errors = 0;
4496 ns->tx_errors = stats.tx_error_frames;
4497 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4498 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4502 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4505 int ret = 0, prtad, devad;
4506 struct port_info *pi = netdev_priv(dev);
4507 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4511 if (pi->mdio_addr < 0)
4512 return -EOPNOTSUPP;
4513 data->phy_id = pi->mdio_addr;
4517 if (mdio_phy_id_is_c45(data->phy_id)) {
4518 prtad = mdio_phy_id_prtad(data->phy_id);
4519 devad = mdio_phy_id_devad(data->phy_id);
4520 } else if (data->phy_id < 32) {
4521 prtad = data->phy_id;
4523 data->reg_num &= 0x1f;
4527 mbox = pi->adapter->fn;
4528 if (cmd == SIOCGMIIREG)
4529 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4530 data->reg_num, &data->val_out);
4532 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4533 data->reg_num, data->val_in);
4541 static void cxgb_set_rxmode(struct net_device *dev)
4543 /* unfortunately we can't return errors to the stack */
4544 set_rxmode(dev, -1, false);
4547 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4549 int ret;
4550 struct port_info *pi = netdev_priv(dev);
4552 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4553 return -EINVAL;
4554 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4555 -1, -1, -1, true);
4556 if (!ret)
4557 dev->mtu = new_mtu;
4558 return ret;
4561 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4564 struct sockaddr *addr = p;
4565 struct port_info *pi = netdev_priv(dev);
4567 if (!is_valid_ether_addr(addr->sa_data))
4568 return -EADDRNOTAVAIL;
4570 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4571 pi->xact_addr_filt, addr->sa_data, true, true);
4572 if (ret < 0)
4573 return ret;
4575 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4576 pi->xact_addr_filt = ret;
4580 #ifdef CONFIG_NET_POLL_CONTROLLER
4581 static void cxgb_netpoll(struct net_device *dev)
4583 struct port_info *pi = netdev_priv(dev);
4584 struct adapter *adap = pi->adapter;
4586 if (adap->flags & USING_MSIX) {
4588 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4590 for (i = pi->nqsets; i; i--, rx++)
4591 t4_sge_intr_msix(0, &rx->rspq);
4593 t4_intr_handler(adap)(0, adap);
4597 static const struct net_device_ops cxgb4_netdev_ops = {
4598 .ndo_open = cxgb_open,
4599 .ndo_stop = cxgb_close,
4600 .ndo_start_xmit = t4_eth_xmit,
4601 .ndo_get_stats64 = cxgb_get_stats,
4602 .ndo_set_rx_mode = cxgb_set_rxmode,
4603 .ndo_set_mac_address = cxgb_set_mac_addr,
4604 .ndo_set_features = cxgb_set_features,
4605 .ndo_validate_addr = eth_validate_addr,
4606 .ndo_do_ioctl = cxgb_ioctl,
4607 .ndo_change_mtu = cxgb_change_mtu,
4608 #ifdef CONFIG_NET_POLL_CONTROLLER
4609 .ndo_poll_controller = cxgb_netpoll,
4613 void t4_fatal_err(struct adapter *adap)
4615 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4616 t4_intr_disable(adap);
4617 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4620 static void setup_memwin(struct adapter *adap)
4622 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4624 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4625 if (is_t4(adap->params.chip)) {
4626 mem_win0_base = bar0 + MEMWIN0_BASE;
4627 mem_win1_base = bar0 + MEMWIN1_BASE;
4628 mem_win2_base = bar0 + MEMWIN2_BASE;
4630 /* For T5, only relative offset inside the PCIe BAR is passed */
4631 mem_win0_base = MEMWIN0_BASE;
4632 mem_win1_base = MEMWIN1_BASE_T5;
4633 mem_win2_base = MEMWIN2_BASE_T5;
4635 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4636 mem_win0_base | BIR(0) |
4637 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4638 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4639 mem_win1_base | BIR(0) |
4640 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4641 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4642 mem_win2_base | BIR(0) |
4643 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
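/* Added commentary: the WINDOW() field above encodes the aperture as
 * ilog2(bytes) - 10, i.e. the hardware counts window sizes in powers of
 * two starting at 1KB; a 64KB aperture would be programmed as 6.
 */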
4646 static void setup_memwin_rdma(struct adapter *adap)
4648 if (adap->vres.ocq.size) {
4649 unsigned int start, sz_kb;
4651 start = pci_resource_start(adap->pdev, 2) +
4652 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4653 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4655 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4656 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4658 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4659 adap->vres.ocq.start);
4661 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4665 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4670 /* get device capabilities */
4671 memset(c, 0, sizeof(*c));
4672 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4673 FW_CMD_REQUEST | FW_CMD_READ);
4674 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4675 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4679 /* select capabilities we'll be using */
4680 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4681 if (!vf_acls)
4682 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4683 else
4684 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4685 } else if (vf_acls) {
4686 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4689 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4690 FW_CMD_REQUEST | FW_CMD_WRITE);
4691 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4695 ret = t4_config_glbl_rss(adap, adap->fn,
4696 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4697 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4698 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4702 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4703 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4709 /* tweak some settings */
4710 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4711 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4712 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4713 v = t4_read_reg(adap, TP_PIO_DATA);
4714 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4716 /* first 4 Tx modulation queues point to consecutive Tx channels */
4717 adap->params.tp.tx_modq_map = 0xE4;
4718 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4719 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4721 /* associate each Tx modulation queue with consecutive Tx channels */
4723 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4724 &v, 1, A_TP_TX_SCHED_HDR);
4725 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4726 &v, 1, A_TP_TX_SCHED_FIFO);
4727 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4728 &v, 1, A_TP_TX_SCHED_PCMD);
4730 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4731 if (is_offload(adap)) {
4732 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4733 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4734 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4735 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4736 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4737 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4738 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4739 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4740 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4741 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4744 /* get basic stuff going */
4745 return t4_early_init(adap, adap->fn);
4749 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4751 #define MAX_ATIDS 8192U
4754 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4756 * If the firmware we're dealing with has Configuration File support, then
4757 * we use that to perform all configuration
4761 * Tweak configuration based on module parameters, etc. Most of these have
4762 * defaults assigned to them by Firmware Configuration Files (if we're using
4763 * them) but need to be explicitly set if we're using hard-coded
4764 * initialization. But even in the case of using Firmware Configuration
4765 * Files, we'd like to expose the ability to change these via module
4766 * parameters so these are essentially common tweaks/settings for
4767 * Configuration Files and hard-coded initialization ...
4769 static int adap_init0_tweaks(struct adapter *adapter)
4772 * Fix up various Host-Dependent Parameters like Page Size, Cache
4773 * Line Size, etc. The firmware default is for a 4KB Page Size and
4774 * 64B Cache Line Size ...
4776 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4779 * Process module parameters which affect early initialization.
4781 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4782 dev_err(&adapter->pdev->dev,
4783 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4787 t4_set_reg_field(adapter, SGE_CONTROL,
4789 PKTSHIFT(rx_dma_offset));
4792 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4793 * adds the pseudo header itself.
4795 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4796 CSUM_HAS_PSEUDO_HDR, 0);
4802 * Attempt to initialize the adapter via a Firmware Configuration File.
4804 static int adap_init0_config(struct adapter *adapter, int reset)
4806 struct fw_caps_config_cmd caps_cmd;
4807 const struct firmware *cf;
4808 unsigned long mtype = 0, maddr = 0;
4809 u32 finiver, finicsum, cfcsum;
4811 int config_issued = 0;
4812 char *fw_config_file, fw_config_file_path[256];
4813 char *config_name = NULL;
4816 * Reset device if necessary.
4819 ret = t4_fw_reset(adapter, adapter->mbox,
4820 PIORSTMODE | PIORST);
4826 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4827 * then use that. Otherwise, use the configuration file stored
4828 * in the adapter flash ...
4830 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4832 fw_config_file = FW4_CFNAME;
4835 fw_config_file = FW5_CFNAME;
4838 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4839 adapter->pdev->device);
4844 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4846 config_name = "On FLASH";
4847 mtype = FW_MEMTYPE_CF_FLASH;
4848 maddr = t4_flash_cfg_addr(adapter);
4850 u32 params[7], val[7];
4852 sprintf(fw_config_file_path,
4853 "/lib/firmware/%s", fw_config_file);
4854 config_name = fw_config_file_path;
4856 if (cf->size >= FLASH_CFG_MAX_SIZE)
4859 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4860 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4861 ret = t4_query_params(adapter, adapter->mbox,
4862 adapter->fn, 0, 1, params, val);
4865 * For t4_memory_write() below addresses and
4866 * sizes have to be in terms of multiples of 4
4867 * bytes. So, if the Configuration File isn't
4868 * a multiple of 4 bytes in length we'll have
4869 * to write that out separately since we can't
4870 * guarantee that the bytes following the
4871 * residual byte in the buffer returned by
4872 * request_firmware() are zeroed out ...
4874 size_t resid = cf->size & 0x3;
4875 size_t size = cf->size & ~0x3;
4876 __be32 *data = (__be32 *)cf->data;
4878 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4879 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4881 ret = t4_memory_write(adapter, mtype, maddr,
4883 if (ret == 0 && resid != 0) {
4890 last.word = data[size >> 2];
4891 for (i = resid; i < 4; i++)
4893 ret = t4_memory_write(adapter, mtype,
4900 release_firmware(cf);
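/* Worked example of the residual handling above (added commentary): a
 * 103-byte Configuration File is written as one 100-byte word-aligned
 * chunk (size = 103 & ~0x3) followed by a final 32-bit word rebuilt from
 * the 3 leftover bytes (resid = 103 & 0x3) with its unused byte zeroed.
 */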
4906 * Issue a Capability Configuration command to the firmware to get it
4907 * to parse the Configuration File. We don't use t4_fw_config_file()
4908 * because we want the ability to modify various features after we've
4909 * processed the configuration file ...
4911 memset(&caps_cmd, 0, sizeof(caps_cmd));
4912 caps_cmd.op_to_write =
4913 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4916 caps_cmd.cfvalid_to_len16 =
4917 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4918 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4919 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4920 FW_LEN16(caps_cmd));
4921 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4924 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4925 * Configuration File in FLASH), our last gasp effort is to use the
4926 * Firmware Configuration File which is embedded in the firmware. A
4927 * very few early versions of the firmware didn't have one embedded
4928 * but we can ignore those.
4930 if (ret == -ENOENT) {
4931 memset(&caps_cmd, 0, sizeof(caps_cmd));
4932 caps_cmd.op_to_write =
4933 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4936 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4937 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4938 sizeof(caps_cmd), &caps_cmd);
4939 config_name = "Firmware Default";
4946 finiver = ntohl(caps_cmd.finiver);
4947 finicsum = ntohl(caps_cmd.finicsum);
4948 cfcsum = ntohl(caps_cmd.cfcsum);
4949 if (finicsum != cfcsum)
4950 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4951 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4955 * And now tell the firmware to use the configuration we just loaded.
4957 caps_cmd.op_to_write =
4958 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4961 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4962 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4968 * Tweak configuration based on system architecture, module
4971 ret = adap_init0_tweaks(adapter);
4976 * And finally tell the firmware to initialize itself using the
4977 * parameters from the Configuration File.
4979 ret = t4_fw_initialize(adapter, adapter->mbox);
4984 * Return successfully and note that we're operating with parameters
4985 * not supplied by the driver, rather than from hard-wired
4986 * initialization constants buried in the driver.
4988 adapter->flags |= USING_SOFT_PARAMS;
4989 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4990 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4991 config_name, finiver, cfcsum);
4995 * Something bad happened. Return the error ... (If the "error"
4996 * is that there's no Configuration File on the adapter we don't
4997 * want to issue a warning since this is fairly common.)
5000 if (config_issued && ret != -ENOENT)
5001 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5007 * Attempt to initialize the adapter via hard-coded, driver supplied
5010 static int adap_init0_no_config(struct adapter *adapter, int reset)
5012 struct sge *s = &adapter->sge;
5013 struct fw_caps_config_cmd caps_cmd;
5018 * Reset device if necessary
5021 ret = t4_fw_reset(adapter, adapter->mbox,
5022 PIORSTMODE | PIORST);
5028 * Get device capabilities and select which we'll be using.
5030 memset(&caps_cmd, 0, sizeof(caps_cmd));
5031 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5032 FW_CMD_REQUEST | FW_CMD_READ);
5033 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5034 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5039 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5040 if (!vf_acls)
5041 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5042 else
5043 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5044 } else if (vf_acls) {
5045 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5048 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5049 FW_CMD_REQUEST | FW_CMD_WRITE);
5050 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5056 * Tweak configuration based on system architecture, module
5059 ret = adap_init0_tweaks(adapter);
5064 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5065 * mode which maps each Virtual Interface to its own section of
5066 * the RSS Table and we turn on all map and hash enables ...
5068 adapter->flags |= RSS_TNLALLLOOKUP;
5069 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5070 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5071 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5072 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5073 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5074 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5079 * Set up our own fundamental resource provisioning ...
5081 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5082 PFRES_NEQ, PFRES_NETHCTRL,
5083 PFRES_NIQFLINT, PFRES_NIQ,
5084 PFRES_TC, PFRES_NVI,
5085 FW_PFVF_CMD_CMASK_MASK,
5086 pfvfres_pmask(adapter, adapter->fn, 0),
5088 PFRES_R_CAPS, PFRES_WX_CAPS);
5093 * Perform low level SGE initialization. We need to do this before we
5094 * send the firmware the INITIALIZE command because that will cause
5095 * any other PF Drivers which are waiting for the Master
5096 * Initialization to proceed forward.
5098 for (i = 0; i < SGE_NTIMERS - 1; i++)
5099 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5100 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5101 s->counter_val[0] = 1;
5102 for (i = 1; i < SGE_NCOUNTERS; i++)
5103 s->counter_val[i] = min(intr_cnt[i - 1],
5104 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5105 t4_sge_init(adapter);
5107 #ifdef CONFIG_PCI_IOV
5109 * Provision resource limits for Virtual Functions. We currently
5110 * grant them all the same static resource limits except for the Port
5111 * Access Rights Mask which we're assigning based on the PF. All of
5112 * the static provisioning stuff for both the PF and VF really needs
5113 * to be managed in a persistent manner for each device which the
5114 * firmware controls.
5119 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5120 if (num_vf[pf] <= 0)
5123 /* VF numbering starts at 1! */
5124 for (vf = 1; vf <= num_vf[pf]; vf++) {
5125 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5127 VFRES_NEQ, VFRES_NETHCTRL,
5128 VFRES_NIQFLINT, VFRES_NIQ,
5129 VFRES_TC, VFRES_NVI,
5130 FW_PFVF_CMD_CMASK_MASK,
5134 VFRES_R_CAPS, VFRES_WX_CAPS);
5136 dev_warn(adapter->pdev_dev,
5138 "provision pf/vf=%d/%d; "
5139 "err=%d\n", pf, vf, ret);
5146 * Set up the default filter mode. Later we'll want to implement this
5147 * via a firmware command, etc. ... This needs to be done before the
5148 * firmware initialization command ... If the selected set of fields
5149 * isn't equal to the default value, we'll need to make sure that the
5150 * field selections will fit in the 36-bit budget.
5152 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5155 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5156 switch (tp_vlan_pri_map & (1 << j)) {
5158 /* compressed filter field not enabled */
5178 case ETHERTYPE_MASK:
5184 case MPSHITTYPE_MASK:
5187 case FRAGMENTATION_MASK:
5193 dev_err(adapter->pdev_dev,
5194 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5195 " using %#x\n", tp_vlan_pri_map, bits,
5196 TP_VLAN_PRI_MAP_DEFAULT);
5197 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5200 v = tp_vlan_pri_map;
5201 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5202 &v, 1, TP_VLAN_PRI_MAP);
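/*
 * Worked example of the budget check above (illustrative only; the
 * per-field widths live in the elided switch cases): a hypothetical
 * map enabling just ETHERTYPE (16 bits), MPSHITTYPE (3 bits) and
 * FRAGMENTATION (1 bit) needs 20 bits and fits in the 36-bit budget,
 * while enabling every compressed filter field would overflow it and
 * trigger the fallback to TP_VLAN_PRI_MAP_DEFAULT above.
 */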
5205 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5206 * to support any of the compressed filter fields above. Newer
5207 * versions of the firmware do this automatically but it doesn't hurt
5208 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5209 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5210 * since the firmware automatically turns this on and off when we have
5211 * a non-zero number of filters active (since it does have a
5212 * performance impact).
5214 if (tp_vlan_pri_map)
5215 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5216 FIVETUPLELOOKUP_MASK,
5217 FIVETUPLELOOKUP_MASK);
5220 * Tweak some settings.
5222 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5223 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5224 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5225 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5228 * Get basic stuff going by issuing the Firmware Initialize command.
5229 * Note that this _must_ be after all PFVF commands ...
5231 ret = t4_fw_initialize(adapter, adapter->mbox);
5236 * Return successfully!
5238 dev_info(adapter->pdev_dev, "Successfully configured using built-in "
5239 "driver parameters\n");
5243 * Something bad happened. Return the error ...
5249 static struct fw_info fw_info_array[] = {
5252 .fs_name = FW4_CFNAME,
5253 .fw_mod_name = FW4_FNAME,
5255 .chip = FW_HDR_CHIP_T4,
5256 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5257 .intfver_nic = FW_INTFVER(T4, NIC),
5258 .intfver_vnic = FW_INTFVER(T4, VNIC),
5259 .intfver_ri = FW_INTFVER(T4, RI),
5260 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5261 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5265 .fs_name = FW5_CFNAME,
5266 .fw_mod_name = FW5_FNAME,
5268 .chip = FW_HDR_CHIP_T5,
5269 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5270 .intfver_nic = FW_INTFVER(T5, NIC),
5271 .intfver_vnic = FW_INTFVER(T5, VNIC),
5272 .intfver_ri = FW_INTFVER(T5, RI),
5273 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5274 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5279 static struct fw_info *find_fw_info(int chip)
5283 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5284 if (fw_info_array[i].chip == chip)
5285 return &fw_info_array[i];
5291 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5293 static int adap_init0(struct adapter *adap)
5297 enum dev_state state;
5298 u32 params[7], val[7];
5299 struct fw_caps_config_cmd caps_cmd;
5303 * Contact FW, advertising Master capability (and potentially forcing
5304 * ourselves as the Master PF if our module parameter force_init is set).
5307 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5308 force_init ? MASTER_MUST : MASTER_MAY,
5311 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5315 if (ret == adap->mbox)
5316 adap->flags |= MASTER_PF;
5317 if (force_init && state == DEV_STATE_INIT)
5318 state = DEV_STATE_UNINIT;
5321 * If we're the Master PF Driver and the device is uninitialized,
5322 * then let's consider upgrading the firmware ... (We always want
5323 * to check the firmware version number in order to A. get it for
5324 * later reporting and B. warn if the currently loaded firmware
5325 * is excessively mismatched relative to the driver.)
5327 t4_get_fw_version(adap, &adap->params.fw_vers);
5328 t4_get_tp_version(adap, &adap->params.tp_vers);
5329 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5330 struct fw_info *fw_info;
5331 struct fw_hdr *card_fw;
5332 const struct firmware *fw;
5333 const u8 *fw_data = NULL;
5334 unsigned int fw_size = 0;
5336 /* This is the firmware whose headers the driver was compiled against. */
5339 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5340 if (fw_info == NULL) {
5341 dev_err(adap->pdev_dev,
5342 "unable to get firmware info for chip %d.\n",
5343 CHELSIO_CHIP_VERSION(adap->params.chip));
5347 /* allocate memory to read the header of the firmware on the card. */
5350 card_fw = t4_alloc_mem(sizeof(*card_fw));
5352 /* Get FW from /lib/firmware/ */
5353 ret = request_firmware(&fw, fw_info->fw_mod_name,
5356 dev_err(adap->pdev_dev,
5357 "unable to load firmware image %s, error %d\n",
5358 fw_info->fw_mod_name, ret);
5364 /* upgrade FW logic */
5365 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5370 release_firmware(fw);
5371 t4_free_mem(card_fw);
5378 * Grab VPD parameters. This should be done after we establish a
5379 * connection to the firmware since some of the VPD parameters
5380 * (notably the Core Clock frequency) are retrieved via requests to
5381 * the firmware. On the other hand, we need these fairly early on
5382 * so we do this right after getting ahold of the firmware.
5384 ret = get_vpd_params(adap, &adap->params.vpd);
5389 * Find out what ports are available to us. Note that we need to do
5390 * this before calling adap_init0_no_config() since it needs nports
5394 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5395 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5396 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5400 adap->params.nports = hweight32(port_vec);
5401 adap->params.portvec = port_vec;
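/*
 * Example with a hypothetical port vector: port_vec = 0x5 means ports
 * 0 and 2 are available, so hweight32(0x5) == 2 and the driver comes
 * up with params.nports == 2 and params.portvec == 0x5.
 */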
5404 * If the firmware is initialized already (and we're not forcing a
5405 * master initialization), note that we're living with existing
5406 * adapter parameters. Otherwise, it's time to try initializing the
5409 if (state == DEV_STATE_INIT) {
5410 dev_info(adap->pdev_dev, "Coming up as %s: "
5411 "Adapter already initialized\n",
5412 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5413 adap->flags |= USING_SOFT_PARAMS;
5415 dev_info(adap->pdev_dev, "Coming up as MASTER: "
5416 "Initializing adapter\n");
5419 * If the firmware doesn't support Configuration
5420 * Files, warn the user and exit.
5423 dev_warn(adap->pdev_dev, "Firmware doesn't support "
5424 "configuration file.\n");
5426 ret = adap_init0_no_config(adap, reset);
5429 * Find out whether we're dealing with a version of
5430 * the firmware which has configuration file support.
5432 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5433 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5434 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5438 * If the firmware doesn't support Configuration
5439 * Files, use the old Driver-based, hard-wired
5440 * initialization. Otherwise, try using the
5441 * Configuration File support and fall back to the
5442 * Driver-based initialization if there's no
5443 * Configuration File found.
5446 ret = adap_init0_no_config(adap, reset);
5449 * The firmware provides us with a memory
5450 * buffer where we can load a Configuration
5451 * File from the host if we want to override
5452 * the Configuration File in flash.
5455 ret = adap_init0_config(adap, reset);
5456 if (ret == -ENOENT) {
5457 dev_info(adap->pdev_dev,
5458 "No Configuration File present "
5459 "on adapter. Using hard-wired "
5460 "configuration parameters.\n");
5461 ret = adap_init0_no_config(adap, reset);
5466 dev_err(adap->pdev_dev,
5467 "could not initialize adapter, error %d\n",
5474 * If we're living with non-hard-coded parameters (either from a
5475 * Firmware Configuration File or values programmed by a different PF
5476 * Driver), give the SGE code a chance to pull in anything that it
5477 * needs ... Note that this must be called after we retrieve our VPD
5478 * parameters in order to know how to convert core ticks to seconds.
5480 if (adap->flags & USING_SOFT_PARAMS) {
5481 ret = t4_sge_init(adap);
5486 if (is_bypass_device(adap->pdev->device))
5487 adap->params.bypass = 1;
5490 * Grab some of our basic fundamental operating parameters.
5492 #define FW_PARAM_DEV(param) \
5493 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5494 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5496 #define FW_PARAM_PFVF(param) \
5497 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5498 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5499 FW_PARAMS_PARAM_Y(0) | \
5500 FW_PARAMS_PARAM_Z(0)
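/*
 * For example, FW_PARAM_PFVF(L2T_START) builds a parameter word with
 * mnemonic FW_PARAMS_MNEM_PFVF, X = FW_PARAMS_PARAM_PFVF_L2T_START
 * and Y = Z = 0; t4_query_params() below hands these words to the
 * firmware and gets the corresponding values back in val[].
 */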
5502 params[0] = FW_PARAM_PFVF(EQ_START);
5503 params[1] = FW_PARAM_PFVF(L2T_START);
5504 params[2] = FW_PARAM_PFVF(L2T_END);
5505 params[3] = FW_PARAM_PFVF(FILTER_START);
5506 params[4] = FW_PARAM_PFVF(FILTER_END);
5507 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5508 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5511 adap->sge.egr_start = val[0];
5512 adap->l2t_start = val[1];
5513 adap->l2t_end = val[2];
5514 adap->tids.ftid_base = val[3];
5515 adap->tids.nftids = val[4] - val[3] + 1;
5516 adap->sge.ingr_start = val[5];
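/*
 * The *_START/*_END values are inclusive bounds, hence the "+ 1"
 * arithmetic above. With hypothetical values FILTER_START = 0x0 and
 * FILTER_END = 0x17f, nftids = 0x17f - 0x0 + 1 = 384 filter IDs.
 */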
5518 /* query params related to active filter region */
5519 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5520 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5521 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5522 /* If the active filter region is non-empty, we can establish
5523 * offload connections through firmware work requests.
5525 if ((val[0] != val[1]) && (ret >= 0)) {
5526 adap->flags |= FW_OFLD_CONN;
5527 adap->tids.aftid_base = val[0];
5528 adap->tids.aftid_end = val[1];
5531 /* If we're running on newer firmware, let it know that we're
5532 * prepared to deal with encapsulated CPL messages. Older
5533 * firmware won't understand this and we'll just get
5534 * unencapsulated messages ...
5536 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5538 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5541 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5542 * capability. Earlier versions of the firmware didn't have the
5543 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5544 * permission to use ULPTX MEMWRITE DSGL.
5546 if (is_t4(adap->params.chip)) {
5547 adap->params.ulptx_memwrite_dsgl = false;
5549 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5550 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5552 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5556 * Get device capabilities so we can determine what resources we need
5559 memset(&caps_cmd, 0, sizeof(caps_cmd));
5560 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5561 FW_CMD_REQUEST | FW_CMD_READ);
5562 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5563 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5568 if (caps_cmd.ofldcaps) {
5569 /* query offload-related parameters */
5570 params[0] = FW_PARAM_DEV(NTID);
5571 params[1] = FW_PARAM_PFVF(SERVER_START);
5572 params[2] = FW_PARAM_PFVF(SERVER_END);
5573 params[3] = FW_PARAM_PFVF(TDDP_START);
5574 params[4] = FW_PARAM_PFVF(TDDP_END);
5575 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5576 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5580 adap->tids.ntids = val[0];
5581 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5582 adap->tids.stid_base = val[1];
5583 adap->tids.nstids = val[2] - val[1] + 1;
5585 * Set up the server filter region. Divide the available filter
5586 * region into two parts: regular filters get 1/3rd and server
5587 * filters get the remaining 2/3rd. This is only enabled if the
5588 * workaround path is enabled.
5589 * 1. Regular filters.
5590 * 2. Server filters: these are special filters used to redirect
5591 * SYN packets to the offload queue.
5593 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5594 adap->tids.sftid_base = adap->tids.ftid_base +
5595 DIV_ROUND_UP(adap->tids.nftids, 3);
5596 adap->tids.nsftids = adap->tids.nftids -
5597 DIV_ROUND_UP(adap->tids.nftids, 3);
5598 adap->tids.nftids = adap->tids.sftid_base -
5599 adap->tids.ftid_base;
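/*
 * Worked example of the split above: with nftids = 192,
 * DIV_ROUND_UP(192, 3) = 64, so the server filter region starts 64
 * IDs into the original region, nsftids = 192 - 64 = 128, and the
 * regular filter count shrinks to sftid_base - ftid_base = 64.
 */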
5601 adap->vres.ddp.start = val[3];
5602 adap->vres.ddp.size = val[4] - val[3] + 1;
5603 adap->params.ofldq_wr_cred = val[5];
5605 adap->params.offload = 1;
5607 if (caps_cmd.rdmacaps) {
5608 params[0] = FW_PARAM_PFVF(STAG_START);
5609 params[1] = FW_PARAM_PFVF(STAG_END);
5610 params[2] = FW_PARAM_PFVF(RQ_START);
5611 params[3] = FW_PARAM_PFVF(RQ_END);
5612 params[4] = FW_PARAM_PFVF(PBL_START);
5613 params[5] = FW_PARAM_PFVF(PBL_END);
5614 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5618 adap->vres.stag.start = val[0];
5619 adap->vres.stag.size = val[1] - val[0] + 1;
5620 adap->vres.rq.start = val[2];
5621 adap->vres.rq.size = val[3] - val[2] + 1;
5622 adap->vres.pbl.start = val[4];
5623 adap->vres.pbl.size = val[5] - val[4] + 1;
5625 params[0] = FW_PARAM_PFVF(SQRQ_START);
5626 params[1] = FW_PARAM_PFVF(SQRQ_END);
5627 params[2] = FW_PARAM_PFVF(CQ_START);
5628 params[3] = FW_PARAM_PFVF(CQ_END);
5629 params[4] = FW_PARAM_PFVF(OCQ_START);
5630 params[5] = FW_PARAM_PFVF(OCQ_END);
5631 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5634 adap->vres.qp.start = val[0];
5635 adap->vres.qp.size = val[1] - val[0] + 1;
5636 adap->vres.cq.start = val[2];
5637 adap->vres.cq.size = val[3] - val[2] + 1;
5638 adap->vres.ocq.start = val[4];
5639 adap->vres.ocq.size = val[5] - val[4] + 1;
5641 if (caps_cmd.iscsicaps) {
5642 params[0] = FW_PARAM_PFVF(ISCSI_START);
5643 params[1] = FW_PARAM_PFVF(ISCSI_END);
5644 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5648 adap->vres.iscsi.start = val[0];
5649 adap->vres.iscsi.size = val[1] - val[0] + 1;
5651 #undef FW_PARAM_PFVF
5654 /* The MTU/MSS Table is initialized by now, so load its values. If
5655 * we're initializing the adapter, then we'll make any modifications
5656 * we want to the MTU/MSS Table and also initialize the congestion
5659 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5660 if (state != DEV_STATE_INIT) {
5663 /* The default MTU Table contains values 1492 and 1500.
5664 * However, for TCP, it's better to have two values which are
5665 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5666 * This allows us to have a TCP Data Payload which is a
5667 * multiple of 8 regardless of what combination of TCP Options
5668 * are in use (always a multiple of 4 bytes) which is
5669 * important for performance reasons. For instance, if no
5670 * options are in use, then we have a 20-byte IP header and a
5671 * 20-byte TCP header. In this case, a 1500-byte MSS would
5672 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5673 * which is not a multiple of 8. So using an MSS of 1488 in
5674 * this case results in a TCP Data Payload of 1448 bytes which
5675 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5676 * Stamps have been negotiated, then an MTU of 1500 bytes
5677 * results in a TCP Data Payload of 1448 bytes which, as
5678 * above, is a multiple of 8 bytes ...
5680 for (i = 0; i < NMTUS; i++)
5681 if (adap->params.mtus[i] == 1492) {
5682 adap->params.mtus[i] = 1488;
5686 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5687 adap->params.b_wnd);
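/*
 * Concretely: with no TCP options, a 1488-byte MTU yields
 * 1488 - 20 - 20 = 1448 payload bytes, a multiple of 8; with 12-byte
 * timestamps negotiated, a 1500-byte MTU yields
 * 1500 - 20 - 20 - 12 = 1448 bytes as well. The default 1500-byte
 * entry is left in place, so both cases are covered by the table.
 */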
5689 t4_init_tp_params(adap);
5690 adap->flags |= FW_OK;
5694 * Something bad happened. If a command timed out or failed with EIO,
5695 * the firmware is either not operating within its spec or something
5696 * catastrophic happened to the HW/FW; stop issuing commands.
5699 if (ret != -ETIMEDOUT && ret != -EIO)
5700 t4_fw_bye(adap, adap->mbox);
5706 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5707 pci_channel_state_t state)
5710 struct adapter *adap = pci_get_drvdata(pdev);
5716 adap->flags &= ~FW_OK;
5717 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5718 spin_lock(&adap->stats_lock);
5719 for_each_port(adap, i) {
5720 struct net_device *dev = adap->port[i];
5722 netif_device_detach(dev);
5723 netif_carrier_off(dev);
5725 spin_unlock(&adap->stats_lock);
5726 if (adap->flags & FULL_INIT_DONE)
5729 if ((adap->flags & DEV_ENABLED)) {
5730 pci_disable_device(pdev);
5731 adap->flags &= ~DEV_ENABLED;
5733 out: return state == pci_channel_io_perm_failure ?
5734 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5737 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5740 struct fw_caps_config_cmd c;
5741 struct adapter *adap = pci_get_drvdata(pdev);
5744 pci_restore_state(pdev);
5745 pci_save_state(pdev);
5746 return PCI_ERS_RESULT_RECOVERED;
5749 if (!(adap->flags & DEV_ENABLED)) {
5750 if (pci_enable_device(pdev)) {
5751 dev_err(&pdev->dev, "Cannot reenable PCI "
5752 "device after reset\n");
5753 return PCI_ERS_RESULT_DISCONNECT;
5755 adap->flags |= DEV_ENABLED;
5758 pci_set_master(pdev);
5759 pci_restore_state(pdev);
5760 pci_save_state(pdev);
5761 pci_cleanup_aer_uncorrect_error_status(pdev);
5763 if (t4_wait_dev_ready(adap) < 0)
5764 return PCI_ERS_RESULT_DISCONNECT;
5765 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
5766 return PCI_ERS_RESULT_DISCONNECT;
5767 adap->flags |= FW_OK;
5768 if (adap_init1(adap, &c))
5769 return PCI_ERS_RESULT_DISCONNECT;
5771 for_each_port(adap, i) {
5772 struct port_info *p = adap2pinfo(adap, i);
5774 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5777 return PCI_ERS_RESULT_DISCONNECT;
5779 p->xact_addr_filt = -1;
5782 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5783 adap->params.b_wnd);
5786 return PCI_ERS_RESULT_DISCONNECT;
5787 return PCI_ERS_RESULT_RECOVERED;
5790 static void eeh_resume(struct pci_dev *pdev)
5793 struct adapter *adap = pci_get_drvdata(pdev);
5799 for_each_port(adap, i) {
5800 struct net_device *dev = adap->port[i];
5802 if (netif_running(dev)) {
5804 cxgb_set_rxmode(dev);
5806 netif_device_attach(dev);
5811 static const struct pci_error_handlers cxgb4_eeh = {
5812 .error_detected = eeh_err_detected,
5813 .slot_reset = eeh_slot_reset,
5814 .resume = eeh_resume,
5817 static inline bool is_x_10g_port(const struct link_config *lc)
5819 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
5820 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
5823 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5824 unsigned int us, unsigned int cnt,
5825 unsigned int size, unsigned int iqe_size)
5828 set_rspq_intr_params(q, us, cnt);
5829 q->iqe_len = iqe_size;
5834 * Perform default configuration of DMA queues depending on the number and type
5835 * of ports we found and the number of available CPUs. Most settings can be
5836 * modified by the admin prior to actual use.
5838 static void cfg_queues(struct adapter *adap)
5840 struct sge *s = &adap->sge;
5841 int i, q10g = 0, n10g = 0, qidx = 0;
5844 for_each_port(adap, i)
5845 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5848 * We default to 1 queue per non-10G port and up to # of cores queues per 10G port.
5852 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5853 if (q10g > netif_get_num_default_rss_queues())
5854 q10g = netif_get_num_default_rss_queues();
5856 for_each_port(adap, i) {
5857 struct port_info *pi = adap2pinfo(adap, i);
5859 pi->first_qset = qidx;
5860 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
5865 s->max_ethqsets = qidx; /* MSI-X may lower it later */
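/*
 * Example of the distribution above, with hypothetical sizes: 4 ports
 * of which 2 are 10G and MAX_ETH_QSETS = 32 give
 * q10g = (32 - 2) / 2 = 15, which is then capped at
 * netif_get_num_default_rss_queues() (typically 8). Each 10G port
 * then gets 8 queue sets and each 1G port gets 1.
 */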
5867 if (is_offload(adap)) {
5869 * For offload we use 1 queue/channel if all ports are up to 1G,
5870 * otherwise we divide all available queues amongst the channels
5871 * capped by the number of available cores.
5874 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5876 s->ofldqsets = roundup(i, adap->params.nports);
5878 s->ofldqsets = adap->params.nports;
5879 /* For RDMA one Rx queue per channel suffices */
5880 s->rdmaqs = adap->params.nports;
5881 s->rdmaciqs = adap->params.nports;
5884 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5885 struct sge_eth_rxq *r = &s->ethrxq[i];
5887 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5891 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5892 s->ethtxq[i].q.size = 1024;
5894 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5895 s->ctrlq[i].q.size = 512;
5897 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5898 s->ofldtxq[i].q.size = 1024;
5900 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5901 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5903 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
5904 r->rspq.uld = CXGB4_ULD_ISCSI;
5908 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5909 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5911 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
5912 r->rspq.uld = CXGB4_ULD_RDMA;
5916 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5917 if (ciq_size > SGE_MAX_IQ_SIZE) {
5918 CH_WARN(adap, "CIQ size too small for available IQs\n");
5919 ciq_size = SGE_MAX_IQ_SIZE;
5922 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5923 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5925 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
5926 r->rspq.uld = CXGB4_ULD_RDMA;
5929 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5930 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
5934 * Reduce the number of Ethernet queues across all ports to at most n.
5935 * n provides at least one queue per port.
5937 static void reduce_ethqs(struct adapter *adap, int n)
5940 struct port_info *pi;
5942 while (n < adap->sge.ethqsets)
5943 for_each_port(adap, i) {
5944 pi = adap2pinfo(adap, i);
5945 if (pi->nqsets > 1) {
5947 adap->sge.ethqsets--;
5948 if (adap->sge.ethqsets <= n)
5954 for_each_port(adap, i) {
5955 pi = adap2pinfo(adap, i);
5961 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5962 #define EXTRA_VECS 2
5964 static int enable_msix(struct adapter *adap)
5968 struct sge *s = &adap->sge;
5969 unsigned int nchan = adap->params.nports;
5970 struct msix_entry entries[MAX_INGQ + 1];
5972 for (i = 0; i < ARRAY_SIZE(entries); ++i)
5973 entries[i].entry = i;
5975 want = s->max_ethqsets + EXTRA_VECS;
5976 if (is_offload(adap)) {
5977 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
5978 /* need nchan for each possible ULD */
5979 ofld_need = 3 * nchan;
5981 need = adap->params.nports + EXTRA_VECS + ofld_need;
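/*
 * Example budget for a hypothetical 4-port offload-capable adapter:
 * ofld_need = 3 * 4 = 12 and need = 4 + 2 + 12 = 18, so
 * pci_enable_msix_range() below may grant anywhere between 18 vectors
 * and the full "want" count, and the leftovers are distributed with
 * NIC queues taking priority.
 */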
5983 want = pci_enable_msix_range(adap->pdev, entries, need, want);
5988 * Distribute available vectors to the various queue groups.
5989 * Every group gets its minimum requirement and NIC gets top
5990 * priority for leftovers.
5992 i = want - EXTRA_VECS - ofld_need;
5993 if (i < s->max_ethqsets) {
5994 s->max_ethqsets = i;
5995 if (i < s->ethqsets)
5996 reduce_ethqs(adap, i);
5998 if (is_offload(adap)) {
5999 i = want - EXTRA_VECS - s->max_ethqsets;
6000 i -= ofld_need - nchan;
6001 s->ofldqsets = (i / nchan) * nchan; /* round down */
6003 for (i = 0; i < want; ++i)
6004 adap->msix_info[i].vec = entries[i].vector;
6011 static int init_rss(struct adapter *adap)
6015 for_each_port(adap, i) {
6016 struct port_info *pi = adap2pinfo(adap, i);
6018 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6021 for (j = 0; j < pi->rss_size; j++)
6022 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
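/*
 * ethtool_rxfh_indir_default(j, n) is simply j % n, so a port with
 * rss_size = 64 and nqsets = 8 ends up with the indirection table
 * 0, 1, ..., 7, 0, 1, ..., spreading RSS buckets round-robin across
 * the port's queue sets.
 */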
6027 static void print_port_info(const struct net_device *dev)
6031 const char *spd = "";
6032 const struct port_info *pi = netdev_priv(dev);
6033 const struct adapter *adap = pi->adapter;
6035 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6037 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6039 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6042 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6043 bufp += sprintf(bufp, "100/");
6044 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6045 bufp += sprintf(bufp, "1000/");
6046 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6047 bufp += sprintf(bufp, "10G/");
6048 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6049 bufp += sprintf(bufp, "40G/");
6052 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6054 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6055 adap->params.vpd.id,
6056 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6057 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6058 (adap->flags & USING_MSIX) ? " MSI-X" :
6059 (adap->flags & USING_MSI) ? " MSI" : "");
6060 netdev_info(dev, "S/N: %s, P/N: %s\n",
6061 adap->params.vpd.sn, adap->params.vpd.pn);
6064 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6066 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6070 * Free the following resources:
6071 * - memory used for tables
6074 * - resources FW is holding for us
6076 static void free_some_resources(struct adapter *adapter)
6080 t4_free_mem(adapter->l2t);
6081 t4_free_mem(adapter->tids.tid_tab);
6082 disable_msi(adapter);
6084 for_each_port(adapter, i)
6085 if (adapter->port[i]) {
6086 kfree(adap2pinfo(adapter, i)->rss);
6087 free_netdev(adapter->port[i]);
6089 if (adapter->flags & FW_OK)
6090 t4_fw_bye(adapter, adapter->fn);
6093 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6094 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6095 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6096 #define SEGMENT_SIZE 128
6098 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6100 int func, i, err, s_qpp, qpp, num_seg;
6101 struct port_info *pi;
6102 bool highdma = false;
6103 struct adapter *adapter = NULL;
6105 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6107 err = pci_request_regions(pdev, KBUILD_MODNAME);
6109 /* Just info, some other driver may have claimed the device. */
6110 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6114 /* We control everything through one PF */
6115 func = PCI_FUNC(pdev->devfn);
6116 if (func != ent->driver_data) {
6117 pci_save_state(pdev); /* to restore SR-IOV later */
6121 err = pci_enable_device(pdev);
6123 dev_err(&pdev->dev, "cannot enable PCI device\n");
6124 goto out_release_regions;
6127 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6129 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6131 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6132 "coherent allocations\n");
6133 goto out_disable_device;
6136 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6138 dev_err(&pdev->dev, "no usable DMA configuration\n");
6139 goto out_disable_device;
6143 pci_enable_pcie_error_reporting(pdev);
6144 enable_pcie_relaxed_ordering(pdev);
6145 pci_set_master(pdev);
6146 pci_save_state(pdev);
6148 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6151 goto out_disable_device;
6154 /* PCI device has been enabled */
6155 adapter->flags |= DEV_ENABLED;
6157 adapter->regs = pci_ioremap_bar(pdev, 0);
6158 if (!adapter->regs) {
6159 dev_err(&pdev->dev, "cannot map device registers\n");
6161 goto out_free_adapter;
6164 adapter->pdev = pdev;
6165 adapter->pdev_dev = &pdev->dev;
6166 adapter->mbox = func;
6168 adapter->msg_enable = dflt_msg_enable;
6169 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6171 spin_lock_init(&adapter->stats_lock);
6172 spin_lock_init(&adapter->tid_release_lock);
6174 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6175 INIT_WORK(&adapter->db_full_task, process_db_full);
6176 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6178 err = t4_prep_adapter(adapter);
6180 goto out_unmap_bar0;
6182 if (!is_t4(adapter->params.chip)) {
6183 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6184 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6185 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6186 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6188 /* Each segment is 128B in size. Write coalescing is enabled only
6189 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
6190 * queue is less than the number of segments that can fit in a page.
6193 if (qpp > num_seg) {
6195 "Incorrect number of egress queues per page\n");
6197 goto out_unmap_bar0;
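/*
 * Example of the check above: with 4KB pages and 128-byte segments,
 * num_seg = 4096 / 128 = 32. A queues-per-page value above 32 would
 * leave each queue's doorbell area smaller than one write-combining
 * segment, so probing is aborted rather than mapping BAR2.
 */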
6199 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6200 pci_resource_len(pdev, 2));
6201 if (!adapter->bar2) {
6202 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6204 goto out_unmap_bar0;
6208 setup_memwin(adapter);
6209 err = adap_init0(adapter);
6210 setup_memwin_rdma(adapter);
6214 for_each_port(adapter, i) {
6215 struct net_device *netdev;
6217 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6224 SET_NETDEV_DEV(netdev, &pdev->dev);
6226 adapter->port[i] = netdev;
6227 pi = netdev_priv(netdev);
6228 pi->adapter = adapter;
6229 pi->xact_addr_filt = -1;
6231 netdev->irq = pdev->irq;
6233 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6234 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6235 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6236 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6238 netdev->hw_features |= NETIF_F_HIGHDMA;
6239 netdev->features |= netdev->hw_features;
6240 netdev->vlan_features = netdev->features & VLAN_FEAT;
6242 netdev->priv_flags |= IFF_UNICAST_FLT;
6244 netdev->netdev_ops = &cxgb4_netdev_ops;
6245 netdev->ethtool_ops = &cxgb_ethtool_ops;
6248 pci_set_drvdata(pdev, adapter);
6250 if (adapter->flags & FW_OK) {
6251 err = t4_port_init(adapter, func, func, 0);
6257 * Configure queues and allocate tables now, they can be needed as
6258 * soon as the first register_netdev completes.
6260 cfg_queues(adapter);
6262 adapter->l2t = t4_init_l2t();
6263 if (!adapter->l2t) {
6264 /* We tolerate a lack of L2T, giving up some functionality */
6265 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6266 adapter->params.offload = 0;
6269 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6270 dev_warn(&pdev->dev, "could not allocate TID table, "
6272 adapter->params.offload = 0;
6275 /* See what interrupts we'll be using */
6276 if (msi > 1 && enable_msix(adapter) == 0)
6277 adapter->flags |= USING_MSIX;
6278 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6279 adapter->flags |= USING_MSI;
6281 err = init_rss(adapter);
6286 * The card is now ready to go. If any errors occur during device
6287 * registration we do not fail the whole card but rather proceed only
6288 * with the ports we manage to register successfully. However we must
6289 * register at least one net device.
6291 for_each_port(adapter, i) {
6292 pi = adap2pinfo(adapter, i);
6293 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6294 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6296 err = register_netdev(adapter->port[i]);
6299 adapter->chan_map[pi->tx_chan] = i;
6300 print_port_info(adapter->port[i]);
6303 dev_err(&pdev->dev, "could not register any net devices\n");
6307 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6311 if (cxgb4_debugfs_root) {
6312 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6313 cxgb4_debugfs_root);
6314 setup_debugfs(adapter);
6317 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6318 pdev->needs_freset = 1;
6320 if (is_offload(adapter))
6321 attach_ulds(adapter);
6324 #ifdef CONFIG_PCI_IOV
6325 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6326 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6327 dev_info(&pdev->dev,
6328 "instantiated %u virtual functions\n",
6334 free_some_resources(adapter);
6336 if (!is_t4(adapter->params.chip))
6337 iounmap(adapter->bar2);
6339 iounmap(adapter->regs);
6343 pci_disable_pcie_error_reporting(pdev);
6344 pci_disable_device(pdev);
6345 out_release_regions:
6346 pci_release_regions(pdev);
6350 static void remove_one(struct pci_dev *pdev)
6352 struct adapter *adapter = pci_get_drvdata(pdev);
6354 #ifdef CONFIG_PCI_IOV
6355 pci_disable_sriov(pdev);
6362 if (is_offload(adapter))
6363 detach_ulds(adapter);
6365 for_each_port(adapter, i)
6366 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6367 unregister_netdev(adapter->port[i]);
6369 if (adapter->debugfs_root)
6370 debugfs_remove_recursive(adapter->debugfs_root);
6372 /* If we allocated filters, free up state associated with any
6375 if (adapter->tids.ftid_tab) {
6376 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6377 for (i = 0; i < (adapter->tids.nftids +
6378 adapter->tids.nsftids); i++, f++)
6380 clear_filter(adapter, f);
6383 if (adapter->flags & FULL_INIT_DONE)
6386 free_some_resources(adapter);
6387 iounmap(adapter->regs);
6388 if (!is_t4(adapter->params.chip))
6389 iounmap(adapter->bar2);
6390 pci_disable_pcie_error_reporting(pdev);
6391 if ((adapter->flags & DEV_ENABLED)) {
6392 pci_disable_device(pdev);
6393 adapter->flags &= ~DEV_ENABLED;
6395 pci_release_regions(pdev);
6399 pci_release_regions(pdev);
6402 static struct pci_driver cxgb4_driver = {
6403 .name = KBUILD_MODNAME,
6404 .id_table = cxgb4_pci_tbl,
6406 .remove = remove_one,
6407 .shutdown = remove_one,
6408 .err_handler = &cxgb4_eeh,
6411 static int __init cxgb4_init_module(void)
6415 workq = create_singlethread_workqueue("cxgb4");
6419 /* Debugfs support is optional, just warn if this fails */
6420 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6421 if (!cxgb4_debugfs_root)
6422 pr_warn("could not create debugfs entry, continuing\n");
6424 ret = pci_register_driver(&cxgb4_driver);
6426 debugfs_remove(cxgb4_debugfs_root);
6427 destroy_workqueue(workq);
6430 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6435 static void __exit cxgb4_cleanup_module(void)
6437 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6438 pci_unregister_driver(&cxgb4_driver);
6439 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6440 flush_workqueue(workq);
6441 destroy_workqueue(workq);
6444 module_init(cxgb4_init_module);
6445 module_exit(cxgb4_cleanup_module);