/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free resources.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr
					 * capabilities
					 */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
	 * neither will have Free Lists associated with them.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
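
	/* Worked example of the constants above: with VFRES_NQSETS = 2 each
	 * VF needs 2 + 2 = 4 interrupt-capable ingress queues
	 * (VFRES_NIQFLINT; the extra two are the Firmware Event Queue and
	 * the Forwarded Interrupt Queue described above) and 2 * 2 = 4
	 * Egress Contexts (VFRES_NEQ: one per Ethernet/Control Egress Queue
	 * plus one per Free List).
	 */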
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	while (1) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
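		/* e.g. portvec = 0b0110: portvec - 1 = 0b0101,
		 * portvec & (portvec - 1) = 0b0100 (lowest set bit cleared),
		 * so the XOR leaves exactly that lowest set bit, 0b0010.
		 */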
		if (portn == 0)
			return pmask;

		portn--;
		portvec &= ~pmask;
	}
}

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5014, 4),
	CH_DEVICE(0x5015, 4),
	CH_DEVICE(0x5080, 4),
	CH_DEVICE(0x5081, 4),
	CH_DEVICE(0x5082, 4),
	CH_DEVICE(0x5083, 4),
	CH_DEVICE(0x5084, 4),
	CH_DEVICE(0x5085, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
	CH_DEVICE(0x5414, 4),
	CH_DEVICE(0x5415, 4),
	CH_DEVICE(0x5480, 4),
	CH_DEVICE(0x5481, 4),
	CH_DEVICE(0x5482, 4),
	CH_DEVICE(0x5483, 4),
	CH_DEVICE(0x5484, 4),
	CH_DEVICE(0x5485, 4),
};

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of
 * these upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
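
/* With the default offset of 2 the 14-byte Ethernet header ends at byte
 * offset 16 within the buffer, so the IP header that follows it starts on a
 * 4-byte boundary as described above.
 */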
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be
 * instantiated on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
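
/* For reference, the default field selection listed above consumes
 * 1 + 3 + 8 + 17 + 3 + 1 = 33 of the 36 bits available for the compressed
 * filter tuple.
 */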
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}
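
	/* Addresses that t4_alloc_mac_filt() could not give exact-match
	 * entries are accumulated into the uhash/mhash bit vectors (that is
	 * what the &uhash/&mhash arguments above collect), and the combined
	 * hash is programmed as an inexact filter below.
	 */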
	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
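		/* The pointer comparison below relies on struct sge laying
		 * out the Ethernet Tx queues (ethtxq[]) immediately before
		 * the offload Tx queues (ofldtxq[]), so any egr_map entry
		 * whose address is below &sge.ofldtxq[0] must be an Ethernet
		 * queue -- a layout assumption, not something checked here.
		 */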
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}
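
/* MSI-X vector layout assumed by the naming above and the IRQ requests
 * below: vector 0 carries the non-data (slow path) interrupt, vector 1 the
 * firmware event queue, and vectors 2 and up the per-queue interrupts --
 * hence both functions start their queue vector index at 2.
 */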
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
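		/* Encoding note: a negative msi_idx passed to the queue
		 * allocation calls below appears to mean "no dedicated
		 * vector -- forward interrupts to the INTx/MSI interrupt
		 * queue whose absolute ID is encoded here" (inferred from
		 * how the value is constructed, not documented at this call
		 * site).
		 */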
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}
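
	/* The register write below presumably steers RSS/trace control
	 * traffic to port 0's Tx channel and the first Ethernet Rx queue's
	 * absolute ID as a safe default (inferred from the RSSCONTROL and
	 * QUEUENUMBER field names; the hardware documentation is not quoted
	 * here).
	 */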
	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxBroadcastFrames   ",
	"TxMulticastFrames   ",
	"TxFrames128To255    ",
	"TxFrames256To511    ",
	"TxFrames512To1023   ",
	"TxFrames1024To1518  ",
	"TxFrames1519ToMax   ",
	"RxBroadcastFrames   ",
	"RxMulticastFrames   ",
	"RxFrames128To255    ",
	"RxFrames256To511    ",
	"RxFrames512To1023   ",
	"RxFrames1024To1518  ",
	"RxFrames1519ToMax   ",
	"RxBG0FramesDropped  ",
	"RxBG1FramesDropped  ",
	"RxBG2FramesDropped  ",
	"RxBG3FramesDropped  ",
	"RxBG0FramesTrunc    ",
	"RxBG1FramesTrunc    ",
	"RxBG2FramesTrunc    ",
	"RxBG3FramesTrunc    ",
	"WriteCoalSuccess    ",
	"WriteCoalMax        ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
	} else {
		memset(data, 0, 2 * sizeof(u64));
	}
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {

	static const unsigned int t5_reg_ranges[] = {

	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;
	int i;

	if (is_t4(ap->params.chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
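	/* reg_ranges[] is a flat array of {first, last} register-address
	 * pairs, consumed two at a time below; reg_block_dump() treats each
	 * range as inclusive of both endpoints.
	 */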
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;
	else if (type == FW_PORT_TYPE_BP40_BA)
		v |= SUPPORTED_40000baseSR4_Full;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	if (caps & ADVERTISED_40000baseSR4_Full)
		v |= FW_PORT_CAP_SPEED_40G;
	return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_FIBRE;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static unsigned int speed_to_caps(int speed)
{
	if (speed == 100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == 1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == 10000)
		return FW_PORT_CAP_SPEED_10G;
	if (speed == 40000)
		return FW_PORT_CAP_SPEED_40G;
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)  /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) ||
		    (speed == 1000) ||
		    (speed == 10000) ||
		    (speed == 40000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
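
/* Encoding used by the helper above and set_rxq_intr_params() below:
 * q->intr_params keeps the hold-off timer index in bits 1 and up (hence
 * the ">> 1") with bit 0 (QINTR_CNT_EN) enabling the packet-count
 * threshold.
 */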
/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
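	/* When us == 0 the timer index is forced to 6, one past the last
	 * loadable timer value (intr_holdoff[] provides SGE_NTIMERS - 1 = 5
	 * entries), so qtimer_val() above reports it as "no timer" -- this
	 * reading of index 6 is inferred from the bounds check there.
	 */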
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;
	int i;
	int r = 0;

	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
		q = &adap->sge.ethrxq[i].rspq;
		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
					c->rx_max_coalesced_frames);
		if (r) {
			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
			break;
		}
	}
	return r;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}

/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
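
/* Worked example of the mapping above with A = @fn * @sz: physical address
 * 0 maps to virtual 0 + (31 << 10) = 31744 (31K); physical 1024 maps to
 * virtual 31744 - A; and physical 1024 + A maps to virtual 0, exactly as
 * the three interval rules in the comment describe.
 */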
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh_indir    = get_rss_table,
	.set_rxfh_indir    = set_rss_table,
	.flash_device      = set_flash,
};

static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file_inode(file)->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;
2867 if (count > avail - pos)
2868 count = avail - pos;
2875 if ((mem == MEM_MC) || (mem == MEM_MC1))
2876 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2878 ret = t4_edc_read(adap, mem, pos, data, NULL);
2882 ofst = pos % sizeof(data);
2883 len = min(count, sizeof(data) - ofst);
2884 if (copy_to_user(buf, (u8 *)data + ofst, len))
2891 count = pos - *ppos;
2896 static const struct file_operations mem_debugfs_fops = {
2897 .owner = THIS_MODULE,
2898 .open = simple_open,
2900 .llseek = default_llseek,
2903 static void add_debugfs_mem(struct adapter *adap, const char *name,
2904 unsigned int idx, unsigned int size_mb)
2908 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2909 (void *)adap + idx, &mem_debugfs_fops);
2910 if (de && de->d_inode)
2911 de->d_inode->i_size = size_mb << 20;
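/* A sketch of the pointer-tagging scheme used here (assumes struct adapter
 * is at least 4-byte aligned, leaving the low two pointer bits free):
 * add_debugfs_mem() stores (void *)adap + idx as the file's private data,
 * and mem_read() above recovers both values:
 *
 *	unsigned int mem = (uintptr_t)file->private_data & 3;	// target index
 *	struct adapter *adap = file->private_data - mem;	// the pointer
 */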
2914 static int setup_debugfs(struct adapter *adap)
2919 if (IS_ERR_OR_NULL(adap->debugfs_root))
2922 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2923 if (i & EDRAM0_ENABLE) {
2924 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2925 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2927 if (i & EDRAM1_ENABLE) {
2928 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2929 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2931 if (is_t4(adap->params.chip)) {
2932 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2933 if (i & EXT_MEM_ENABLE)
2934 add_debugfs_mem(adap, "mc", MEM_MC,
2935 EXT_MEM_SIZE_GET(size));
2937 if (i & EXT_MEM_ENABLE) {
2938 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2939 add_debugfs_mem(adap, "mc0", MEM_MC0,
2940 EXT_MEM_SIZE_GET(size));
2942 if (i & EXT_MEM1_ENABLE) {
2943 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2944 add_debugfs_mem(adap, "mc1", MEM_MC1,
2945 EXT_MEM_SIZE_GET(size));
2949 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2955 * upper-layer driver support
2959 * Allocate an active-open TID and set it to the supplied value.
2961 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2965 spin_lock_bh(&t->atid_lock);
2967 union aopen_entry *p = t->afree;
2969 atid = (p - t->atid_tab) + t->atid_base;
2974 spin_unlock_bh(&t->atid_lock);
2977 EXPORT_SYMBOL(cxgb4_alloc_atid);
2980 * Release an active-open TID.
2982 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2984 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2986 spin_lock_bh(&t->atid_lock);
2990 spin_unlock_bh(&t->atid_lock);
2992 EXPORT_SYMBOL(cxgb4_free_atid);
2995 * Allocate a server TID and set it to the supplied value.
2997 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3001 spin_lock_bh(&t->stid_lock);
3002 if (family == PF_INET) {
3003 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3004 if (stid < t->nstids)
3005 __set_bit(stid, t->stid_bmap);
3009 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3014 t->stid_tab[stid].data = data;
3015 stid += t->stid_base;
3016 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3017 * This is equivalent to 4 TIDs. With CLIP enabled it goes down to 2 TIDs. */
3020 if (family == PF_INET)
3023 t->stids_in_use += 4;
3025 spin_unlock_bh(&t->stid_lock);
3028 EXPORT_SYMBOL(cxgb4_alloc_stid);
3030 /* Allocate a server filter TID and set it to the supplied value.
3032 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3036 spin_lock_bh(&t->stid_lock);
3037 if (family == PF_INET) {
3038 stid = find_next_zero_bit(t->stid_bmap,
3039 t->nstids + t->nsftids, t->nstids);
3040 if (stid < (t->nstids + t->nsftids))
3041 __set_bit(stid, t->stid_bmap);
3048 t->stid_tab[stid].data = data;
3050 stid += t->sftid_base;
3053 spin_unlock_bh(&t->stid_lock);
3056 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3058 /* Release a server TID.
3060 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3062 /* Is it a server filter TID? */
3063 if (t->nsftids && (stid >= t->sftid_base)) {
3064 stid -= t->sftid_base;
3067 stid -= t->stid_base;
3070 spin_lock_bh(&t->stid_lock);
3071 if (family == PF_INET)
3072 __clear_bit(stid, t->stid_bmap);
3074 bitmap_release_region(t->stid_bmap, stid, 2);
3075 t->stid_tab[stid].data = NULL;
3076 if (family == PF_INET)
3079 t->stids_in_use -= 4;
3080 spin_unlock_bh(&t->stid_lock);
3082 EXPORT_SYMBOL(cxgb4_free_stid);
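/* A minimal usage sketch (hypothetical ULD code, not part of this file):
 * server TIDs are allocated and released in matched pairs, with the same
 * address family passed to both calls so the 4-TID IPv6 regions are
 * reclaimed correctly:
 *
 *	int stid = cxgb4_alloc_stid(&adap->tids, PF_INET6, my_ctx);
 *	if (stid >= 0) {
 *		// ... bring up the listener using this TID ...
 *		cxgb4_free_stid(&adap->tids, stid, PF_INET6);
 *	}
 */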
3085 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3087 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3090 struct cpl_tid_release *req;
3092 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3093 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3094 INIT_TP_WR(req, tid);
3095 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3099 * Queue a TID release request and if necessary schedule a work queue to process it.
3102 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3105 void **p = &t->tid_tab[tid];
3106 struct adapter *adap = container_of(t, struct adapter, tids);
3108 spin_lock_bh(&adap->tid_release_lock);
3109 *p = adap->tid_release_head;
3110 /* Low 2 bits encode the Tx channel number */
3111 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3112 if (!adap->tid_release_task_busy) {
3113 adap->tid_release_task_busy = true;
3114 queue_work(workq, &adap->tid_release_task);
3116 spin_unlock_bh(&adap->tid_release_lock);
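/* A sketch of the deferred-release list encoding (assumption: tid_tab slots
 * are at least 4-byte aligned, leaving the low two pointer bits free). Each
 * pending entry is the address of its tid_tab slot with the Tx channel
 * tagged into bits 1:0, and the slot itself holds the next list element:
 *
 *	unsigned int chan = (uintptr_t)head & 3;		// Tx channel
 *	void **p = (void **)((uintptr_t)head & ~3UL);	// slot address
 *	head = *p;					// next entry
 */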
3120 * Process the list of pending TID release requests.
3122 static void process_tid_release_list(struct work_struct *work)
3124 struct sk_buff *skb;
3125 struct adapter *adap;
3127 adap = container_of(work, struct adapter, tid_release_task);
3129 spin_lock_bh(&adap->tid_release_lock);
3130 while (adap->tid_release_head) {
3131 void **p = adap->tid_release_head;
3132 unsigned int chan = (uintptr_t)p & 3;
3133 p = (void *)p - chan;
3135 adap->tid_release_head = *p;
3137 spin_unlock_bh(&adap->tid_release_lock);
3139 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3141 schedule_timeout_uninterruptible(1);
3143 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3144 t4_ofld_send(adap, skb);
3145 spin_lock_bh(&adap->tid_release_lock);
3147 adap->tid_release_task_busy = false;
3148 spin_unlock_bh(&adap->tid_release_lock);
3152 * Release a TID and inform HW. If we are unable to allocate the release
3153 * message we defer to a work queue.
3155 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3158 struct sk_buff *skb;
3159 struct adapter *adap = container_of(t, struct adapter, tids);
3161 old = t->tid_tab[tid];
3162 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3164 t->tid_tab[tid] = NULL;
3165 mk_tid_release(skb, chan, tid);
3166 t4_ofld_send(adap, skb);
3168 cxgb4_queue_tid_release(t, chan, tid);
3170 atomic_dec(&t->tids_in_use);
3172 EXPORT_SYMBOL(cxgb4_remove_tid);
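/* A hypothetical caller sketch (not part of this file): a ULD tearing down a
 * connection hands the TID back on the connection's Tx channel; the helper
 * sends the CPL_TID_RELEASE itself or falls back to the work queue above:
 *
 *	cxgb4_remove_tid(&adap->tids, pi->tx_chan, my_conn_tid);
 */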
3175 * Allocate and initialize the TID tables. Returns 0 on success.
3177 static int tid_init(struct tid_info *t)
3180 unsigned int stid_bmap_size;
3181 unsigned int natids = t->natids;
3182 struct adapter *adap = container_of(t, struct adapter, tids);
3184 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3185 size = t->ntids * sizeof(*t->tid_tab) +
3186 natids * sizeof(*t->atid_tab) +
3187 t->nstids * sizeof(*t->stid_tab) +
3188 t->nsftids * sizeof(*t->stid_tab) +
3189 stid_bmap_size * sizeof(long) +
3190 t->nftids * sizeof(*t->ftid_tab) +
3191 t->nsftids * sizeof(*t->ftid_tab);
3193 t->tid_tab = t4_alloc_mem(size);
3197 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3198 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3199 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3200 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
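/* Layout of the single t4_alloc_mem() block carved up above, a sketch of the
 * size arithmetic in tid_init() (sizes in entries, not additional code):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] |
 *	ftid_tab[nftids + nsftids]
 */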
3201 spin_lock_init(&t->stid_lock);
3202 spin_lock_init(&t->atid_lock);
3204 t->stids_in_use = 0;
3206 t->atids_in_use = 0;
3207 atomic_set(&t->tids_in_use, 0);
3209 /* Setup the free list for atid_tab and clear the stid bitmap. */
3212 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3213 t->afree = t->atid_tab;
3215 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3216 /* Reserve stid 0 for T4/T5 adapters */
3217 if (!t->stid_base &&
3218 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3219 __set_bit(0, t->stid_bmap);
3224 static int cxgb4_clip_get(const struct net_device *dev,
3225 const struct in6_addr *lip)
3227 struct adapter *adap;
3228 struct fw_clip_cmd c;
3230 adap = netdev2adap(dev);
3231 memset(&c, 0, sizeof(c));
3232 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3233 FW_CMD_REQUEST | FW_CMD_WRITE);
3234 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3235 c.ip_hi = *(__be64 *)(lip->s6_addr);
3236 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3237 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3240 static int cxgb4_clip_release(const struct net_device *dev,
3241 const struct in6_addr *lip)
3243 struct adapter *adap;
3244 struct fw_clip_cmd c;
3246 adap = netdev2adap(dev);
3247 memset(&c, 0, sizeof(c));
3248 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3249 FW_CMD_REQUEST | FW_CMD_READ);
3250 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3251 c.ip_hi = *(__be64 *)(lip->s6_addr);
3252 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3253 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3257 * cxgb4_create_server - create an IP server
3259 * @stid: the server TID
3260 * @sip: local IP address to bind server to
3261 * @sport: the server's TCP port
3262 * @queue: queue to direct messages from this server to
3264 * Create an IP server for the given port and address.
3265 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3267 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3268 __be32 sip, __be16 sport, __be16 vlan,
3272 struct sk_buff *skb;
3273 struct adapter *adap;
3274 struct cpl_pass_open_req *req;
3277 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3281 adap = netdev2adap(dev);
3282 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3284 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3285 req->local_port = sport;
3286 req->peer_port = htons(0);
3287 req->local_ip = sip;
3288 req->peer_ip = htonl(0);
3289 chan = rxq_to_chan(&adap->sge, queue);
3290 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3291 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3292 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3293 ret = t4_mgmt_tx(adap, skb);
3294 return net_xmit_eval(ret);
3296 EXPORT_SYMBOL(cxgb4_create_server);
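/* A minimal caller sketch (hypothetical values, not part of this file):
 * bind an offloaded IPv4 listener on TCP port 80 with no VLAN match and
 * steer its incoming SYNs to ingress queue 0:
 *
 *	int err = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *				      htons(80), 0, 0);
 */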
3298 /* cxgb4_create_server6 - create an IPv6 server
3300 * @stid: the server TID
3301 * @sip: local IPv6 address to bind server to
3302 * @sport: the server's TCP port
3303 * @queue: queue to direct messages from this server to
3305 * Create an IPv6 server for the given port and address.
3306 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3308 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3309 const struct in6_addr *sip, __be16 sport,
3313 struct sk_buff *skb;
3314 struct adapter *adap;
3315 struct cpl_pass_open_req6 *req;
3318 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3322 adap = netdev2adap(dev);
3323 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3325 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3326 req->local_port = sport;
3327 req->peer_port = htons(0);
3328 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3329 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3330 req->peer_ip_hi = cpu_to_be64(0);
3331 req->peer_ip_lo = cpu_to_be64(0);
3332 chan = rxq_to_chan(&adap->sge, queue);
3333 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3334 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3335 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3336 ret = t4_mgmt_tx(adap, skb);
3337 return net_xmit_eval(ret);
3339 EXPORT_SYMBOL(cxgb4_create_server6);
3341 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3342 unsigned int queue, bool ipv6)
3344 struct sk_buff *skb;
3345 struct adapter *adap;
3346 struct cpl_close_listsvr_req *req;
3349 adap = netdev2adap(dev);
3351 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3355 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3357 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3358 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3359 LISTSVR_IPV6(0)) | QUEUENO(queue));
3360 ret = t4_mgmt_tx(adap, skb);
3361 return net_xmit_eval(ret);
3363 EXPORT_SYMBOL(cxgb4_remove_server);
3366 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3367 * @mtus: the HW MTU table
3368 * @mtu: the target MTU
3369 * @idx: index of selected entry in the MTU table
3371 * Returns the index and the value in the HW MTU table that is closest to
3372 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3373 * table, in which case that smallest available value is selected.
3375 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3380 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3386 EXPORT_SYMBOL(cxgb4_best_mtu);
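/* A worked example (hypothetical table values): with mtus[] = { 1500, 2048,
 * 4096, 9000, ... } a request for mtu = 3000 walks forward while the next
 * entry still fits and stops at index 1:
 *
 *	unsigned int idx;
 *	unsigned int val = cxgb4_best_mtu(mtus, 3000, &idx);
 *	// val == 2048, idx == 1; mtu = 1000 would return mtus[0] instead
 */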
3389 * cxgb4_port_chan - get the HW channel of a port
3390 * @dev: the net device for the port
3392 * Return the HW Tx channel of the given port.
3394 unsigned int cxgb4_port_chan(const struct net_device *dev)
3396 return netdev2pinfo(dev)->tx_chan;
3398 EXPORT_SYMBOL(cxgb4_port_chan);
3400 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3402 struct adapter *adap = netdev2adap(dev);
3403 u32 v1, v2, lp_count, hp_count;
3405 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3406 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3407 if (is_t4(adap->params.chip)) {
3408 lp_count = G_LP_COUNT(v1);
3409 hp_count = G_HP_COUNT(v1);
3411 lp_count = G_LP_COUNT_T5(v1);
3412 hp_count = G_HP_COUNT_T5(v2);
3414 return lpfifo ? lp_count : hp_count;
3416 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3419 * cxgb4_port_viid - get the VI id of a port
3420 * @dev: the net device for the port
3422 * Return the VI id of the given port.
3424 unsigned int cxgb4_port_viid(const struct net_device *dev)
3426 return netdev2pinfo(dev)->viid;
3428 EXPORT_SYMBOL(cxgb4_port_viid);
3431 * cxgb4_port_idx - get the index of a port
3432 * @dev: the net device for the port
3434 * Return the index of the given port.
3436 unsigned int cxgb4_port_idx(const struct net_device *dev)
3438 return netdev2pinfo(dev)->port_id;
3440 EXPORT_SYMBOL(cxgb4_port_idx);
3442 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3443 struct tp_tcp_stats *v6)
3445 struct adapter *adap = pci_get_drvdata(pdev);
3447 spin_lock(&adap->stats_lock);
3448 t4_tp_get_tcp_stats(adap, v4, v6);
3449 spin_unlock(&adap->stats_lock);
3451 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3453 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3454 const unsigned int *pgsz_order)
3456 struct adapter *adap = netdev2adap(dev);
3458 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3459 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3460 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3461 HPZ3(pgsz_order[3]));
3463 EXPORT_SYMBOL(cxgb4_iscsi_init);
3465 int cxgb4_flush_eq_cache(struct net_device *dev)
3467 struct adapter *adap = netdev2adap(dev);
3470 ret = t4_fwaddrspace_write(adap, adap->mbox,
3471 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3474 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3476 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3478 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3482 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3484 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3485 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3490 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3493 struct adapter *adap = netdev2adap(dev);
3494 u16 hw_pidx, hw_cidx;
3497 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3501 if (pidx != hw_pidx) {
3504 if (pidx >= hw_pidx)
3505 delta = pidx - hw_pidx;
3507 delta = size - hw_pidx + pidx;
3509 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3510 QID(qid) | PIDX(delta));
3515 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
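/* A sketch of the wrap-around delta above (hypothetical numbers): the
 * producer index lives in a ring of 'size' entries, so when the driver's
 * pidx has wrapped past the hardware's copy the increment must go the long
 * way around:
 *
 *	pidx = 3, hw_pidx = 1020, size = 1024
 *	delta = size - hw_pidx + pidx = 1024 - 1020 + 3 = 7
 */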
3517 void cxgb4_disable_db_coalescing(struct net_device *dev)
3519 struct adapter *adap;
3521 adap = netdev2adap(dev);
3522 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3525 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3527 void cxgb4_enable_db_coalescing(struct net_device *dev)
3529 struct adapter *adap;
3531 adap = netdev2adap(dev);
3532 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3534 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3536 static struct pci_driver cxgb4_driver;
3538 static void check_neigh_update(struct neighbour *neigh)
3540 const struct device *parent;
3541 const struct net_device *netdev = neigh->dev;
3543 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3544 netdev = vlan_dev_real_dev(netdev);
3545 parent = netdev->dev.parent;
3546 if (parent && parent->driver == &cxgb4_driver.driver)
3547 t4_l2t_update(dev_get_drvdata(parent), neigh);
3550 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3554 case NETEVENT_NEIGH_UPDATE:
3555 check_neigh_update(data);
3557 case NETEVENT_REDIRECT:
3564 static bool netevent_registered;
3565 static struct notifier_block cxgb4_netevent_nb = {
3566 .notifier_call = netevent_cb
3569 static void drain_db_fifo(struct adapter *adap, int usecs)
3571 u32 v1, v2, lp_count, hp_count;
3574 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3575 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3576 if (is_t4(adap->params.chip)) {
3577 lp_count = G_LP_COUNT(v1);
3578 hp_count = G_HP_COUNT(v1);
3580 lp_count = G_LP_COUNT_T5(v1);
3581 hp_count = G_HP_COUNT_T5(v2);
3584 if (lp_count == 0 && hp_count == 0)
3586 set_current_state(TASK_UNINTERRUPTIBLE);
3587 schedule_timeout(usecs_to_jiffies(usecs));
3591 static void disable_txq_db(struct sge_txq *q)
3593 unsigned long flags;
3595 spin_lock_irqsave(&q->db_lock, flags);
3597 spin_unlock_irqrestore(&q->db_lock, flags);
3600 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3602 spin_lock_irq(&q->db_lock);
3603 if (q->db_pidx_inc) {
3604 /* Make sure that all writes to the TX descriptors
3605 * are committed before we tell HW about them.
3608 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3609 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3613 spin_unlock_irq(&q->db_lock);
3616 static void disable_dbs(struct adapter *adap)
3620 for_each_ethrxq(&adap->sge, i)
3621 disable_txq_db(&adap->sge.ethtxq[i].q);
3622 for_each_ofldrxq(&adap->sge, i)
3623 disable_txq_db(&adap->sge.ofldtxq[i].q);
3624 for_each_port(adap, i)
3625 disable_txq_db(&adap->sge.ctrlq[i].q);
3628 static void enable_dbs(struct adapter *adap)
3632 for_each_ethrxq(&adap->sge, i)
3633 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3634 for_each_ofldrxq(&adap->sge, i)
3635 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3636 for_each_port(adap, i)
3637 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3640 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3642 if (adap->uld_handle[CXGB4_ULD_RDMA])
3643 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3647 static void process_db_full(struct work_struct *work)
3649 struct adapter *adap;
3651 adap = container_of(work, struct adapter, db_full_task);
3653 drain_db_fifo(adap, dbfifo_drain_delay);
3655 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3656 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3657 DBFIFO_HP_INT | DBFIFO_LP_INT,
3658 DBFIFO_HP_INT | DBFIFO_LP_INT);
3661 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3663 u16 hw_pidx, hw_cidx;
3666 spin_lock_irq(&q->db_lock);
3667 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3670 if (q->db_pidx != hw_pidx) {
3673 if (q->db_pidx >= hw_pidx)
3674 delta = q->db_pidx - hw_pidx;
3676 delta = q->size - hw_pidx + q->db_pidx;
3678 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3679 QID(q->cntxt_id) | PIDX(delta));
3684 spin_unlock_irq(&q->db_lock);
3686 CH_WARN(adap, "DB drop recovery failed.\n");
3688 static void recover_all_queues(struct adapter *adap)
3692 for_each_ethrxq(&adap->sge, i)
3693 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3694 for_each_ofldrxq(&adap->sge, i)
3695 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3696 for_each_port(adap, i)
3697 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3700 static void process_db_drop(struct work_struct *work)
3702 struct adapter *adap;
3704 adap = container_of(work, struct adapter, db_drop_task);
3706 if (is_t4(adap->params.chip)) {
3707 drain_db_fifo(adap, dbfifo_drain_delay);
3708 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3709 drain_db_fifo(adap, dbfifo_drain_delay);
3710 recover_all_queues(adap);
3711 drain_db_fifo(adap, dbfifo_drain_delay);
3713 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3715 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3716 u16 qid = (dropped_db >> 15) & 0x1ffff;
3717 u16 pidx_inc = dropped_db & 0x1fff;
3719 unsigned short udb_density;
3720 unsigned long qpshift;
3724 dev_warn(adap->pdev_dev,
3725 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3727 (dropped_db >> 14) & 1,
3728 (dropped_db >> 13) & 1,
3731 drain_db_fifo(adap, 1);
3733 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3734 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3735 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3736 qpshift = PAGE_SHIFT - ilog2(udb_density);
3737 udb = qid << qpshift;
3739 page = udb / PAGE_SIZE;
3740 udb += (qid - (page * udb_density)) * 128;
3742 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3744 /* Re-enable BAR2 WC */
3745 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3748 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
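/* A worked example of the BAR2 doorbell address math above (hypothetical
 * numbers): with a 4KB PAGE_SIZE and udb_density = 16 queues per page,
 * qpshift = 12 - ilog2(16) = 8, so qid 21 gives udb = 21 << 8 = 0x1500 and
 * page = 1; the final offset becomes
 *
 *	udb += (21 - 1 * 16) * 128, i.e. 0x1500 + 0x280 = 0x1780
 *
 * which is the sixth 128-byte doorbell slot in the second BAR2 page.
 */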
3751 void t4_db_full(struct adapter *adap)
3753 if (is_t4(adap->params.chip)) {
3755 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3756 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3757 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3758 queue_work(workq, &adap->db_full_task);
3762 void t4_db_dropped(struct adapter *adap)
3764 if (is_t4(adap->params.chip)) {
3766 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3768 queue_work(workq, &adap->db_drop_task);
3771 static void uld_attach(struct adapter *adap, unsigned int uld)
3774 struct cxgb4_lld_info lli;
3777 lli.pdev = adap->pdev;
3778 lli.l2t = adap->l2t;
3779 lli.tids = &adap->tids;
3780 lli.ports = adap->port;
3781 lli.vr = &adap->vres;
3782 lli.mtus = adap->params.mtus;
3783 if (uld == CXGB4_ULD_RDMA) {
3784 lli.rxq_ids = adap->sge.rdma_rxq;
3785 lli.nrxq = adap->sge.rdmaqs;
3786 } else if (uld == CXGB4_ULD_ISCSI) {
3787 lli.rxq_ids = adap->sge.ofld_rxq;
3788 lli.nrxq = adap->sge.ofldqsets;
3790 lli.ntxq = adap->sge.ofldqsets;
3791 lli.nchan = adap->params.nports;
3792 lli.nports = adap->params.nports;
3793 lli.wr_cred = adap->params.ofldq_wr_cred;
3794 lli.adapter_type = adap->params.chip;
3795 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3796 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3797 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3799 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3800 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3802 lli.filt_mode = adap->params.tp.vlan_pri_map;
3803 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3804 for (i = 0; i < NCHAN; i++)
3806 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3807 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3808 lli.fw_vers = adap->params.fw_vers;
3809 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3810 lli.sge_pktshift = adap->sge.pktshift;
3811 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3812 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
3814 handle = ulds[uld].add(&lli);
3815 if (IS_ERR(handle)) {
3816 dev_warn(adap->pdev_dev,
3817 "could not attach to the %s driver, error %ld\n",
3818 uld_str[uld], PTR_ERR(handle));
3822 adap->uld_handle[uld] = handle;
3824 if (!netevent_registered) {
3825 register_netevent_notifier(&cxgb4_netevent_nb);
3826 netevent_registered = true;
3829 if (adap->flags & FULL_INIT_DONE)
3830 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3833 static void attach_ulds(struct adapter *adap)
3837 spin_lock(&adap_rcu_lock);
3838 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3839 spin_unlock(&adap_rcu_lock);
3841 mutex_lock(&uld_mutex);
3842 list_add_tail(&adap->list_node, &adapter_list);
3843 for (i = 0; i < CXGB4_ULD_MAX; i++)
3845 uld_attach(adap, i);
3846 mutex_unlock(&uld_mutex);
3849 static void detach_ulds(struct adapter *adap)
3853 mutex_lock(&uld_mutex);
3854 list_del(&adap->list_node);
3855 for (i = 0; i < CXGB4_ULD_MAX; i++)
3856 if (adap->uld_handle[i]) {
3857 ulds[i].state_change(adap->uld_handle[i],
3858 CXGB4_STATE_DETACH);
3859 adap->uld_handle[i] = NULL;
3861 if (netevent_registered && list_empty(&adapter_list)) {
3862 unregister_netevent_notifier(&cxgb4_netevent_nb);
3863 netevent_registered = false;
3865 mutex_unlock(&uld_mutex);
3867 spin_lock(&adap_rcu_lock);
3868 list_del_rcu(&adap->rcu_node);
3869 spin_unlock(&adap_rcu_lock);
3872 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3876 mutex_lock(&uld_mutex);
3877 for (i = 0; i < CXGB4_ULD_MAX; i++)
3878 if (adap->uld_handle[i])
3879 ulds[i].state_change(adap->uld_handle[i], new_state);
3880 mutex_unlock(&uld_mutex);
3884 * cxgb4_register_uld - register an upper-layer driver
3885 * @type: the ULD type
3886 * @p: the ULD methods
3888 * Registers an upper-layer driver with this driver and notifies the ULD
3889 * about any presently available devices that support its type. Returns
3890 * %-EBUSY if a ULD of the same type is already registered.
3892 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3895 struct adapter *adap;
3897 if (type >= CXGB4_ULD_MAX)
3899 mutex_lock(&uld_mutex);
3900 if (ulds[type].add) {
3905 list_for_each_entry(adap, &adapter_list, list_node)
3906 uld_attach(adap, type);
3907 out: mutex_unlock(&uld_mutex);
3910 EXPORT_SYMBOL(cxgb4_register_uld);
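/* A minimal registration sketch (hypothetical ULD, not part of this file);
 * the ops table supplies at least the add() hook that uld_attach() calls
 * with the filled-in cxgb4_lld_info:
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_uld_add,
 *		.rx_handler	= my_uld_rx_handler,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */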
3913 * cxgb4_unregister_uld - unregister an upper-layer driver
3914 * @type: the ULD type
3916 * Unregisters an existing upper-layer driver.
3918 int cxgb4_unregister_uld(enum cxgb4_uld type)
3920 struct adapter *adap;
3922 if (type >= CXGB4_ULD_MAX)
3924 mutex_lock(&uld_mutex);
3925 list_for_each_entry(adap, &adapter_list, list_node)
3926 adap->uld_handle[type] = NULL;
3927 ulds[type].add = NULL;
3928 mutex_unlock(&uld_mutex);
3931 EXPORT_SYMBOL(cxgb4_unregister_uld);
3933 /* Check if the netdev on which the event occurred belongs to us or not.
3934 * Return success (1) if it belongs, otherwise failure (0).
3936 static int cxgb4_netdev(struct net_device *netdev)
3938 struct adapter *adap;
3941 spin_lock(&adap_rcu_lock);
3942 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
3943 for (i = 0; i < MAX_NPORTS; i++)
3944 if (adap->port[i] == netdev) {
3945 spin_unlock(&adap_rcu_lock);
3948 spin_unlock(&adap_rcu_lock);
3952 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
3953 unsigned long event)
3955 int ret = NOTIFY_DONE;
3958 if (cxgb4_netdev(event_dev)) {
3961 ret = cxgb4_clip_get(event_dev,
3962 (const struct in6_addr *)ifa->addr.s6_addr);
3970 cxgb4_clip_release(event_dev,
3971 (const struct in6_addr *)ifa->addr.s6_addr);
3982 static int cxgb4_inet6addr_handler(struct notifier_block *this,
3983 unsigned long event, void *data)
3985 struct inet6_ifaddr *ifa = data;
3986 struct net_device *event_dev;
3987 int ret = NOTIFY_DONE;
3988 struct bonding *bond = netdev_priv(ifa->idev->dev);
3989 struct list_head *iter;
3990 struct slave *slave;
3991 struct pci_dev *first_pdev = NULL;
3993 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
3994 event_dev = vlan_dev_real_dev(ifa->idev->dev);
3995 ret = clip_add(event_dev, ifa, event);
3996 } else if (ifa->idev->dev->flags & IFF_MASTER) {
3997 /* It is possible that two different adapters are bonded in one
3998 * bond. We need to find all such distinct adapters and add the
3999 * CLIP entry to each of them exactly once.
4001 read_lock(&bond->lock);
4002 bond_for_each_slave(bond, slave, iter) {
4004 ret = clip_add(slave->dev, ifa, event);
4005 /* Only initialize first_pdev if clip_add succeeds,
4006 * since success means the device is ours
4008 if (ret == NOTIFY_OK)
4009 first_pdev = to_pci_dev(
4010 slave->dev->dev.parent);
4011 } else if (first_pdev !=
4012 to_pci_dev(slave->dev->dev.parent))
4013 ret = clip_add(slave->dev, ifa, event);
4015 read_unlock(&bond->lock);
4017 ret = clip_add(ifa->idev->dev, ifa, event);
4022 static struct notifier_block cxgb4_inet6addr_notifier = {
4023 .notifier_call = cxgb4_inet6addr_handler
4026 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4027 * a physical device.
4028 * The physical device reference is needed to send the actual CLIP command.
4030 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4032 struct inet6_dev *idev = NULL;
4033 struct inet6_ifaddr *ifa;
4036 idev = __in6_dev_get(root_dev);
4040 read_lock_bh(&idev->lock);
4041 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4042 ret = cxgb4_clip_get(dev,
4043 (const struct in6_addr *)ifa->addr.s6_addr);
4047 read_unlock_bh(&idev->lock);
4052 static int update_root_dev_clip(struct net_device *dev)
4054 struct net_device *root_dev = NULL;
4057 /* First populate the real net device's IPv6 addresses */
4058 ret = update_dev_clip(dev, dev);
4062 /* Parse all bond and vlan devices layered on top of the physical dev */
4063 for (i = 0; i < VLAN_N_VID; i++) {
4064 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
4068 ret = update_dev_clip(root_dev, dev);
4075 static void update_clip(const struct adapter *adap)
4078 struct net_device *dev;
4083 for (i = 0; i < MAX_NPORTS; i++) {
4084 dev = adap->port[i];
4088 ret = update_root_dev_clip(dev);
4097 * cxgb_up - enable the adapter
4098 * @adap: adapter being enabled
4100 * Called when the first port is enabled, this function performs the
4101 * actions necessary to make an adapter operational, such as completing
4102 * the initialization of HW modules, and enabling interrupts.
4104 * Must be called with the rtnl lock held.
4106 static int cxgb_up(struct adapter *adap)
4110 err = setup_sge_queues(adap);
4113 err = setup_rss(adap);
4117 if (adap->flags & USING_MSIX) {
4118 name_msix_vecs(adap);
4119 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4120 adap->msix_info[0].desc, adap);
4124 err = request_msix_queue_irqs(adap);
4126 free_irq(adap->msix_info[0].vec, adap);
4130 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4131 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4132 adap->port[0]->name, adap);
4138 t4_intr_enable(adap);
4139 adap->flags |= FULL_INIT_DONE;
4140 notify_ulds(adap, CXGB4_STATE_UP);
4145 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4147 t4_free_sge_resources(adap);
4151 static void cxgb_down(struct adapter *adapter)
4153 t4_intr_disable(adapter);
4154 cancel_work_sync(&adapter->tid_release_task);
4155 cancel_work_sync(&adapter->db_full_task);
4156 cancel_work_sync(&adapter->db_drop_task);
4157 adapter->tid_release_task_busy = false;
4158 adapter->tid_release_head = NULL;
4160 if (adapter->flags & USING_MSIX) {
4161 free_msix_queue_irqs(adapter);
4162 free_irq(adapter->msix_info[0].vec, adapter);
4164 free_irq(adapter->pdev->irq, adapter);
4165 quiesce_rx(adapter);
4166 t4_sge_stop(adapter);
4167 t4_free_sge_resources(adapter);
4168 adapter->flags &= ~FULL_INIT_DONE;
4172 * net_device operations
4174 static int cxgb_open(struct net_device *dev)
4177 struct port_info *pi = netdev_priv(dev);
4178 struct adapter *adapter = pi->adapter;
4180 netif_carrier_off(dev);
4182 if (!(adapter->flags & FULL_INIT_DONE)) {
4183 err = cxgb_up(adapter);
4188 err = link_start(dev);
4190 netif_tx_start_all_queues(dev);
4194 static int cxgb_close(struct net_device *dev)
4196 struct port_info *pi = netdev_priv(dev);
4197 struct adapter *adapter = pi->adapter;
4199 netif_tx_stop_all_queues(dev);
4200 netif_carrier_off(dev);
4201 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4204 /* Return an error number if the indicated filter isn't writable ...
4206 static int writable_filter(struct filter_entry *f)
4216 /* Delete the filter at the specified index (if valid). The checks for all
4217 * the common problems with doing this (the filter being locked, currently
4218 * pending in another operation, etc.) are performed here.
4220 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4222 struct filter_entry *f;
4225 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4228 f = &adapter->tids.ftid_tab[fidx];
4229 ret = writable_filter(f);
4233 return del_filter_wr(adapter, fidx);
4238 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4239 __be32 sip, __be16 sport, __be16 vlan,
4240 unsigned int queue, unsigned char port, unsigned char mask)
4243 struct filter_entry *f;
4244 struct adapter *adap;
4248 adap = netdev2adap(dev);
4250 /* Adjust stid to correct filter index */
4251 stid -= adap->tids.sftid_base;
4252 stid += adap->tids.nftids;
4254 /* Check to make sure the filter requested is writable ...
4256 f = &adap->tids.ftid_tab[stid];
4257 ret = writable_filter(f);
4261 /* Clear out any old resources being used by the filter before
4262 * we start constructing the new filter.
4265 clear_filter(adap, f);
4267 /* Clear out filter specifications */
4268 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4269 f->fs.val.lport = cpu_to_be16(sport);
4270 f->fs.mask.lport = ~0;
4272 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4273 for (i = 0; i < 4; i++) {
4274 f->fs.val.lip[i] = val[i];
4275 f->fs.mask.lip[i] = ~0;
4277 if (adap->params.tp.vlan_pri_map & F_PORT) {
4278 f->fs.val.iport = port;
4279 f->fs.mask.iport = mask;
4283 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4284 f->fs.val.proto = IPPROTO_TCP;
4285 f->fs.mask.proto = ~0;
4290 /* Mark filter as locked */
4294 ret = set_filter_wr(adap, stid);
4296 clear_filter(adap, f);
4302 EXPORT_SYMBOL(cxgb4_create_server_filter);
4304 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4305 unsigned int queue, bool ipv6)
4308 struct filter_entry *f;
4309 struct adapter *adap;
4311 adap = netdev2adap(dev);
4313 /* Adjust stid to correct filter index */
4314 stid -= adap->tids.sftid_base;
4315 stid += adap->tids.nftids;
4317 f = &adap->tids.ftid_tab[stid];
4318 /* Unlock the filter */
4321 ret = delete_filter(adap, stid);
4327 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4329 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4330 struct rtnl_link_stats64 *ns)
4332 struct port_stats stats;
4333 struct port_info *p = netdev_priv(dev);
4334 struct adapter *adapter = p->adapter;
4336 /* Block retrieving statistics during EEH error
4337 * recovery. Otherwise, the recovery might fail
4338 * and the PCI device will be removed permanently
4340 spin_lock(&adapter->stats_lock);
4341 if (!netif_device_present(dev)) {
4342 spin_unlock(&adapter->stats_lock);
4345 t4_get_port_stats(adapter, p->tx_chan, &stats);
4346 spin_unlock(&adapter->stats_lock);
4348 ns->tx_bytes = stats.tx_octets;
4349 ns->tx_packets = stats.tx_frames;
4350 ns->rx_bytes = stats.rx_octets;
4351 ns->rx_packets = stats.rx_frames;
4352 ns->multicast = stats.rx_mcast_frames;
4354 /* detailed rx_errors */
4355 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4357 ns->rx_over_errors = 0;
4358 ns->rx_crc_errors = stats.rx_fcs_err;
4359 ns->rx_frame_errors = stats.rx_symbol_err;
4360 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4361 stats.rx_ovflow2 + stats.rx_ovflow3 +
4362 stats.rx_trunc0 + stats.rx_trunc1 +
4363 stats.rx_trunc2 + stats.rx_trunc3;
4364 ns->rx_missed_errors = 0;
4366 /* detailed tx_errors */
4367 ns->tx_aborted_errors = 0;
4368 ns->tx_carrier_errors = 0;
4369 ns->tx_fifo_errors = 0;
4370 ns->tx_heartbeat_errors = 0;
4371 ns->tx_window_errors = 0;
4373 ns->tx_errors = stats.tx_error_frames;
4374 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4375 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4379 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4382 int ret = 0, prtad, devad;
4383 struct port_info *pi = netdev_priv(dev);
4384 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4388 if (pi->mdio_addr < 0)
4390 data->phy_id = pi->mdio_addr;
4394 if (mdio_phy_id_is_c45(data->phy_id)) {
4395 prtad = mdio_phy_id_prtad(data->phy_id);
4396 devad = mdio_phy_id_devad(data->phy_id);
4397 } else if (data->phy_id < 32) {
4398 prtad = data->phy_id;
4400 data->reg_num &= 0x1f;
4404 mbox = pi->adapter->fn;
4405 if (cmd == SIOCGMIIREG)
4406 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4407 data->reg_num, &data->val_out);
4409 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4410 data->reg_num, data->val_in);
4418 static void cxgb_set_rxmode(struct net_device *dev)
4420 /* unfortunately we can't return errors to the stack */
4421 set_rxmode(dev, -1, false);
4424 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4427 struct port_info *pi = netdev_priv(dev);
4429 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4431 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4438 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4441 struct sockaddr *addr = p;
4442 struct port_info *pi = netdev_priv(dev);
4444 if (!is_valid_ether_addr(addr->sa_data))
4445 return -EADDRNOTAVAIL;
4447 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4448 pi->xact_addr_filt, addr->sa_data, true, true);
4452 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4453 pi->xact_addr_filt = ret;
4457 #ifdef CONFIG_NET_POLL_CONTROLLER
4458 static void cxgb_netpoll(struct net_device *dev)
4460 struct port_info *pi = netdev_priv(dev);
4461 struct adapter *adap = pi->adapter;
4463 if (adap->flags & USING_MSIX) {
4465 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4467 for (i = pi->nqsets; i; i--, rx++)
4468 t4_sge_intr_msix(0, &rx->rspq);
4470 t4_intr_handler(adap)(0, adap);
4474 static const struct net_device_ops cxgb4_netdev_ops = {
4475 .ndo_open = cxgb_open,
4476 .ndo_stop = cxgb_close,
4477 .ndo_start_xmit = t4_eth_xmit,
4478 .ndo_get_stats64 = cxgb_get_stats,
4479 .ndo_set_rx_mode = cxgb_set_rxmode,
4480 .ndo_set_mac_address = cxgb_set_mac_addr,
4481 .ndo_set_features = cxgb_set_features,
4482 .ndo_validate_addr = eth_validate_addr,
4483 .ndo_do_ioctl = cxgb_ioctl,
4484 .ndo_change_mtu = cxgb_change_mtu,
4485 #ifdef CONFIG_NET_POLL_CONTROLLER
4486 .ndo_poll_controller = cxgb_netpoll,
4490 void t4_fatal_err(struct adapter *adap)
4492 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4493 t4_intr_disable(adap);
4494 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4497 static void setup_memwin(struct adapter *adap)
4499 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4501 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4502 if (is_t4(adap->params.chip)) {
4503 mem_win0_base = bar0 + MEMWIN0_BASE;
4504 mem_win1_base = bar0 + MEMWIN1_BASE;
4505 mem_win2_base = bar0 + MEMWIN2_BASE;
4507 /* For T5, only relative offset inside the PCIe BAR is passed */
4508 mem_win0_base = MEMWIN0_BASE;
4509 mem_win1_base = MEMWIN1_BASE_T5;
4510 mem_win2_base = MEMWIN2_BASE_T5;
4512 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4513 mem_win0_base | BIR(0) |
4514 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4515 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4516 mem_win1_base | BIR(0) |
4517 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4518 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4519 mem_win2_base | BIR(0) |
4520 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
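/* A sketch of the WINDOW() encoding above: the field holds
 * log2(aperture) - 10, so a 64KB window would be programmed as
 * ilog2(65536) - 10 = 16 - 10 = 6 and the hardware reconstructs the
 * aperture as 1 << (6 + 10). (The 64KB figure is illustrative; the real
 * apertures come from the MEMWIN*_APERTURE constants.)
 */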
4523 static void setup_memwin_rdma(struct adapter *adap)
4525 if (adap->vres.ocq.size) {
4526 unsigned int start, sz_kb;
4528 start = pci_resource_start(adap->pdev, 2) +
4529 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4530 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4532 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4533 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4535 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4536 adap->vres.ocq.start);
4538 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4542 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4547 /* get device capabilities */
4548 memset(c, 0, sizeof(*c));
4549 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4550 FW_CMD_REQUEST | FW_CMD_READ);
4551 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4552 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4556 /* select capabilities we'll be using */
4557 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4559 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4561 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4562 } else if (vf_acls) {
4563 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4566 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4567 FW_CMD_REQUEST | FW_CMD_WRITE);
4568 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4572 ret = t4_config_glbl_rss(adap, adap->fn,
4573 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4574 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4575 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4579 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4580 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4586 /* tweak some settings */
4587 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4588 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4589 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4590 v = t4_read_reg(adap, TP_PIO_DATA);
4591 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4593 /* first 4 Tx modulation queues point to consecutive Tx channels */
4594 adap->params.tp.tx_modq_map = 0xE4;
4595 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4596 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4598 /* associate each Tx modulation queue with consecutive Tx channels */
4600 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4601 &v, 1, A_TP_TX_SCHED_HDR);
4602 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4603 &v, 1, A_TP_TX_SCHED_FIFO);
4604 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4605 &v, 1, A_TP_TX_SCHED_PCMD);
4607 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4608 if (is_offload(adap)) {
4609 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4610 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4611 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4612 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4613 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4614 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4615 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4616 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4617 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4618 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4621 /* get basic stuff going */
4622 return t4_early_init(adap, adap->fn);
4626 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4628 #define MAX_ATIDS 8192U
4631 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4633 * If the firmware we're dealing with has Configuration File support, then
4634 * we use that to perform all configuration.
4638 * Tweak configuration based on module parameters, etc. Most of these have
4639 * defaults assigned to them by Firmware Configuration Files (if we're using
4640 * them) but need to be explicitly set if we're using hard-coded
4641 * initialization. But even in the case of using Firmware Configuration
4642 * Files, we'd like to expose the ability to change these via module
4643 * parameters so these are essentially common tweaks/settings for
4644 * Configuration Files and hard-coded initialization ...
4646 static int adap_init0_tweaks(struct adapter *adapter)
4649 * Fix up various Host-Dependent Parameters like Page Size, Cache
4650 * Line Size, etc. The firmware default is for a 4KB Page Size and
4651 * 64B Cache Line Size ...
4653 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4656 * Process module parameters which affect early initialization.
4658 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4659 dev_err(&adapter->pdev->dev,
4660 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4664 t4_set_reg_field(adapter, SGE_CONTROL,
4666 PKTSHIFT(rx_dma_offset));
4669 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4670 * adds the pseudo header itself.
4672 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4673 CSUM_HAS_PSEUDO_HDR, 0);
4679 * Attempt to initialize the adapter via a Firmware Configuration File.
4681 static int adap_init0_config(struct adapter *adapter, int reset)
4683 struct fw_caps_config_cmd caps_cmd;
4684 const struct firmware *cf;
4685 unsigned long mtype = 0, maddr = 0;
4686 u32 finiver, finicsum, cfcsum;
4688 int config_issued = 0;
4689 char *fw_config_file, fw_config_file_path[256];
4690 char *config_name = NULL;
4693 * Reset device if necessary.
4696 ret = t4_fw_reset(adapter, adapter->mbox,
4697 PIORSTMODE | PIORST);
4703 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4704 * then use that. Otherwise, use the configuration file stored
4705 * in the adapter flash ...
4707 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4709 fw_config_file = FW4_CFNAME;
4712 fw_config_file = FW5_CFNAME;
4715 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4716 adapter->pdev->device);
4721 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4723 config_name = "On FLASH";
4724 mtype = FW_MEMTYPE_CF_FLASH;
4725 maddr = t4_flash_cfg_addr(adapter);
4727 u32 params[7], val[7];
4729 sprintf(fw_config_file_path,
4730 "/lib/firmware/%s", fw_config_file);
4731 config_name = fw_config_file_path;
4733 if (cf->size >= FLASH_CFG_MAX_SIZE)
4736 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4737 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4738 ret = t4_query_params(adapter, adapter->mbox,
4739 adapter->fn, 0, 1, params, val);
4742 * For t4_memory_write() below, addresses and
4743 * sizes have to be in terms of multiples of 4
4744 * bytes. So, if the Configuration File isn't
4745 * a multiple of 4 bytes in length we'll have
4746 * to write that out separately since we can't
4747 * guarantee that the bytes following the
4748 * residual byte in the buffer returned by
4749 * request_firmware() are zeroed out ...
4751 size_t resid = cf->size & 0x3;
4752 size_t size = cf->size & ~0x3;
4753 __be32 *data = (__be32 *)cf->data;
4755 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4756 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4758 ret = t4_memory_write(adapter, mtype, maddr,
4760 if (ret == 0 && resid != 0) {
4767 last.word = data[size >> 2];
4768 for (i = resid; i < 4; i++)
4770 ret = t4_memory_write(adapter, mtype,
4777 release_firmware(cf);
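/* A worked example of the residual handling above (hypothetical size):
 * a 1007-byte Configuration File splits into size = 1004 (written as
 * 251 32-bit words) and resid = 3; the last three bytes are copied into
 * a zero-padded 32-bit word and written separately, since the bytes past
 * cf->size in the buffer returned by request_firmware() are not
 * guaranteed to be zero.
 */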
4783 * Issue a Capability Configuration command to the firmware to get it
4784 * to parse the Configuration File. We don't use t4_fw_config_file()
4785 * because we want the ability to modify various features after we've
4786 * processed the configuration file ...
4788 memset(&caps_cmd, 0, sizeof(caps_cmd));
4789 caps_cmd.op_to_write =
4790 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4793 caps_cmd.cfvalid_to_len16 =
4794 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4795 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4796 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4797 FW_LEN16(caps_cmd));
4798 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4801 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4802 * Configuration File in FLASH), our last gasp effort is to use the
4803 * Firmware Configuration File which is embedded in the firmware. A
4804 * very few early versions of the firmware didn't have one embedded
4805 * but we can ignore those.
4807 if (ret == -ENOENT) {
4808 memset(&caps_cmd, 0, sizeof(caps_cmd));
4809 caps_cmd.op_to_write =
4810 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4813 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4814 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4815 sizeof(caps_cmd), &caps_cmd);
4816 config_name = "Firmware Default";
4823 finiver = ntohl(caps_cmd.finiver);
4824 finicsum = ntohl(caps_cmd.finicsum);
4825 cfcsum = ntohl(caps_cmd.cfcsum);
4826 if (finicsum != cfcsum)
4827 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4828 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4832 * And now tell the firmware to use the configuration we just loaded.
4834 caps_cmd.op_to_write =
4835 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4838 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4839 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4845 * Tweak configuration based on system architecture, module parameters, etc.
4848 ret = adap_init0_tweaks(adapter);
4853 * And finally tell the firmware to initialize itself using the
4854 * parameters from the Configuration File.
4856 ret = t4_fw_initialize(adapter, adapter->mbox);
4861 * Return successfully and note that we're operating with parameters
4862 * not supplied by the driver, rather than from hard-wired
4863 * initialization constants buried in the driver.
4865 adapter->flags |= USING_SOFT_PARAMS;
4866 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4867 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4868 config_name, finiver, cfcsum);
4872 * Something bad happened. Return the error ... (If the "error"
4873 * is that there's no Configuration File on the adapter we don't
4874 * want to issue a warning since this is fairly common.)
4877 if (config_issued && ret != -ENOENT)
4878 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4884 * Attempt to initialize the adapter via hard-coded, driver supplied
4887 static int adap_init0_no_config(struct adapter *adapter, int reset)
4889 struct sge *s = &adapter->sge;
4890 struct fw_caps_config_cmd caps_cmd;
4895 * Reset device if necessary
4898 ret = t4_fw_reset(adapter, adapter->mbox,
4899 PIORSTMODE | PIORST);
4905 * Get device capabilities and select which we'll be using.
4907 memset(&caps_cmd, 0, sizeof(caps_cmd));
4908 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4909 FW_CMD_REQUEST | FW_CMD_READ);
4910 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4911 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4916 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4918 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4920 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4921 } else if (vf_acls) {
4922 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4925 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4926 FW_CMD_REQUEST | FW_CMD_WRITE);
4927 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4933 * Tweak configuration based on system architecture, module parameters, etc.
4936 ret = adap_init0_tweaks(adapter);
4941 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4942 * mode which maps each Virtual Interface to its own section of
4943 * the RSS Table and we turn on all map and hash enables ...
4945 adapter->flags |= RSS_TNLALLLOOKUP;
4946 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4947 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4948 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4949 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4950 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4951 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4956 * Set up our own fundamental resource provisioning ...
4958 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4959 PFRES_NEQ, PFRES_NETHCTRL,
4960 PFRES_NIQFLINT, PFRES_NIQ,
4961 PFRES_TC, PFRES_NVI,
4962 FW_PFVF_CMD_CMASK_MASK,
4963 pfvfres_pmask(adapter, adapter->fn, 0),
4965 PFRES_R_CAPS, PFRES_WX_CAPS);
4970 * Perform low level SGE initialization. We need to do this before we
4971 * send the firmware the INITIALIZE command because that will cause
4972 * any other PF Drivers which are waiting for the Master
4973 * Initialization to proceed forward.
4975 for (i = 0; i < SGE_NTIMERS - 1; i++)
4976 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4977 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4978 s->counter_val[0] = 1;
4979 for (i = 1; i < SGE_NCOUNTERS; i++)
4980 s->counter_val[i] = min(intr_cnt[i - 1],
4981 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4982 t4_sge_init(adapter);
4984 #ifdef CONFIG_PCI_IOV
4986 * Provision resource limits for Virtual Functions. We currently
4987 * grant them all the same static resource limits except for the Port
4988 * Access Rights Mask which we're assigning based on the PF. All of
4989 * the static provisioning stuff for both the PF and VF really needs
4990 * to be managed in a persistent manner for each device which the
4991 * firmware controls.
4996 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4997 if (num_vf[pf] <= 0)
5000 /* VF numbering starts at 1! */
5001 for (vf = 1; vf <= num_vf[pf]; vf++) {
5002 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5004 VFRES_NEQ, VFRES_NETHCTRL,
5005 VFRES_NIQFLINT, VFRES_NIQ,
5006 VFRES_TC, VFRES_NVI,
5007 FW_PFVF_CMD_CMASK_MASK,
5011 VFRES_R_CAPS, VFRES_WX_CAPS);
5013 dev_warn(adapter->pdev_dev,
5015 "provision pf/vf=%d/%d; "
5016 "err=%d\n", pf, vf, ret);
5023 * Set up the default filter mode. Later we'll want to implement this
5024 * via a firmware command, etc. ... This needs to be done before the
5025 * firmware initialization command ... If the selected set of fields
5026 * isn't equal to the default value, we'll need to make sure that the
5027 * field selections will fit in the 36-bit budget.
5029 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5032 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5033 switch (tp_vlan_pri_map & (1 << j)) {
5035 /* compressed filter field not enabled */
5055 case ETHERTYPE_MASK:
5061 case MPSHITTYPE_MASK:
5064 case FRAGMENTATION_MASK:
5070 dev_err(adapter->pdev_dev,
5071 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5072 " using %#x\n", tp_vlan_pri_map, bits,
5073 TP_VLAN_PRI_MAP_DEFAULT);
5074 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5077 v = tp_vlan_pri_map;
5078 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5079 &v, 1, TP_VLAN_PRI_MAP);
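/* A worked example of the 36-bit budget check above (hypothetical mask):
 * each compressed filter field selected in tp_vlan_pri_map consumes its
 * own width, e.g. VLAN (17 bits) + PROTOCOL (8) + PORT (3) + TOS (8)
 * totals 36 bits and just fits, while adding ETHERTYPE (16) would
 * overflow the budget and force the fall-back to TP_VLAN_PRI_MAP_DEFAULT.
 */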
5082 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5083 * to support any of the compressed filter fields above. Newer
5084 * versions of the firmware do this automatically but it doesn't hurt
5085 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5086 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5087 * since the firmware automatically turns this on and off when we have
5088 * a non-zero number of filters active (since it does have a
5089 * performance impact).
5091 if (tp_vlan_pri_map)
5092 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5093 FIVETUPLELOOKUP_MASK,
5094 FIVETUPLELOOKUP_MASK);
5097 * Tweak some settings.
5099 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5100 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5101 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5102 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5105 * Get basic stuff going by issuing the Firmware Initialize command.
5106 * Note that this _must_ be after all PFVF commands ...
5108 ret = t4_fw_initialize(adapter, adapter->mbox);
5113 * Return successfully!
5115 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5116 "driver parameters\n");
5120 * Something bad happened. Return the error ...
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

5199 * then let's consider upgrading the firmware ... (We always want
5200 * to check the firmware version number in order to A. get it for
5201 * later reporting and B. to warn if the currently loaded firmware
5202 * is excessively mismatched relative to the driver.)
5204 t4_get_fw_version(adap, &adap->params.fw_vers);
5205 t4_get_tp_version(adap, &adap->params.tp_vers);
5206 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5207 struct fw_info *fw_info;
5208 struct fw_hdr *card_fw;
5209 const struct firmware *fw;
5210 const u8 *fw_data = NULL;
5211 unsigned int fw_size = 0;
5213 /* This is the firmware whose headers the driver was compiled
5216 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5217 if (fw_info == NULL) {
5218 dev_err(adap->pdev_dev,
5219 "unable to get firmware info for chip %d.\n",
5220 CHELSIO_CHIP_VERSION(adap->params.chip));
5224 /* allocate memory to read the header of the firmware on the
5227 card_fw = t4_alloc_mem(sizeof(*card_fw));
5229 /* Get FW from from /lib/firmware/ */
5230 ret = request_firmware(&fw, fw_info->fw_mod_name,
5233 dev_err(adap->pdev_dev,
5234 "unable to load firmware image %s, error %d\n",
5235 fw_info->fw_mod_name, ret);
5241 /* upgrade FW logic */
5242 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5247 release_firmware(fw);
5248 t4_free_mem(card_fw);
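
	/*
	 * A rough summary (see t4_prep_fw() for the authoritative logic):
	 * t4_prep_fw() weighs the firmware version the driver was built
	 * against, the header of the image in the card's flash, and the
	 * image obtained from /lib/firmware, and flashes the host image
	 * when that is what it takes to end up with firmware the driver
	 * can interoperate with.
	 */
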
	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files, warn the user.
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */
				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File "
						 "present on adapter. Using "
						 "hard-wired configuration "
						 "parameters.\n");
					ret = adap_init0_no_config(adap,
								   reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}
	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If the Active filter region is non-empty, we can establish
	 * offload connections through firmware work requests.
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region.  Divide the available
		 * filter region into two parts.  Regular filters get 1/3rd
		 * and server filters get 2/3rd.  This is only enabled if
		 * the workaround path is enabled.
		 * 1. Regular filters.
		 * 2. Server filters: these are special filters which are
		 *    used to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
					adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
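
	/*
	 * Worked example of the 1/3 vs 2/3 split above (illustrative numbers
	 * only): with nftids = 496, DIV_ROUND_UP(496, 3) = 166, so regular
	 * filters keep 166 TIDs starting at ftid_base while server filters
	 * get the remaining 330 TIDs starting at sftid_base = ftid_base + 166.
	 */
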
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}

	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};

static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
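
/*
 * Usage note (illustrative): init_rspq(&q, 6, 0, 512, 64) selects hold-off
 * timer index 6 and packet-count threshold index 0 (any pkt_cnt_idx below
 * SGE_NCOUNTERS enables the counter; passing SGE_NCOUNTERS or more disables
 * it), with a 512-entry queue of 64-byte ingress entries.
 */
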
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to
		 * 1G, otherwise we divide all available queues amongst the
		 * channels capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
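
/*
 * Worked example (illustrative): a 2-port 10G adapter with MAX_ETH_QSETS =
 * 32 gives q10g = (32 - 0) / 2 = 16, typically capped to 8 by
 * netif_get_num_default_rss_queues(), so each port gets 8 queue sets and
 * s->ethqsets = s->max_ethqsets = 16.
 */
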
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
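
/*
 * Example (illustrative): four ports with nqsets {4, 4, 1, 1} (ethqsets =
 * 10) reduced to n = 8 end up as {3, 3, 1, 1}, with first_qset renumbered
 * to {0, 3, 6, 7}.
 */
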
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}

#undef EXTRA_VECS
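
/*
 * Vector accounting example (illustrative): a 4-port offload-capable
 * adapter with max_ethqsets = 16, rdmaqs = 4 and ofldqsets = 8 requests
 * want = 16 + 2 + 4 + 8 = 30 vectors with a floor of need = 4 + 2 + 8 = 14.
 * If only 20 vectors are granted, Ethernet is trimmed to 20 - 2 - 8 = 10
 * queue sets and the offload group keeps (20 - 2 - 10 - 4) / 4 * 4 = 4.
 */
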
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
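
/*
 * ethtool_rxfh_indir_default(j, n) is simply j % n, so the indirection
 * table built above spreads RSS buckets round-robin across the port's
 * nqsets receive queues.
 */
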
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
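
/*
 * Sample output (illustrative only; the exact strings depend on the card):
 *   eth0: Chelsio T520-CR rev 1 10G/40GBASE-SR RNIC PCIe x8 8 GT/s MSI-X
 *   eth0: S/N: PT12345678, P/N: 110-1088-40
 */
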
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less than the number of segments that can be
		 * accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}
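
	/*
	 * Arithmetic behind the check above (illustrative, assuming 4KB
	 * pages): num_seg = 4096 / 128 = 32, so write coalescing remains
	 * usable only while this PF's egress-queues-per-page setting is at
	 * most 32.
	 */
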
	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;
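
	/*
	 * Interrupt fallback order, for reference: MSI-X first (msi > 1),
	 * then MSI (msi > 0), then legacy INTx if neither can be enabled.
	 * (The msi module parameter is defined earlier in this file; we
	 * assume its default prefers MSI-X.)
	 */
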
	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);
		return ret;
	}

	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

	return 0;
}

static void __exit cxgb4_cleanup_module(void)
{
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);