/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
202 #define MDIO_ATTEMPTS 20
205 * MI1 read/write operations for clause 22 PHYs.
207 static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
210 struct port_info *pi = netdev_priv(dev);
211 struct adapter *adapter = pi->adapter;
213 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
215 mutex_lock(&adapter->mdio_lock);
216 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
217 t3_write_reg(adapter, A_MI1_ADDR, addr);
218 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
219 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
221 ret = t3_read_reg(adapter, A_MI1_DATA);
222 mutex_unlock(&adapter->mdio_lock);
226 static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
227 u16 reg_addr, u16 val)
229 struct port_info *pi = netdev_priv(dev);
230 struct adapter *adapter = pi->adapter;
232 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
234 mutex_lock(&adapter->mdio_lock);
235 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
236 t3_write_reg(adapter, A_MI1_ADDR, addr);
237 t3_write_reg(adapter, A_MI1_DATA, val);
238 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
239 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
240 mutex_unlock(&adapter->mdio_lock);
244 static const struct mdio_ops mi1_mdio_ops = {
246 .write = t3_mi1_write,
247 .mode_support = MDIO_SUPPORTS_C22
251 * Performs the address cycle for clause 45 PHYs.
252 * Must be called with the MDIO_LOCK held.
254 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
257 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
259 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
260 t3_write_reg(adapter, A_MI1_ADDR, addr);
261 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
262 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
263 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
268 * MI1 read/write operations for indirect-addressed PHYs.
270 static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
273 struct port_info *pi = netdev_priv(dev);
274 struct adapter *adapter = pi->adapter;
277 mutex_lock(&adapter->mdio_lock);
278 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
280 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
281 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
284 ret = t3_read_reg(adapter, A_MI1_DATA);
286 mutex_unlock(&adapter->mdio_lock);
290 static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
291 u16 reg_addr, u16 val)
293 struct port_info *pi = netdev_priv(dev);
294 struct adapter *adapter = pi->adapter;
297 mutex_lock(&adapter->mdio_lock);
298 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
300 t3_write_reg(adapter, A_MI1_DATA, val);
301 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
302 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
305 mutex_unlock(&adapter->mdio_lock);
309 static const struct mdio_ops mi1_mdio_ext_ops = {
310 .read = mi1_ext_read,
311 .write = mi1_ext_write,
312 .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
341 * t3_phy_reset - reset a PHY block
342 * @phy: the PHY to operate on
343 * @mmd: the device address of the PHY block to reset
344 * @wait: how long to wait for the reset to complete in 1ms increments
346 * Resets a PHY block and optionally waits for the reset to complete.
347 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
350 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
355 err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
361 err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
364 ctl &= MDIO_CTRL1_RESET;
367 } while (ctl && --wait);
373 * t3_phy_advertise - set the PHY advertisement registers for autoneg
374 * @phy: the PHY to operate on
375 * @advert: bitmap of capabilities the PHY should advertise
377 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
378 * requested capabilities.
380 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
383 unsigned int val = 0;
385 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
389 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
390 if (advert & ADVERTISED_1000baseT_Half)
391 val |= ADVERTISE_1000HALF;
392 if (advert & ADVERTISED_1000baseT_Full)
393 val |= ADVERTISE_1000FULL;
395 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
400 if (advert & ADVERTISED_10baseT_Half)
401 val |= ADVERTISE_10HALF;
402 if (advert & ADVERTISED_10baseT_Full)
403 val |= ADVERTISE_10FULL;
404 if (advert & ADVERTISED_100baseT_Half)
405 val |= ADVERTISE_100HALF;
406 if (advert & ADVERTISED_100baseT_Full)
407 val |= ADVERTISE_100FULL;
408 if (advert & ADVERTISED_Pause)
409 val |= ADVERTISE_PAUSE_CAP;
410 if (advert & ADVERTISED_Asym_Pause)
411 val |= ADVERTISE_PAUSE_ASYM;
412 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
416 * t3_phy_advertise_fiber - set fiber PHY advertisement register
417 * @phy: the PHY to operate on
418 * @advert: bitmap of capabilities the PHY should advertise
420 * Sets a fiber PHY's advertisement register to advertise the
421 * requested capabilities.
423 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
425 unsigned int val = 0;
427 if (advert & ADVERTISED_1000baseT_Half)
428 val |= ADVERTISE_1000XHALF;
429 if (advert & ADVERTISED_1000baseT_Full)
430 val |= ADVERTISE_1000XFULL;
431 if (advert & ADVERTISED_Pause)
432 val |= ADVERTISE_1000XPAUSE;
433 if (advert & ADVERTISED_Asym_Pause)
434 val |= ADVERTISE_1000XPSE_ASYM;
435 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
439 * t3_set_phy_speed_duplex - force PHY speed and duplex
440 * @phy: the PHY to operate on
441 * @speed: requested PHY speed
442 * @duplex: requested PHY duplex
444 * Force a 10/100/1000 PHY's speed and duplex. This also disables
445 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
447 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
452 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
457 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
458 if (speed == SPEED_100)
459 ctl |= BMCR_SPEED100;
460 else if (speed == SPEED_1000)
461 ctl |= BMCR_SPEED1000;
464 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
465 if (duplex == DUPLEX_FULL)
466 ctl |= BMCR_FULLDPLX;
468 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
469 ctl |= BMCR_ANENABLE;
470 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
473 int t3_phy_lasi_intr_enable(struct cphy *phy)
475 return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
476 MDIO_PMA_LASI_LSALARM);
479 int t3_phy_lasi_intr_disable(struct cphy *phy)
481 return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
484 int t3_phy_lasi_intr_clear(struct cphy *phy)
488 return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
491 int t3_phy_lasi_intr_handler(struct cphy *phy)
494 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
499 return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
/*
 * Static per-board capability table, indexed by board type.  Each entry
 * supplies GPIO output-enable/output-value masks, interrupt GPIO pins,
 * the supported-media flags for the ports, the MDIO operations to use
 * (clause-22 ops for the 1G copper boards, extended/clause-45 ops for
 * the 10G boards) and a human-readable board name.
 *
 * NOTE(review): this listing is truncated -- the leading scalar fields of
 * each initializer (and possibly some array entries) are missing from
 * view; verify against the original source before relying on this table.
 */
502 static const struct adapter_info t3_adap_info[] = {
504 F_GPIO2_OEN | F_GPIO4_OEN |
505 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
506 &mi1_mdio_ops, "Chelsio PE9000"},
508 F_GPIO2_OEN | F_GPIO4_OEN |
509 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
510 &mi1_mdio_ops, "Chelsio T302"},
512 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
513 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
514 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
515 &mi1_mdio_ext_ops, "Chelsio T310"},
517 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
518 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
519 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
520 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
521 &mi1_mdio_ext_ops, "Chelsio T320"},
525 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
526 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
527 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
528 &mi1_mdio_ext_ops, "Chelsio T310" },
530 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
531 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
532 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
533 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
537 * Return the adapter_info structure with a given index. Out-of-range indices
540 const struct adapter_info *t3_get_adapter_info(unsigned int id)
542 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Per-port-type hook used to prepare the PHY driver for a port. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
550 static const struct port_type_info port_types[] = {
552 { t3_ael1002_phy_prep },
553 { t3_vsc8211_phy_prep },
555 { t3_xaui_direct_phy_prep },
556 { t3_ael2005_phy_prep },
557 { t3_qt2045_phy_prep },
558 { t3_ael1006_phy_prep },
560 { t3_aq100x_phy_prep },
561 { t3_ael2020_phy_prep },
564 #define VPD_ENTRY(name, len) \
565 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
568 * Partial EEPROM Vital Product Data structure. Includes only the ID and
577 VPD_ENTRY(pn, 16); /* part number */
578 VPD_ENTRY(ec, 16); /* EC level */
579 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
580 VPD_ENTRY(na, 12); /* MAC address base */
581 VPD_ENTRY(cclk, 6); /* core clock */
582 VPD_ENTRY(mclk, 6); /* mem clock */
583 VPD_ENTRY(uclk, 6); /* uP clk */
584 VPD_ENTRY(mdc, 6); /* MDIO clk */
585 VPD_ENTRY(mt, 2); /* mem timing */
586 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
587 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
588 VPD_ENTRY(port0, 2); /* PHY0 complex */
589 VPD_ENTRY(port1, 2); /* PHY1 complex */
590 VPD_ENTRY(port2, 2); /* PHY2 complex */
591 VPD_ENTRY(port3, 2); /* PHY3 complex */
592 VPD_ENTRY(rv, 1); /* csum */
593 u32 pad; /* for multiple-of-4 sizing and alignment */
596 #define EEPROM_MAX_POLL 40
597 #define EEPROM_STAT_ADDR 0x4000
598 #define VPD_BASE 0xc00
601 * t3_seeprom_read - read a VPD EEPROM location
602 * @adapter: adapter to read
603 * @addr: EEPROM address
604 * @data: where to store the read data
606 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
607 * VPD ROM capability. A zero is written to the flag bit when the
608 * addres is written to the control register. The hardware device will
609 * set the flag to 1 when 4 bytes have been read into the data register.
611 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
614 int attempts = EEPROM_MAX_POLL;
616 unsigned int base = adapter->params.pci.vpd_cap_addr;
618 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
621 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
624 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
625 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
627 if (!(val & PCI_VPD_ADDR_F)) {
628 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
631 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
632 *data = cpu_to_le32(v);
637 * t3_seeprom_write - write a VPD EEPROM location
638 * @adapter: adapter to write
639 * @addr: EEPROM address
640 * @data: value to write
642 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
643 * VPD ROM capability.
645 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
648 int attempts = EEPROM_MAX_POLL;
649 unsigned int base = adapter->params.pci.vpd_cap_addr;
651 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
654 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
656 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
657 addr | PCI_VPD_ADDR_F);
660 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
661 } while ((val & PCI_VPD_ADDR_F) && --attempts);
663 if (val & PCI_VPD_ADDR_F) {
664 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
671 * t3_seeprom_wp - enable/disable EEPROM write protection
672 * @adapter: the adapter
673 * @enable: 1 to enable write protection, 0 to disable it
675 * Enables or disables write protection on the serial EEPROM.
677 int t3_seeprom_wp(struct adapter *adapter, int enable)
679 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.
 * Assumes the input is a valid hex digit; anything else yields garbage.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
691 * get_vpd_params - read VPD parameters from VPD EEPROM
692 * @adapter: adapter to read
693 * @p: where to store the parameters
695 * Reads card parameters stored in VPD EEPROM.
697 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
703 * Card information is normally at VPD_BASE but some early cards had
706 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
709 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
711 for (i = 0; i < sizeof(vpd); i += 4) {
712 ret = t3_seeprom_read(adapter, addr + i,
713 (__le32 *)((u8 *)&vpd + i));
718 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
719 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
720 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
721 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
722 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
723 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
725 /* Old eeproms didn't have port information */
726 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
727 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
728 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
730 p->port_type[0] = hex2int(vpd.port0_data[0]);
731 p->port_type[1] = hex2int(vpd.port1_data[0]);
732 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
733 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
736 for (i = 0; i < 6; i++)
737 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
738 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
762 * sf1_read - read data from the serial flash
763 * @adapter: the adapter
764 * @byte_cnt: number of bytes to read
765 * @cont: whether another operation will be chained
766 * @valp: where to store the read data
768 * Reads up to 4 bytes of data from the serial flash. The location of
769 * the read needs to be specified prior to calling this by issuing the
770 * appropriate commands to the serial flash.
772 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
777 if (!byte_cnt || byte_cnt > 4)
779 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
781 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
782 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
784 *valp = t3_read_reg(adapter, A_SF_DATA);
789 * sf1_write - write data to the serial flash
790 * @adapter: the adapter
791 * @byte_cnt: number of bytes to write
792 * @cont: whether another operation will be chained
793 * @val: value to write
795 * Writes up to 4 bytes of data to the serial flash. The location of
796 * the write needs to be specified prior to calling this by issuing the
797 * appropriate commands to the serial flash.
799 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
802 if (!byte_cnt || byte_cnt > 4)
804 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
806 t3_write_reg(adapter, A_SF_DATA, val);
807 t3_write_reg(adapter, A_SF_OP,
808 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
809 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
813 * flash_wait_op - wait for a flash operation to complete
814 * @adapter: the adapter
815 * @attempts: max number of polls of the status register
816 * @delay: delay between polls in ms
818 * Wait for a flash operation to complete by polling the status register.
820 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
826 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
827 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
839 * t3_read_flash - read words from serial flash
840 * @adapter: the adapter
841 * @addr: the start address for the read
842 * @nwords: how many 32-bit words to read
843 * @data: where to store the read data
844 * @byte_oriented: whether to store data as bytes or as words
846 * Read the specified number of 32-bit words from the serial flash.
847 * If @byte_oriented is set the read data is stored as a byte array
848 * (i.e., big-endian), otherwise as 32-bit words in the platform's
851 int t3_read_flash(struct adapter *adapter, unsigned int addr,
852 unsigned int nwords, u32 *data, int byte_oriented)
856 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
859 addr = swab32(addr) | SF_RD_DATA_FAST;
861 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
862 (ret = sf1_read(adapter, 1, 1, data)) != 0)
865 for (; nwords; nwords--, data++) {
866 ret = sf1_read(adapter, 4, nwords > 1, data);
870 *data = htonl(*data);
876 * t3_write_flash - write up to a page of data to the serial flash
877 * @adapter: the adapter
878 * @addr: the start address to write
879 * @n: length of data to write
880 * @data: the data to write
882 * Writes up to a page of data (256 bytes) to the serial flash starting
883 * at the given address.
885 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
886 unsigned int n, const u8 *data)
890 unsigned int i, c, left, val, offset = addr & 0xff;
892 if (addr + n > SF_SIZE || offset + n > 256)
895 val = swab32(addr) | SF_PROG_PAGE;
897 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
898 (ret = sf1_write(adapter, 4, 1, val)) != 0)
901 for (left = n; left; left -= c) {
903 for (val = 0, i = 0; i < c; ++i)
904 val = (val << 8) + *data++;
906 ret = sf1_write(adapter, c, c != left, val);
910 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
913 /* Read the page to verify the write succeeded */
914 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
918 if (memcmp(data - n, (u8 *) buf + offset, n))
924 * t3_get_tp_version - read the tp sram version
925 * @adapter: the adapter
926 * @vers: where to place the version
928 * Reads the protocol sram version from sram.
930 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
934 /* Get version loaded in SRAM */
935 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
936 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
941 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
947 * t3_check_tpsram_version - read the tp sram version
948 * @adapter: the adapter
950 * Reads the protocol sram version from flash.
952 int t3_check_tpsram_version(struct adapter *adapter)
956 unsigned int major, minor;
958 if (adapter->params.rev == T3_REV_A)
962 ret = t3_get_tp_version(adapter, &vers);
966 major = G_TP_VERSION_MAJOR(vers);
967 minor = G_TP_VERSION_MINOR(vers);
969 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
972 CH_ERR(adapter, "found wrong TP version (%u.%u), "
973 "driver compiled for version %d.%d\n", major, minor,
974 TP_VERSION_MAJOR, TP_VERSION_MINOR);
980 * t3_check_tpsram - check if provided protocol SRAM
981 * is compatible with this driver
982 * @adapter: the adapter
983 * @tp_sram: the firmware image to write
986 * Checks if an adapter's tp sram is compatible with the driver.
987 * Returns 0 if the versions are compatible, a negative error otherwise.
989 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
994 const __be32 *p = (const __be32 *)tp_sram;
996 /* Verify checksum */
997 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
999 if (csum != 0xffffffff) {
1000 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1008 enum fw_version_type {
1014 * t3_get_fw_version - read the firmware version
1015 * @adapter: the adapter
1016 * @vers: where to place the version
1018 * Reads the FW version from flash.
1020 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1022 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1026 * t3_check_fw_version - check if the FW is compatible with this driver
1027 * @adapter: the adapter
1029 * Checks if an adapter's FW is compatible with the driver. Returns 0
1030 * if the versions are compatible, a negative error otherwise.
1032 int t3_check_fw_version(struct adapter *adapter)
1036 unsigned int type, major, minor;
1038 ret = t3_get_fw_version(adapter, &vers);
1042 type = G_FW_VERSION_TYPE(vers);
1043 major = G_FW_VERSION_MAJOR(vers);
1044 minor = G_FW_VERSION_MINOR(vers);
1046 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1047 minor == FW_VERSION_MINOR)
1049 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1050 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1051 "driver compiled for version %u.%u\n", major, minor,
1052 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1054 CH_WARN(adapter, "found newer FW version(%u.%u), "
1055 "driver compiled for version %u.%u\n", major, minor,
1056 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1063 * t3_flash_erase_sectors - erase a range of flash sectors
1064 * @adapter: the adapter
1065 * @start: the first sector to erase
1066 * @end: the last sector to erase
1068 * Erases the sectors in the given range.
1070 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1072 while (start <= end) {
1075 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1076 (ret = sf1_write(adapter, 4, 0,
1077 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1078 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1086 * t3_load_fw - download firmware
1087 * @adapter: the adapter
1088 * @fw_data: the firmware image to write
1091 * Write the supplied firmware image to the card's serial flash.
1092 * The FW image has the following sections: @size - 8 bytes of code and
1093 * data, followed by 4 bytes of FW version, followed by the 32-bit
1094 * 1's complement checksum of the whole image.
1096 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1100 const __be32 *p = (const __be32 *)fw_data;
1101 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1103 if ((size & 3) || size < FW_MIN_SIZE)
1105 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1108 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1109 csum += ntohl(p[i]);
1110 if (csum != 0xffffffff) {
1111 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1116 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1120 size -= 8; /* trim off version and checksum */
1121 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1122 unsigned int chunk_size = min(size, 256U);
1124 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1129 fw_data += chunk_size;
1133 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1136 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1140 #define CIM_CTL_BASE 0x2000
1143 * t3_cim_ctl_blk_read - read a block from CIM control region
1145 * @adap: the adapter
1146 * @addr: the start address within the CIM control region
1147 * @n: number of words to read
1148 * @valp: where to store the result
1150 * Reads a block of 4-byte words from the CIM control region.
1152 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1153 unsigned int n, unsigned int *valp)
1157 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1160 for ( ; !ret && n--; addr += 4) {
1161 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1162 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1165 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1170 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1171 u32 *rx_hash_high, u32 *rx_hash_low)
1173 /* stop Rx unicast traffic */
1174 t3_mac_disable_exact_filters(mac);
1176 /* stop broadcast, multicast, promiscuous mode traffic */
1177 *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1178 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1179 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1182 *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1183 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1185 *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1186 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1188 /* Leave time to drain max RX fifo */
1192 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1193 u32 rx_hash_high, u32 rx_hash_low)
1195 t3_mac_enable_exact_filters(mac);
1196 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1197 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1199 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1200 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1204 * t3_link_changed - handle interface link changes
1205 * @adapter: the adapter
1206 * @port_id: the port index that changed link state
1208 * Called when a port's link settings change to propagate the new values
1209 * to the associated PHY and MAC. After performing the common tasks it
1210 * invokes an OS-specific handler.
1212 void t3_link_changed(struct adapter *adapter, int port_id)
1214 int link_ok, speed, duplex, fc;
1215 struct port_info *pi = adap2pinfo(adapter, port_id);
1216 struct cphy *phy = &pi->phy;
1217 struct cmac *mac = &pi->mac;
1218 struct link_config *lc = &pi->link_config;
1220 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1222 if (!lc->link_ok && link_ok) {
1223 u32 rx_cfg, rx_hash_high, rx_hash_low;
1226 t3_xgm_intr_enable(adapter, port_id);
1227 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1228 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1229 t3_mac_enable(mac, MAC_DIRECTION_RX);
1231 status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1232 if (status & F_LINKFAULTCHANGE) {
1233 mac->stats.link_faults++;
1236 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1239 if (lc->requested_fc & PAUSE_AUTONEG)
1240 fc &= lc->requested_fc;
1242 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1244 if (link_ok == lc->link_ok && speed == lc->speed &&
1245 duplex == lc->duplex && fc == lc->fc)
1246 return; /* nothing changed */
1248 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1249 uses_xaui(adapter)) {
1252 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1253 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1255 lc->link_ok = link_ok;
1256 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1257 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1259 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1260 /* Set MAC speed, duplex, and flow control to match PHY. */
1261 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1265 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1268 void t3_link_fault(struct adapter *adapter, int port_id)
1270 struct port_info *pi = adap2pinfo(adapter, port_id);
1271 struct cmac *mac = &pi->mac;
1272 struct cphy *phy = &pi->phy;
1273 struct link_config *lc = &pi->link_config;
1274 int link_ok, speed, duplex, fc, link_fault;
1275 u32 rx_cfg, rx_hash_high, rx_hash_low;
1277 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1279 if (adapter->params.rev > 0 && uses_xaui(adapter))
1280 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1282 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1283 t3_mac_enable(mac, MAC_DIRECTION_RX);
1285 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1287 link_fault = t3_read_reg(adapter,
1288 A_XGM_INT_STATUS + mac->offset);
1289 link_fault &= F_LINKFAULTCHANGE;
1291 link_ok = lc->link_ok;
1293 duplex = lc->duplex;
1296 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1300 lc->speed = SPEED_INVALID;
1301 lc->duplex = DUPLEX_INVALID;
1303 t3_os_link_fault(adapter, port_id, 0);
1305 /* Account link faults only when the phy reports a link up */
1307 mac->stats.link_faults++;
1310 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1311 F_TXACTENABLE | F_RXEN);
1314 lc->link_ok = (unsigned char)link_ok;
1315 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1316 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1317 t3_os_link_fault(adapter, port_id, link_ok);
1322 * t3_link_start - apply link configuration to MAC/PHY
1323 * @phy: the PHY to setup
1324 * @mac: the MAC to setup
1325 * @lc: the requested link configuration
1327 * Set up a port's MAC and PHY according to a desired link configuration.
1328 * - If the PHY can auto-negotiate first decide what to advertise, then
1329 * enable/disable auto-negotiation as desired, and reset.
1330 * - If the PHY does not auto-negotiate just reset it.
1331 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1332 * otherwise do it later based on the outcome of auto-negotiation.
1334 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1336 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1339 if (lc->supported & SUPPORTED_Autoneg) {
1340 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1342 lc->advertising |= ADVERTISED_Asym_Pause;
1344 lc->advertising |= ADVERTISED_Pause;
1346 phy->ops->advertise(phy, lc->advertising);
1348 if (lc->autoneg == AUTONEG_DISABLE) {
1349 lc->speed = lc->requested_speed;
1350 lc->duplex = lc->requested_duplex;
1351 lc->fc = (unsigned char)fc;
1352 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1354 /* Also disables autoneg */
1355 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1357 phy->ops->autoneg_enable(phy);
1359 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1360 lc->fc = (unsigned char)fc;
1361 phy->ops->reset(phy, 0);
1367 * t3_set_vlan_accel - control HW VLAN extraction
1368 * @adapter: the adapter
1369 * @ports: bitmap of adapter ports to operate on
1370 * @on: enable (1) or disable (0) HW VLAN extraction
1372 * Enables or disables HW extraction of VLAN tags for the given port.
1374 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1376 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1377 ports << S_VLANEXTRACTIONENABLE,
1378 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* One row of a table-driven interrupt description (see
 * t3_handle_intr_status()).  A mask of 0 terminates a table. */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1389 * t3_handle_intr_status - table driven interrupt handler
1390 * @adapter: the adapter that generated the interrupt
1391 * @reg: the interrupt status register to process
1392 * @mask: a mask to apply to the interrupt status
1393 * @acts: table of interrupt actions
1394 * @stats: statistics counters tracking interrupt occurences
1396 * A table driven interrupt handler that applies a set of masks to an
1397 * interrupt status word and performs the corresponding actions if the
1398 * interrupts described by the mask have occured. The actions include
1399 * optionally printing a warning or alert message, and optionally
1400 * incrementing a stat counter. The table is terminated by an entry
1401 * specifying mask 0. Returns the number of fatal interrupt conditions.
1403 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1405 const struct intr_info *acts,
1406 unsigned long *stats)
1409 unsigned int status = t3_read_reg(adapter, reg) & mask;
1411 for (; acts->mask; ++acts) {
1412 if (!(status & acts->mask))
1416 CH_ALERT(adapter, "%s (0x%x)\n",
1417 acts->msg, status & acts->mask);
1418 } else if (acts->msg)
1419 CH_WARN(adapter, "%s (0x%x)\n",
1420 acts->msg, status & acts->mask);
1421 if (acts->stat_idx >= 0)
1422 stats[acts->stat_idx]++;
1424 if (status) /* clear processed interrupts */
1425 t3_write_reg(adapter, reg, status);
1429 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1430 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1431 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1432 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1433 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1434 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1435 F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1436 F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1437 F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1439 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1440 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1442 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1443 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1444 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1446 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1447 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1448 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1449 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1450 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1451 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1452 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1453 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1454 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1455 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1456 F_TXPARERR | V_BISTERR(M_BISTERR))
1457 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1458 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1459 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1460 #define ULPTX_INTR_MASK 0xfc
1461 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1462 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1463 F_ZERO_SWITCH_ERROR)
1464 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1465 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1466 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1467 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1468 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1469 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1470 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1471 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1472 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1473 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1474 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1475 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1476 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1477 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1478 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1479 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1480 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1481 V_MCAPARERRENB(M_MCAPARERRENB))
1482 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1483 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1484 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1485 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1486 F_MPS0 | F_CPL_SWITCH)
1488 * Interrupt handler for the PCIX1 module.
1490 static void pci_intr_handler(struct adapter *adapter)
1492 static const struct intr_info pcix1_intr_info[] = {
1493 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1494 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1495 {F_RCVTARABT, "PCI received target abort", -1, 1},
1496 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1497 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1498 {F_DETPARERR, "PCI detected parity error", -1, 1},
1499 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1500 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1501 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1503 {F_DETCORECCERR, "PCI correctable ECC error",
1504 STAT_PCI_CORR_ECC, 0},
1505 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1506 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1507 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1509 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1511 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1513 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1518 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1519 pcix1_intr_info, adapter->irq_stats))
1520 t3_fatal_err(adapter);
1524 * Interrupt handler for the PCIE module.
1526 static void pcie_intr_handler(struct adapter *adapter)
1528 static const struct intr_info pcie_intr_info[] = {
1529 {F_PEXERR, "PCI PEX error", -1, 1},
1531 "PCI unexpected split completion DMA read error", -1, 1},
1533 "PCI unexpected split completion DMA command error", -1, 1},
1534 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1535 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1536 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1537 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1538 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1539 "PCI MSI-X table/PBA parity error", -1, 1},
1540 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1541 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1542 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1543 {F_TXPARERR, "PCI Tx parity error", -1, 1},
1544 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1548 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1549 CH_ALERT(adapter, "PEX error code 0x%x\n",
1550 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1552 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1553 pcie_intr_info, adapter->irq_stats))
1554 t3_fatal_err(adapter);
1558 * TP interrupt handler.
1560 static void tp_intr_handler(struct adapter *adapter)
1562 static const struct intr_info tp_intr_info[] = {
1563 {0xffffff, "TP parity error", -1, 1},
1564 {0x1000000, "TP out of Rx pages", -1, 1},
1565 {0x2000000, "TP out of Tx pages", -1, 1},
1569 static struct intr_info tp_intr_info_t3c[] = {
1570 {0x1fffffff, "TP parity error", -1, 1},
1571 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1572 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1576 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1577 adapter->params.rev < T3_REV_C ?
1578 tp_intr_info : tp_intr_info_t3c, NULL))
1579 t3_fatal_err(adapter);
1583 * CIM interrupt handler.
1585 static void cim_intr_handler(struct adapter *adapter)
1587 static const struct intr_info cim_intr_info[] = {
1588 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1589 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1590 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1591 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1592 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1593 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1594 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1595 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1596 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1597 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1598 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1599 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1600 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1601 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1602 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1603 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1604 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1605 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1606 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1607 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1608 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1609 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1610 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1611 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1615 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1616 cim_intr_info, NULL))
1617 t3_fatal_err(adapter);
1621 * ULP RX interrupt handler.
1623 static void ulprx_intr_handler(struct adapter *adapter)
1625 static const struct intr_info ulprx_intr_info[] = {
1626 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1627 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1628 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1629 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1630 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1631 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1632 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1633 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1637 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1638 ulprx_intr_info, NULL))
1639 t3_fatal_err(adapter);
1643 * ULP TX interrupt handler.
1645 static void ulptx_intr_handler(struct adapter *adapter)
1647 static const struct intr_info ulptx_intr_info[] = {
1648 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1649 STAT_ULP_CH0_PBL_OOB, 0},
1650 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1651 STAT_ULP_CH1_PBL_OOB, 0},
1652 {0xfc, "ULP TX parity error", -1, 1},
1656 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1657 ulptx_intr_info, adapter->irq_stats))
1658 t3_fatal_err(adapter);
1661 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1662 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1663 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1664 F_ICSPI1_TX_FRAMING_ERROR)
1665 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1666 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1667 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1668 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1671 * PM TX interrupt handler.
1673 static void pmtx_intr_handler(struct adapter *adapter)
1675 static const struct intr_info pmtx_intr_info[] = {
1676 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1677 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1678 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1679 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1680 "PMTX ispi parity error", -1, 1},
1681 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1682 "PMTX ospi parity error", -1, 1},
1686 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1687 pmtx_intr_info, NULL))
1688 t3_fatal_err(adapter);
1691 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1692 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1693 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1694 F_IESPI1_TX_FRAMING_ERROR)
1695 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1696 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1697 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1698 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1701 * PM RX interrupt handler.
1703 static void pmrx_intr_handler(struct adapter *adapter)
1705 static const struct intr_info pmrx_intr_info[] = {
1706 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1707 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1708 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1709 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1710 "PMRX ispi parity error", -1, 1},
1711 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1712 "PMRX ospi parity error", -1, 1},
1716 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1717 pmrx_intr_info, NULL))
1718 t3_fatal_err(adapter);
1722 * CPL switch interrupt handler.
1724 static void cplsw_intr_handler(struct adapter *adapter)
1726 static const struct intr_info cplsw_intr_info[] = {
1727 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1728 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1729 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1730 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1731 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1732 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1736 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1737 cplsw_intr_info, NULL))
1738 t3_fatal_err(adapter);
1742 * MPS interrupt handler.
1744 static void mps_intr_handler(struct adapter *adapter)
1746 static const struct intr_info mps_intr_info[] = {
1747 {0x1ff, "MPS parity error", -1, 1},
1751 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1752 mps_intr_info, NULL))
1753 t3_fatal_err(adapter);
1756 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1759 * MC7 interrupt handler.
1761 static void mc7_intr_handler(struct mc7 *mc7)
1763 struct adapter *adapter = mc7->adapter;
1764 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1767 mc7->stats.corr_err++;
1768 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1769 "data 0x%x 0x%x 0x%x\n", mc7->name,
1770 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1771 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1772 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1773 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1777 mc7->stats.uncorr_err++;
1778 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1779 "data 0x%x 0x%x 0x%x\n", mc7->name,
1780 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1781 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1782 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1783 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1787 mc7->stats.parity_err++;
1788 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1789 mc7->name, G_PE(cause));
1795 if (adapter->params.rev > 0)
1796 addr = t3_read_reg(adapter,
1797 mc7->offset + A_MC7_ERR_ADDR);
1798 mc7->stats.addr_err++;
1799 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1803 if (cause & MC7_INTR_FATAL)
1804 t3_fatal_err(adapter);
1806 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1809 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1810 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1812 * XGMAC interrupt handler.
1814 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1816 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1818 * We mask out interrupt causes for which we're not taking interrupts.
1819 * This allows us to use polling logic to monitor some of the other
1820 * conditions when taking interrupts would impose too much load on the
1823 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1826 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1827 mac->stats.tx_fifo_parity_err++;
1828 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1830 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1831 mac->stats.rx_fifo_parity_err++;
1832 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1834 if (cause & F_TXFIFO_UNDERRUN)
1835 mac->stats.tx_fifo_urun++;
1836 if (cause & F_RXFIFO_OVERFLOW)
1837 mac->stats.rx_fifo_ovfl++;
1838 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1839 mac->stats.serdes_signal_loss++;
1840 if (cause & F_XAUIPCSCTCERR)
1841 mac->stats.xaui_pcs_ctc_err++;
1842 if (cause & F_XAUIPCSALIGNCHANGE)
1843 mac->stats.xaui_pcs_align_change++;
1844 if (cause & F_XGM_INT) {
1845 t3_set_reg_field(adap,
1846 A_XGM_INT_ENABLE + mac->offset,
1848 mac->stats.link_faults++;
1850 t3_os_link_fault_handler(adap, idx);
1853 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1855 if (cause & XGM_INTR_FATAL)
1862 * Interrupt handler for PHY events.
1864 int t3_phy_intr_handler(struct adapter *adapter)
1866 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1868 for_each_port(adapter, i) {
1869 struct port_info *p = adap2pinfo(adapter, i);
1871 if (!(p->phy.caps & SUPPORTED_IRQ))
1874 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1875 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1877 if (phy_cause & cphy_cause_link_change)
1878 t3_link_changed(adapter, i);
1879 if (phy_cause & cphy_cause_fifo_error)
1880 p->phy.fifo_errors++;
1881 if (phy_cause & cphy_cause_module_change)
1882 t3_os_phymod_changed(adapter, i);
1886 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1891 * T3 slow path (non-data) interrupt handler.
1893 int t3_slow_intr_handler(struct adapter *adapter)
1895 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1897 cause &= adapter->slow_intr_mask;
1900 if (cause & F_PCIM0) {
1901 if (is_pcie(adapter))
1902 pcie_intr_handler(adapter);
1904 pci_intr_handler(adapter);
1907 t3_sge_err_intr_handler(adapter);
1908 if (cause & F_MC7_PMRX)
1909 mc7_intr_handler(&adapter->pmrx);
1910 if (cause & F_MC7_PMTX)
1911 mc7_intr_handler(&adapter->pmtx);
1912 if (cause & F_MC7_CM)
1913 mc7_intr_handler(&adapter->cm);
1915 cim_intr_handler(adapter);
1917 tp_intr_handler(adapter);
1918 if (cause & F_ULP2_RX)
1919 ulprx_intr_handler(adapter);
1920 if (cause & F_ULP2_TX)
1921 ulptx_intr_handler(adapter);
1922 if (cause & F_PM1_RX)
1923 pmrx_intr_handler(adapter);
1924 if (cause & F_PM1_TX)
1925 pmtx_intr_handler(adapter);
1926 if (cause & F_CPL_SWITCH)
1927 cplsw_intr_handler(adapter);
1929 mps_intr_handler(adapter);
1931 t3_mc5_intr_handler(&adapter->mc5);
1932 if (cause & F_XGMAC0_0)
1933 mac_intr_handler(adapter, 0);
1934 if (cause & F_XGMAC0_1)
1935 mac_intr_handler(adapter, 1);
1936 if (cause & F_T3DBG)
1937 t3_os_ext_intr_handler(adapter);
1939 /* Clear the interrupts just processed. */
1940 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1941 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1945 static unsigned int calc_gpio_intr(struct adapter *adap)
1947 unsigned int i, gpi_intr = 0;
1949 for_each_port(adap, i)
1950 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1951 adapter_info(adap)->gpio_intr[i])
1952 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1957 * t3_intr_enable - enable interrupts
1958 * @adapter: the adapter whose interrupts should be enabled
1960 * Enable interrupts by setting the interrupt enable registers of the
1961 * various HW modules and then enabling the top-level interrupt
1964 void t3_intr_enable(struct adapter *adapter)
1966 static const struct addr_val_pair intr_en_avp[] = {
1967 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1968 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1969 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1971 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1973 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1974 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1975 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1976 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1977 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1978 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1981 adapter->slow_intr_mask = PL_INTR_MASK;
1983 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1984 t3_write_reg(adapter, A_TP_INT_ENABLE,
1985 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1987 if (adapter->params.rev > 0) {
1988 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1989 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1990 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1991 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1992 F_PBL_BOUND_ERR_CH1);
1994 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1995 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1998 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2000 if (is_pcie(adapter))
2001 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2003 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2004 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2005 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2009 * t3_intr_disable - disable a card's interrupts
2010 * @adapter: the adapter whose interrupts should be disabled
2012 * Disable interrupts. We only disable the top-level interrupt
2013 * concentrator and the SGE data interrupts.
2015 void t3_intr_disable(struct adapter *adapter)
2017 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2018 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2019 adapter->slow_intr_mask = 0;
2023 * t3_intr_clear - clear all interrupts
2024 * @adapter: the adapter whose interrupts should be cleared
2026 * Clears all interrupts.
2028 void t3_intr_clear(struct adapter *adapter)
2030 static const unsigned int cause_reg_addr[] = {
2032 A_SG_RSPQ_FL_STATUS,
2035 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2036 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2037 A_CIM_HOST_INT_CAUSE,
2050 /* Clear PHY and MAC interrupts for each port. */
2051 for_each_port(adapter, i)
2052 t3_port_intr_clear(adapter, i);
2054 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2055 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2057 if (is_pcie(adapter))
2058 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2059 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2060 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2063 void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2065 struct port_info *pi = adap2pinfo(adapter, idx);
2067 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2068 XGM_EXTRA_INTR_MASK);
2071 void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2073 struct port_info *pi = adap2pinfo(adapter, idx);
2075 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2080 * t3_port_intr_enable - enable port-specific interrupts
2081 * @adapter: associated adapter
2082 * @idx: index of port whose interrupts should be enabled
2084 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2087 void t3_port_intr_enable(struct adapter *adapter, int idx)
2089 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2091 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2092 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2093 phy->ops->intr_enable(phy);
2097 * t3_port_intr_disable - disable port-specific interrupts
2098 * @adapter: associated adapter
2099 * @idx: index of port whose interrupts should be disabled
2101 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2104 void t3_port_intr_disable(struct adapter *adapter, int idx)
2106 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2108 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2109 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2110 phy->ops->intr_disable(phy);
2114 * t3_port_intr_clear - clear port-specific interrupts
2115 * @adapter: associated adapter
2116 * @idx: index of port whose interrupts to clear
2118 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2121 void t3_port_intr_clear(struct adapter *adapter, int idx)
2123 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2125 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2126 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2127 phy->ops->intr_clear(phy);
2130 #define SG_CONTEXT_CMD_ATTEMPTS 100
2133 * t3_sge_write_context - write an SGE context
2134 * @adapter: the adapter
2135 * @id: the context id
2136 * @type: the context type
2138 * Program an SGE context with the values already loaded in the
2139 * CONTEXT_DATA? registers.
2141 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2144 if (type == F_RESPONSEQ) {
2146 * Can't write the Response Queue Context bits for
2147 * Interrupt Armed or the Reserve bits after the chip
2148 * has been initialized out of reset. Writing to these
2149 * bits can confuse the hardware.
2151 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2152 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2153 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2154 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2156 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2157 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2158 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2159 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2161 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2162 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2163 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2164 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2168 * clear_sge_ctxt - completely clear an SGE context
2169 * @adapter: the adapter
2170 * @id: the context id
2171 * @type: the context type
2173 * Completely clear an SGE context. Used predominantly at post-reset
2174 * initialization. Note in particular that we don't skip writing to any
2175 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2178 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2181 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2182 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2183 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2184 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2185 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2186 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2187 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2188 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2189 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2190 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2191 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2192 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2196 * t3_sge_init_ecntxt - initialize an SGE egress context
2197 * @adapter: the adapter to configure
2198 * @id: the context id
2199 * @gts_enable: whether to enable GTS for the context
2200 * @type: the egress context type
2201 * @respq: associated response queue
2202 * @base_addr: base address of queue
2203 * @size: number of queue entries
2205 * @gen: initial generation value for the context
2206 * @cidx: consumer pointer
2208 * Initialize an SGE egress context and make it ready for use. If the
2209 * platform allows concurrent context operations, the caller is
2210 * responsible for appropriate locking.
2212 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2213 enum sge_context_type type, int respq, u64 base_addr,
2214 unsigned int size, unsigned int token, int gen,
/* NOTE(review): excerpt elides several lines here (the @token doc line,
 * the cidx parameter continuation, braces and the error-return paths). */
/* offload egress contexts get no initial credits; others start with FW_WR_NUM */
2217 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2219 if (base_addr & 0xfff) /* must be 4K aligned */
/* don't start a new context op while the previous one is still busy */
2221 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* the queue base address is split across DATA1 (low 16), DATA2 and DATA3 */
2225 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2226 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2227 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2228 V_EC_BASE_LO(base_addr & 0xffff));
2230 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2232 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2233 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2234 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2236 return t3_sge_write_context(adapter, id, F_EGRESS);
2240 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2241 * @adapter: the adapter to configure
2242 * @id: the context id
2243 * @gts_enable: whether to enable GTS for the context
2244 * @base_addr: base address of queue
2245 * @size: number of queue entries
2246 * @bsize: size of each buffer for this queue
2247 * @cong_thres: threshold to signal congestion to upstream producers
2248 * @gen: initial generation value for the context
2249 * @cidx: consumer pointer
2251 * Initialize an SGE free list context and make it ready for use. The
2252 * caller is responsible for ensuring only one context operation occurs
2255 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2256 int gts_enable, u64 base_addr, unsigned int size,
2257 unsigned int bsize, unsigned int cong_thres, int gen,
/* NOTE(review): excerpt elides the cidx parameter line, braces and the
 * early-return error paths (-EINVAL / -EBUSY analogues). */
2260 if (base_addr & 0xfff) /* must be 4K aligned */
2262 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* base address low word in DATA0; its high bits plus the split consumer
 * index, size, generation and buffer-entry size follow in DATA1..DATA3 */
2266 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2268 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2269 V_FL_BASE_HI((u32) base_addr) |
2270 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2271 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2272 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2273 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2274 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2275 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2276 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2277 return t3_sge_write_context(adapter, id, F_FREELIST);
2281 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2282 * @adapter: the adapter to configure
2283 * @id: the context id
2284 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2285 * @base_addr: base address of queue
2286 * @size: number of queue entries
2287 * @fl_thres: threshold for selecting the normal or jumbo free list
2288 * @gen: initial generation value for the context
2289 * @cidx: consumer pointer
2291 * Initialize an SGE response queue context and make it ready for use.
2292 * The caller is responsible for ensuring only one context operation
2295 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2296 int irq_vec_idx, u64 base_addr, unsigned int size,
2297 unsigned int fl_thres, int gen, unsigned int cidx)
/* NOTE(review): braces and the -EINVAL/-EBUSY return lines are elided in
 * this excerpt. */
2299 unsigned int intr = 0;
2301 if (base_addr & 0xfff) /* must be 4K aligned */
2303 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2307 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2309 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* a negative vector index means "no interrupt": leave intr at 0 */
2311 if (irq_vec_idx >= 0)
2312 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2313 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2314 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2315 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2316 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2320 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2321 * @adapter: the adapter to configure
2322 * @id: the context id
2323 * @base_addr: base address of queue
2324 * @size: number of queue entries
2325 * @rspq: response queue for async notifications
2326 * @ovfl_mode: CQ overflow mode
2327 * @credits: completion queue credits
2328 * @credit_thres: the credit threshold
2330 * Initialize an SGE completion queue context and make it ready for use.
2331 * The caller is responsible for ensuring only one context operation
2334 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2335 unsigned int size, int rspq, int ovfl_mode,
2336 unsigned int credits, unsigned int credit_thres)
/* NOTE(review): braces and the error-return lines are elided in this
 * excerpt. */
2338 if (base_addr & 0xfff) /* must be 4K aligned */
2340 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2344 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2345 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* ovfl_mode drives both the overflow-mode and error fields; generation
 * always starts at 1 */
2347 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2348 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2349 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2350 V_CQ_ERR(ovfl_mode));
2351 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2352 V_CQ_CREDIT_THRES(credit_thres));
2353 return t3_sge_write_context(adapter, id, F_CQ);
2357 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2358 * @adapter: the adapter
2359 * @id: the egress context id
2360 * @enable: enable (1) or disable (0) the context
2362 * Enable or disable an SGE egress context. The caller is responsible for
2363 * ensuring only one context operation occurs at a time.
2365 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
/* NOTE(review): braces and the busy-return line are elided in this excerpt. */
2367 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* masks select only the EC_VALID bit, so only the valid flag is modified */
2370 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2371 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2372 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2373 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2374 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2375 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2376 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2377 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2378 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2382 * t3_sge_disable_fl - disable an SGE free-buffer list
2383 * @adapter: the adapter
2384 * @id: the free list context id
2386 * Disable an SGE free-buffer list. The caller is responsible for
2387 * ensuring only one context operation occurs at a time.
2389 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
/* NOTE(review): braces and the busy-return line are elided in this excerpt. */
2391 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* mask selects only the FL_SIZE field; writing a size of 0 disables the list */
2394 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2395 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2396 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2397 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2398 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2399 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2400 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2401 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2402 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2406 * t3_sge_disable_rspcntxt - disable an SGE response queue
2407 * @adapter: the adapter
2408 * @id: the response queue context id
2410 * Disable an SGE response queue. The caller is responsible for
2411 * ensuring only one context operation occurs at a time.
2413 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
/* NOTE(review): braces and the busy-return line are elided in this excerpt. */
2415 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* mask selects only the CQ_SIZE field; writing size 0 disables the queue */
2418 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2419 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2420 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2421 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2422 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2423 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2424 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2425 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2426 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2430 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2431 * @adapter: the adapter
2432 * @id: the completion queue context id
2434 * Disable an SGE completion queue. The caller is responsible for
2435 * ensuring only one context operation occurs at a time.
2437 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
/* NOTE(review): braces and the busy-return line are elided in this excerpt.
 * Same pattern as t3_sge_disable_rspcntxt but targets the CQ context type. */
2439 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2442 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2443 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2444 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2445 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2446 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2447 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2448 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2449 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2450 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2454 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2455 * @adapter: the adapter
2456 * @id: the context id
2457 * @op: the operation to perform
2459 * Perform the selected operation on an SGE completion queue context.
2460 * The caller is responsible for ensuring only one context operation
2463 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2464 unsigned int credits)
/* NOTE(review): the @credits doc line, braces, the declaration of 'val' and
 * some return lines are elided in this excerpt. */
2468 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2471 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2472 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2473 V_CONTEXT(id) | F_CQ);
2474 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2475 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
/* ops 2..6 return the CQ index; pre-rev-1 parts need an explicit context
 * read (opcode 0) to fetch it, newer revisions report it in the CMD value */
2478 if (op >= 2 && op < 7) {
2479 if (adapter->params.rev > 0)
2480 return G_CQ_INDEX(val);
2482 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2483 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2484 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2485 F_CONTEXT_CMD_BUSY, 0,
2486 SG_CONTEXT_CMD_ATTEMPTS, 1))
2488 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2494 * t3_sge_read_context - read an SGE context
2495 * @type: the context type
2496 * @adapter: the adapter
2497 * @id: the context id
2498 * @data: holds the retrieved context
2500 * Read an SGE egress context. The caller is responsible for ensuring
2501 * only one context operation occurs at a time.
2503 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2504 unsigned int id, u32 data[4])
/* NOTE(review): braces, the busy-return line and the final 'return 0' are
 * elided in this excerpt. */
2506 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* opcode 0 = context read; then pull the four context words from DATA0..3 */
2509 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2510 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2511 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2512 SG_CONTEXT_CMD_ATTEMPTS, 1))
2514 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2515 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2516 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2517 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2522 * t3_sge_read_ecntxt - read an SGE egress context
2523 * @adapter: the adapter
2524 * @id: the context id
2525 * @data: holds the retrieved context
2527 * Read an SGE egress context. The caller is responsible for ensuring
2528 * only one context operation occurs at a time.
2530 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
/* NOTE(review): braces and an id range check are elided in this excerpt. */
2534 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2538 * t3_sge_read_cq - read an SGE CQ context
2539 * @adapter: the adapter
2540 * @id: the context id
2541 * @data: holds the retrieved context
2543 * Read an SGE CQ context. The caller is responsible for ensuring
2544 * only one context operation occurs at a time.
2546 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
/* NOTE(review): braces and an id range check are elided in this excerpt. */
2550 return t3_sge_read_context(F_CQ, adapter, id, data);
2554 * t3_sge_read_fl - read an SGE free-list context
2555 * @adapter: the adapter
2556 * @id: the context id
2557 * @data: holds the retrieved context
2559 * Read an SGE free-list context. The caller is responsible for ensuring
2560 * only one context operation occurs at a time.
2562 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
/* two free lists per queue set, hence the "* 2" bound on the id */
2564 if (id >= SGE_QSETS * 2)
2566 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2570 * t3_sge_read_rspq - read an SGE response queue context
2571 * @adapter: the adapter
2572 * @id: the context id
2573 * @data: holds the retrieved context
2575 * Read an SGE response queue context. The caller is responsible for
2576 * ensuring only one context operation occurs at a time.
2578 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
/* one response queue per queue set */
2580 if (id >= SGE_QSETS)
2582 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2586 * t3_config_rss - configure Rx packet steering
2587 * @adapter: the adapter
2588 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2589 * @cpus: values for the CPU lookup table (0xff terminated)
2590 * @rspq: values for the response queue lookup table (0xffff terminated)
2592 * Programs the receive packet steering logic. @cpus and @rspq provide
2593 * the values for the CPU and response queue lookup tables. If they
2594 * provide fewer values than the size of the tables the supplied values
2595 * are used repeatedly until the tables are fully populated.
2597 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2598 const u8 * cpus, const u16 *rspq)
/* NOTE(review): braces, the per-entry 'val' initialization and the
 * index-rewind lines (restart at the table start on terminator) are elided
 * in this excerpt. */
2600 int i, j, cpu_idx = 0, q_idx = 0;
/* populate the CPU lookup table, two 6-bit CPU values per entry */
2603 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2606 for (j = 0; j < 2; ++j) {
2607 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2608 if (cpus[cpu_idx] == 0xff)
2611 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* populate the response-queue map table, entry index in the upper half */
2615 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2616 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2617 (i << 16) | rspq[q_idx++]);
2618 if (rspq[q_idx] == 0xffff)
2622 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2626 * t3_read_rss - read the contents of the RSS tables
2627 * @adapter: the adapter
2628 * @lkup: holds the contents of the RSS lookup table
2629 * @map: holds the contents of the RSS map table
2631 * Reads the contents of the receive packet steering tables.
2633 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
/* NOTE(review): braces, the written address values, error returns and the
 * map-store lines are elided in this excerpt. Bit 31 of the read-back value
 * appears to be a valid/ready flag — confirm against the full source. */
2639 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2640 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2642 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2643 if (!(val & 0x80000000))
2646 *lkup++ = (val >> 8);
2650 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2651 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2653 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2654 if (!(val & 0x80000000))
2662 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2663 * @adap: the adapter
2664 * @enable: 1 to select offload mode, 0 for regular NIC
2666 * Switches TP to NIC/offload mode.
2668 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
/* only offload-capable adapters may leave NIC mode; NICMODE is the inverse
 * of "offload enabled" */
2670 if (is_offload(adap) || !enable)
2671 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2672 V_NICMODE(!enable));
2676 * pm_num_pages - calculate the number of pages of the payload memory
2677 * @mem_size: the size of the payload memory
2678 * @pg_size: the size of each payload memory page
2680 * Calculate the number of pages, each of the given size, that fit in a
2681 * memory of the specified size, respecting the HW requirement that the
2682 * number of pages must be a multiple of 24.
2684 static inline unsigned int pm_num_pages(unsigned int mem_size,
2685 unsigned int pg_size)
/* NOTE(review): the rounding-to-a-multiple-of-24 and return lines are
 * elided in this excerpt. */
2687 unsigned int n = mem_size / pg_size;
/* Program a memory-region base register and advance the running offset.
 * NOTE(review): the macro's remaining lines (the offset advance) are elided
 * in this excerpt. */
2692 #define mem_region(adap, start, size, reg) \
2693 t3_write_reg((adap), A_ ## reg, (start)); \
2697 * partition_mem - partition memory and configure TP memory settings
2698 * @adap: the adapter
2699 * @p: the TP parameters
2701 * Partitions context and payload memory and configures TP's memory
2704 static void partition_mem(struct adapter *adap, const struct tp_params *p)
/* NOTE(review): many lines are elided in this excerpt (braces, the timer
 * count/shift assignments inside the tid-size ladder, several register
 * writes). Do not modify without the full source. */
2706 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2707 unsigned int timers = 0, timers_shift = 22;
/* pick timer-region parameters based on how many TIDs the MC5 supports */
2709 if (adap->params.rev > 0) {
2710 if (tids <= 16 * 1024) {
2713 } else if (tids <= 64 * 1024) {
2716 } else if (tids <= 256 * 1024) {
/* channel payload-memory sizes and per-channel Tx/Rx page layout */
2722 t3_write_reg(adap, A_TP_PMM_SIZE,
2723 p->chan_rx_size | (p->chan_tx_size >> 16));
2725 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2726 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2727 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2728 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2729 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2731 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2732 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2733 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2735 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2736 /* Add a bit of headroom and make multiple of 24 */
2738 pstructs -= pstructs % 24;
2739 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* lay out CM regions back-to-back: egress/CQ contexts, timers, pstructs
 * and the free-struct lists, tracking the running offset in 'm' */
2741 m = tids * TCB_SIZE;
2742 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2743 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2744 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2745 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2746 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2747 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2748 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2749 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* hand the 4K-aligned remainder of CM to the CIM */
2751 m = (m + 4095) & ~0xfff;
2752 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2753 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2755 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2756 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2757 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2759 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the PIO indirect-access window:
 * latch the address, then write the data. */
2762 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2765 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2766 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* One-time static configuration of the TP (TCP protocol) block: global
 * offload options, TCP option defaults, delayed-ACK behavior, congestion
 * settings and the Tx modulation queues.
 * NOTE(review): several lines are elided in this excerpt (braces, some
 * t3_set_reg_field argument lines, a revision-0 alternate flag). */
2769 static void tp_config(struct adapter *adap, const struct tp_params *p)
2771 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2772 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2773 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
/* default TCP options: MTU 576 fallback, window scaling, timestamps, SACK */
2774 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2775 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2776 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2777 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2778 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2779 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2780 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2781 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2782 F_IPV6ENABLE | F_NICMODE);
2783 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2784 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2785 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2786 adap->params.rev > 0 ? F_ENABLEESND :
2789 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2791 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2792 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2793 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2794 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2795 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
/* write twice: the second value is the operational flow-control setting */
2796 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2797 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* revision-specific pacing configuration */
2799 if (adap->params.rev > 0) {
2800 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2801 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2803 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2804 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2806 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2808 if (adap->params.rev == T3_REV_C)
2809 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2810 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2811 V_TABLELATENCYDELTA(4));
/* disable Tx modulation weighting/rate limiting by default */
2813 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2814 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2815 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2816 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2819 /* Desired TP timer resolution in usec */
2820 #define TP_TMR_RES 50
2822 /* TCP timer values in ms */
2823 #define TP_DACK_TIMER 50
2824 #define TP_RTO_MIN 250
2827 * tp_set_timers - set TP timing parameters
2828 * @adap: the adapter to set
2829 * @core_clk: the core clock frequency in Hz
2831 * Set TP's timing parameters, such as the various timer resolutions and
2832 * the TCP timer values.
2834 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* NOTE(review): braces, a V_KEEPALIVEMAX-style final argument and the
 * #undef of SECONDS are elided in this excerpt. */
/* derive log2 clock-divider exponents for each timer resolution */
2836 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2837 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2838 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2839 unsigned int tps = core_clk >> tre; /* timer ticks per second */
2841 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2842 V_DELAYEDACKRESOLUTION(dack_re) |
2843 V_TIMESTAMPRESOLUTION(tstamp_re));
2844 t3_write_reg(adap, A_TP_DACK_TIMER,
2845 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* exponential-backoff multiplier table, 4 entries per register */
2846 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2847 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2848 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2849 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2850 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2851 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2852 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* SECONDS converts seconds to timer ticks (tps = ticks per second) */
2855 #define SECONDS * tps
2857 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2858 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2859 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2860 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2861 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2862 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2863 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2864 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2865 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2871 * t3_tp_set_coalescing_size - set receive coalescing size
2872 * @adap: the adapter
2873 * @size: the receive coalescing size
2874 * @psh: whether a set PSH bit should deliver coalesced data
2876 * Set the receive coalescing size and PSH bit handling.
2878 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
/* NOTE(review): braces, the 'val' declaration, the -EINVAL return, the
 * size/psh condition lines and the final 'return 0' are elided in this
 * excerpt. */
2882 if (size > MAX_RX_COALESCING_LEN)
2885 val = t3_read_reg(adap, A_TP_PARA_REG3);
/* clear both coalescing bits, then re-enable per the requested settings */
2886 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2889 val |= F_RXCOALESCEENABLE;
2891 val |= F_RXCOALESCEPSHEN;
2892 size = min(MAX_RX_COALESCING_LEN, size);
2893 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2894 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2896 t3_write_reg(adap, A_TP_PARA_REG3, val);
2901 * t3_tp_set_max_rxsize - set the max receive size
2902 * @adap: the adapter
2903 * @size: the max receive size
2905 * Set TP's max receive size. This is the limit that applies when
2906 * receive coalescing is disabled.
2908 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
/* same limit applied to both PM transfer-length channels */
2910 t3_write_reg(adap, A_TP_PARA_REG7,
2911 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table.
 * NOTE(review): the table-entry assignments that make up this function's
 * body are elided in this excerpt — only the rationale comment remains. */
2914 static void init_mtus(unsigned short mtus[])
2917 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2918 * it can accommodate max size TCP/IP headers when SACK and timestamps
2919 * are enabled and still have at least 8 bytes of payload.
2940 * Initial congestion control parameters: additive-increment table @a and
 * multiplicative table @b, indexed by congestion window.
 * NOTE(review): most of the table assignments (indices 9..12 of @a, the
 * middle of @b, and the tail entries) are elided in this excerpt.
2942 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2944 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2969 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2972 b[13] = b[14] = b[15] = b[16] = 3;
2973 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2974 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2979 /* The minimum additive increment value for the congestion control table */
2980 #define CC_MIN_INCR 2U
2983 * t3_load_mtus - write the MTU and congestion control HW tables
2984 * @adap: the adapter
2985 * @mtus: the unrestricted values for the MTU table
2986 * @alpha: the values for the congestion control alpha parameter
2987 * @beta: the values for the congestion control beta parameter
2988 * @mtu_cap: the maximum permitted effective MTU
2990 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2991 * Update the high-speed congestion control table with the supplied alpha,
2994 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2995 unsigned short alpha[NCCTRL_WIN],
2996 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* NOTE(review): braces, loop-variable declarations, a log2 decrement in the
 * rounding branch and the 'inc' declaration are elided in this excerpt. */
2998 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2999 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3000 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3001 28672, 40960, 57344, 81920, 114688, 163840, 229376
3006 for (i = 0; i < NMTUS; ++i) {
3007 unsigned int mtu = min(mtus[i], mtu_cap);
3008 unsigned int log2 = fls(mtu);
3010 if (!(mtu & ((1 << log2) >> 2))) /* round */
3012 t3_write_reg(adap, A_TP_MTU_TABLE,
3013 (i << 24) | (log2 << 16) | mtu);
/* per-window additive increments scaled by average packet count,
 * floored at CC_MIN_INCR */
3015 for (w = 0; w < NCCTRL_WIN; ++w) {
3018 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3021 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3022 (w << 16) | (beta[w] << 13) | inc);
3028 * t3_read_hw_mtus - returns the values in the HW MTU table
3029 * @adap: the adapter
3030 * @mtus: where to store the HW MTU values
3032 * Reads the HW MTU table.
3034 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
/* NOTE(review): braces and the loop-variable/val declarations are elided in
 * this excerpt. Writing 0xff000000|i selects table entry i for readback. */
3038 for (i = 0; i < NMTUS; ++i) {
3041 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3042 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3043 mtus[i] = val & 0x3fff; /* MTU occupies the low 14 bits */
3048 * t3_get_cong_cntl_tab - reads the congestion control table
3049 * @adap: the adapter
3050 * @incr: where to store the alpha values
3052 * Reads the additive increments programmed into the HW congestion
3055 void t3_get_cong_cntl_tab(struct adapter *adap,
3056 unsigned short incr[NMTUS][NCCTRL_WIN])
/* NOTE(review): braces and the low-bits mask on the read value are elided
 * in this excerpt. 0xffff0000 in the address selects readback mode. */
3058 unsigned int mtu, w;
3060 for (mtu = 0; mtu < NMTUS; ++mtu)
3061 for (w = 0; w < NCCTRL_WIN; ++w) {
3062 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3063 0xffff0000 | (mtu << 5) | w);
3064 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
3070 * t3_tp_get_mib_stats - read TP's MIB counters
3071 * @adap: the adapter
3072 * @tps: holds the returned counter values
3074 * Returns the values of TP's MIB counters.
3076 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
/* bulk indirect read: the stats struct is treated as an array of u32 words */
3078 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
3079 sizeof(*tps) / sizeof(u32), 0);
/* Carve a ULP RX memory region: program lower/upper limits and advance the
 * running offset. NOTE(review): the offset-advance line of ulp_region is
 * elided in this excerpt. */
3082 #define ulp_region(adap, name, start, len) \
3083 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3084 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3085 (start) + (len) - 1); \
/* Same as ulp_region but for the ULP TX register block */
3088 #define ulptx_region(adap, name, start, len) \
3089 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3090 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3091 (start) + (len) - 1)
/* Partition the Rx payload memory among the ULP consumers (iSCSI, TDDP,
 * RDMA TPT/STAG/RQ/PBL regions). */
3093 static void ulp_config(struct adapter *adap, const struct tp_params *p)
3095 unsigned int m = p->chan_rx_size;
3097 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3098 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3099 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3100 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3101 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
/* both the ULP TX and ULP RX PBL windows cover the same region */
3102 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3103 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3104 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3108 * t3_set_proto_sram - set the contents of the protocol sram
3109 * @adapter: the adapter
3110 * @data: the protocol image
3112 * Write the contents of the protocol SRAM.
3114 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
/* NOTE(review): braces, the loop-variable declaration, the -EIO return and
 * the final 'return 0' are elided in this excerpt. */
3117 const __be32 *buf = (const __be32 *)data;
/* each SRAM line is five big-endian words, fed through FIELD5..FIELD1 */
3119 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3120 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3121 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3122 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3123 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3124 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
/* bit 31 triggers the write of line i; wait for it to self-clear */
3126 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3127 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3130 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/* Program one of TP's two packet-trace filters (Tx = index 0, Rx = index 1)
 * with packed key/mask words built from the trace parameters.
 * NOTE(review): braces and the key[2] (destination IP) assignment are
 * elided in this excerpt. */
3135 void t3_config_trace_filter(struct adapter *adapter,
3136 const struct trace_params *tp, int filter_index,
3137 int invert, int enable)
3139 u32 addr, key[4], mask[4];
/* pack source port/IP, dest port/IP, proto/vlan/intf into four words */
3141 key[0] = tp->sport | (tp->sip << 16);
3142 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3144 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3146 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3147 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3148 mask[2] = tp->dip_mask;
3149 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* control bits in the top of key[3]: bit 29 and bit 28 correspond to the
 * invert/enable arguments — the guarding conditionals are elided here */
3152 key[3] |= (1 << 29);
3154 key[3] |= (1 << 28);
3156 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
/* key/mask words interleave in consecutive indirect registers */
3157 tp_wr_indirect(adapter, addr++, key[0]);
3158 tp_wr_indirect(adapter, addr++, mask[0]);
3159 tp_wr_indirect(adapter, addr++, key[1]);
3160 tp_wr_indirect(adapter, addr++, mask[1]);
3161 tp_wr_indirect(adapter, addr++, key[2]);
3162 tp_wr_indirect(adapter, addr++, mask[2]);
3163 tp_wr_indirect(adapter, addr++, key[3]);
3164 tp_wr_indirect(adapter, addr, mask[3]);
3165 t3_read_reg(adapter, A_TP_PIO_DATA); /* flush the posted writes */
3169 * t3_config_sched - configure a HW traffic scheduler
3170 * @adap: the adapter
3171 * @kbps: target rate in Kbps
3172 * @sched: the scheduler index
3174 * Configure a HW scheduler for the target rate
3176 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
/* NOTE(review): lines elided in this excerpt include braces, the tps
 * computation inside the loop, the best-candidate bookkeeping, the error
 * return and the odd/even-scheduler conditional around the two 'v'
 * assignments. */
3178 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3179 unsigned int clk = adap->params.vpd.cclk * 1000;
3180 unsigned int selected_cpt = 0, selected_bpt = 0;
/* search (clocks-per-tick, bytes-per-tick) pairs for the closest rate */
3183 kbps *= 125; /* -> bytes */
3184 for (cpt = 1; cpt <= 255; cpt++) {
3186 bpt = (kbps + tps / 2) / tps;
3187 if (bpt > 0 && bpt <= 255) {
3189 delta = v >= kbps ? v - kbps : kbps - v;
3190 if (delta <= mindelta) {
3195 } else if (selected_cpt)
/* two schedulers share each rate-limit register; patch only our half */
3201 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3202 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3203 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3205 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3207 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3208 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Run-time TP initialization: configure VLAN acceleration, then either
 * start the offload free-list initialization (offload adapters, waiting
 * for completion) or simply reset TP (NIC-only adapters).
 * NOTE(review): braces, tp_config() call, the 'busy' declaration and the
 * return path are elided in this excerpt. */
3212 static int tp_init(struct adapter *adap, const struct tp_params *p)
3217 t3_set_vlan_accel(adap, 3, 0);
3219 if (is_offload(adap)) {
3220 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3221 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3222 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3225 CH_ERR(adap, "TP initialization timed out\n");
3229 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Activate the requested subset of MPS ports. Rejects bits beyond the
 * adapter's port count. NOTE(review): braces, the -EINVAL return and the
 * final 'return 0' are elided in this excerpt. */
3233 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3235 if (port_mask & ~((1 << adap->params.nports) - 1))
3237 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3238 port_mask << S_PORT0ACTIVE);
3243 * Perform the bits of HW initialization that are dependent on the Tx
3244 * channels being used.
3246 static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
/* NOTE(review): braces, the loop-variable declaration and one MPS flag
 * continuation line are elided in this excerpt. */
3250 if (chan_map != 3) { /* one channel */
/* single-channel: no round-robin arbitration, all Tx memory to that port */
3251 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3252 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3253 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3254 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3255 F_TPTXPORT1EN | F_PORT1ACTIVE));
3256 t3_write_reg(adap, A_PM1_TX_CFG,
3257 chan_map == 1 ? 0xffffffff : 0);
3258 } else { /* two channels */
/* dual-channel: round-robin both ULP blocks and split Tx memory evenly */
3259 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3260 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3261 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3262 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3263 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3264 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3266 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3267 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3268 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3269 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
/* alternate Tx modulation queues between the two channels */
3270 for (i = 0; i < 16; i++)
3271 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3272 (i << 16) | 0x1010);
/* Calibrate the XGMAC analog impedance. XAUI boards poll the hardware
 * auto-calibration result; RGMII boards program fixed pull-up/pull-down
 * values. NOTE(review): braces, declarations, the retry delay, the -1
 * error return and the success returns are elided in this excerpt. */
3276 static int calibrate_xgm(struct adapter *adapter)
3278 if (uses_xaui(adapter)) {
/* retry the auto-calibration a few times before declaring failure */
3281 for (i = 0; i < 5; ++i) {
3282 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3283 t3_read_reg(adapter, A_XGM_XAUI_IMP); /* flush */
3285 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3286 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3287 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3288 V_XAUIIMP(G_CALIMP(v) >> 2));
3292 CH_ERR(adapter, "MAC calibration failed\n");
3295 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3296 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3297 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3298 F_XGM_IMPSETUPDATE);
3303 static void calibrate_xgm_t3b(struct adapter *adapter)
3305 if (!uses_xaui(adapter)) {
3306 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3307 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3308 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3309 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3310 F_XGM_IMPSETUPDATE);
3311 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3313 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3314 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/*
 * DRAM timing parameters for an MC7 memory controller, in controller-clock
 * cycles.  One instance per supported memory speed grade; RefCyc is indexed
 * by the device density read from the MC7 configuration register.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* ACTIVATE to PRECHARGE delay */
	unsigned char ActToRdWrDly;	/* ACTIVATE to READ/WRITE delay */
	unsigned char PreCyc;		/* PRECHARGE cycle time */
	unsigned char RefCyc[5];	/* REFRESH cycle time, per density */
	unsigned char BkCyc;		/* bank cycle time */
	unsigned char WrToRdDly;	/* WRITE to READ delay */
	unsigned char RdToWrDly;	/* READ to WRITE delay */
};
3329 * Write a value to a register and check that the write completed. These
3330 * writes normally complete in a cycle or two, so one read should suffice.
3331 * The very first read exists to flush the posted write to the device.
3333 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3335 t3_write_reg(adapter, addr, val);
3336 t3_read_reg(adapter, addr); /* flush */
3337 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3339 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3343 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3345 static const unsigned int mc7_mode[] = {
3346 0x632, 0x642, 0x652, 0x432, 0x442
3348 static const struct mc7_timing_params mc7_timings[] = {
3349 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3350 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3351 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3352 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3353 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3357 unsigned int width, density, slow, attempts;
3358 struct adapter *adapter = mc7->adapter;
3359 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3364 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3365 slow = val & F_SLOW;
3366 width = G_WIDTH(val);
3367 density = G_DEN(val);
3369 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3370 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3374 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3375 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3377 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3378 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3379 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3385 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3386 V_ACTTOPREDLY(p->ActToPreDly) |
3387 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3388 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3389 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3391 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3392 val | F_CLKEN | F_TERM150);
3393 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3396 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3401 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3402 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3403 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3404 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3408 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3409 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3413 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3414 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3415 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3416 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3417 mc7_mode[mem_type]) ||
3418 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3419 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3422 /* clock value is in KHz */
3423 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3424 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3426 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3427 F_PERREFEN | V_PREREFDIV(mc7_clock));
3428 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3430 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3431 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3432 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3433 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3434 (mc7->size << width) - 1);
3435 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3436 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3441 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3442 } while ((val & F_BUSY) && --attempts);
3444 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3448 /* Enable normal memory accesses. */
3449 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3456 static void config_pcie(struct adapter *adap)
3458 static const u16 ack_lat[4][6] = {
3459 {237, 416, 559, 1071, 2095, 4143},
3460 {128, 217, 289, 545, 1057, 2081},
3461 {73, 118, 154, 282, 538, 1050},
3462 {67, 107, 86, 150, 278, 534}
3464 static const u16 rpl_tmr[4][6] = {
3465 {711, 1248, 1677, 3213, 6285, 12429},
3466 {384, 651, 867, 1635, 3171, 6243},
3467 {219, 354, 462, 846, 1614, 3150},
3468 {201, 321, 258, 450, 834, 1602}
3472 unsigned int log2_width, pldsize;
3473 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3475 pci_read_config_word(adap->pdev,
3476 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3478 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3480 pci_read_config_word(adap->pdev, 0x2, &devid);
3481 if (devid == 0x37) {
3482 pci_write_config_word(adap->pdev,
3483 adap->params.pci.pcie_cap_addr +
3485 val & ~PCI_EXP_DEVCTL_READRQ &
3486 ~PCI_EXP_DEVCTL_PAYLOAD);
3490 pci_read_config_word(adap->pdev,
3491 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3494 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3495 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3496 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3497 log2_width = fls(adap->params.pci.width) - 1;
3498 acklat = ack_lat[log2_width][pldsize];
3499 if (val & 1) /* check LOsEnable */
3500 acklat += fst_trn_tx * 4;
3501 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3503 if (adap->params.rev == 0)
3504 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3505 V_T3A_ACKLAT(M_T3A_ACKLAT),
3506 V_T3A_ACKLAT(acklat));
3508 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3511 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3512 V_REPLAYLMT(rpllmt));
3514 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3515 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3516 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3517 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3521 * Initialize and configure T3 HW modules. This performs the
3522 * initialization steps that need to be done once after a card is reset.
3523 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3525 * fw_params are passed to FW and their value is platform dependent. Only the
3526 * top 8 bits are available for use, the rest must be 0.
3528 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3530 int err = -EIO, attempts, i;
3531 const struct vpd_params *vpd = &adapter->params.vpd;
3533 if (adapter->params.rev > 0)
3534 calibrate_xgm_t3b(adapter);
3535 else if (calibrate_xgm(adapter))
3539 partition_mem(adapter, &adapter->params.tp);
3541 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3542 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3543 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3544 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3545 adapter->params.mc5.nfilters,
3546 adapter->params.mc5.nroutes))
3549 for (i = 0; i < 32; i++)
3550 if (clear_sge_ctxt(adapter, i, F_CQ))
3554 if (tp_init(adapter, &adapter->params.tp))
3557 t3_tp_set_coalescing_size(adapter,
3558 min(adapter->params.sge.max_pkt_size,
3559 MAX_RX_COALESCING_LEN), 1);
3560 t3_tp_set_max_rxsize(adapter,
3561 min(adapter->params.sge.max_pkt_size, 16384U));
3562 ulp_config(adapter, &adapter->params.tp);
3564 if (is_pcie(adapter))
3565 config_pcie(adapter);
3567 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3568 F_DMASTOPEN | F_CLIDECEN);
3570 if (adapter->params.rev == T3_REV_C)
3571 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3572 F_CFG_CQE_SOP_MASK);
3574 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3575 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3576 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3577 chan_init_hw(adapter, adapter->params.chan_map);
3578 t3_sge_init(adapter, &adapter->params.sge);
3580 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3582 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3583 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3584 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3585 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3588 do { /* wait for uP to initialize */
3590 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3592 CH_ERR(adapter, "uP initialization timed out\n");
3602 * get_pci_mode - determine a card's PCI mode
3603 * @adapter: the adapter
3604 * @p: where to store the PCI settings
3606 * Determines a card's PCI mode and associated parameters, such as speed
3609 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3611 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3612 u32 pci_mode, pcie_cap;
3614 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3618 p->variant = PCI_VARIANT_PCIE;
3619 p->pcie_cap_addr = pcie_cap;
3620 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3622 p->width = (val >> 4) & 0x3f;
3626 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3627 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3628 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3629 pci_mode = G_PCIXINITPAT(pci_mode);
3631 p->variant = PCI_VARIANT_PCI;
3632 else if (pci_mode < 4)
3633 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3634 else if (pci_mode < 8)
3635 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3637 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3641 * init_link_config - initialize a link's SW state
3642 * @lc: structure holding the link state
3643 * @ai: information about the current card
3645 * Initializes the SW state maintained for each link, including the link's
3646 * capabilities and default speed/duplex/flow-control/autonegotiation
3649 static void init_link_config(struct link_config *lc, unsigned int caps)
3651 lc->supported = caps;
3652 lc->requested_speed = lc->speed = SPEED_INVALID;
3653 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3654 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3655 if (lc->supported & SUPPORTED_Autoneg) {
3656 lc->advertising = lc->supported;
3657 lc->autoneg = AUTONEG_ENABLE;
3658 lc->requested_fc |= PAUSE_AUTONEG;
3660 lc->advertising = 0;
3661 lc->autoneg = AUTONEG_DISABLE;
3666 * mc7_calc_size - calculate MC7 memory size
3667 * @cfg: the MC7 configuration
3669 * Calculates the size of an MC7 memory in bytes from the value of its
3670 * configuration register.
3672 static unsigned int mc7_calc_size(u32 cfg)
3674 unsigned int width = G_WIDTH(cfg);
3675 unsigned int banks = !!(cfg & F_BKS) + 1;
3676 unsigned int org = !!(cfg & F_ORG) + 1;
3677 unsigned int density = G_DEN(cfg);
3678 unsigned int MBs = ((256 << density) * banks) / (org << width);
3683 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3684 unsigned int base_addr, const char *name)
3688 mc7->adapter = adapter;
3690 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3691 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3692 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3693 mc7->width = G_WIDTH(cfg);
3696 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3700 mac->adapter = adapter;
3701 pci_read_config_word(adapter->pdev, 0x2, &devid);
3703 if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3705 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3708 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3709 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3710 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3711 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3716 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3718 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3720 mi1_init(adapter, ai);
3721 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3722 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3723 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3724 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3725 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3726 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3728 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3731 /* Enable MAC clocks so we can access the registers */
3732 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3733 t3_read_reg(adapter, A_XGM_PORT_CFG);
3735 val |= F_CLKDIVRESET_;
3736 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3737 t3_read_reg(adapter, A_XGM_PORT_CFG);
3738 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3739 t3_read_reg(adapter, A_XGM_PORT_CFG);
3743 * Reset the adapter.
3744 * Older PCIe cards lose their config space during reset, PCI-X
3747 int t3_reset_adapter(struct adapter *adapter)
3749 int i, save_and_restore_pcie =
3750 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3753 if (save_and_restore_pcie)
3754 pci_save_state(adapter->pdev);
3755 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3758 * Delay. Give Some time to device to reset fully.
3759 * XXX The delay time should be modified.
3761 for (i = 0; i < 10; i++) {
3763 pci_read_config_word(adapter->pdev, 0x00, &devid);
3764 if (devid == 0x1425)
3768 if (devid != 0x1425)
3771 if (save_and_restore_pcie)
3772 pci_restore_state(adapter->pdev);
3776 static int init_parity(struct adapter *adap)
3780 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3783 for (err = i = 0; !err && i < 16; i++)
3784 err = clear_sge_ctxt(adap, i, F_EGRESS);
3785 for (i = 0xfff0; !err && i <= 0xffff; i++)
3786 err = clear_sge_ctxt(adap, i, F_EGRESS);
3787 for (i = 0; !err && i < SGE_QSETS; i++)
3788 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3792 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3793 for (i = 0; i < 4; i++)
3794 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3795 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3796 F_IBQDBGWR | V_IBQDBGQID(i) |
3797 V_IBQDBGADDR(addr));
3798 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3799 F_IBQDBGBUSY, 0, 2, 1);
3807 * Initialize adapter SW state for the various HW modules, set initial values
3808 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3811 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3815 unsigned int i, j = -1;
3817 get_pci_mode(adapter, &adapter->params.pci);
3819 adapter->params.info = ai;
3820 adapter->params.nports = ai->nports0 + ai->nports1;
3821 adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3822 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3824 * We used to only run the "adapter check task" once a second if
3825 * we had PHYs which didn't support interrupts (we would check
3826 * their link status once a second). Now we check other conditions
3827 * in that routine which could potentially impose a very high
3828 * interrupt load on the system. As such, we now always scan the
3829 * adapter state once a second ...
3831 adapter->params.linkpoll_period = 10;
3832 adapter->params.stats_update_period = is_10G(adapter) ?
3833 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3834 adapter->params.pci.vpd_cap_addr =
3835 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3836 ret = get_vpd_params(adapter, &adapter->params.vpd);
3840 if (reset && t3_reset_adapter(adapter))
3843 t3_sge_prep(adapter, &adapter->params.sge);
3845 if (adapter->params.vpd.mclk) {
3846 struct tp_params *p = &adapter->params.tp;
3848 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3849 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3850 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3852 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3853 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3854 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3855 p->cm_size = t3_mc7_size(&adapter->cm);
3856 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3857 p->chan_tx_size = p->pmtx_size / p->nchan;
3858 p->rx_pg_size = 64 * 1024;
3859 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3860 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3861 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3862 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3863 adapter->params.rev > 0 ? 12 : 6;
3866 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3867 t3_mc7_size(&adapter->pmtx) &&
3868 t3_mc7_size(&adapter->cm);
3870 if (is_offload(adapter)) {
3871 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3872 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3873 DEFAULT_NFILTERS : 0;
3874 adapter->params.mc5.nroutes = 0;
3875 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3877 init_mtus(adapter->params.mtus);
3878 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3881 early_hw_init(adapter, ai);
3882 ret = init_parity(adapter);
3886 for_each_port(adapter, i) {
3888 const struct port_type_info *pti;
3889 struct port_info *p = adap2pinfo(adapter, i);
3891 while (!adapter->params.vpd.port_type[++j])
3894 pti = &port_types[adapter->params.vpd.port_type[j]];
3895 if (!pti->phy_prep) {
3896 CH_ALERT(adapter, "Invalid port type index %d\n",
3897 adapter->params.vpd.port_type[j]);
3901 p->phy.mdio.dev = adapter->port[i];
3902 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3906 mac_prep(&p->mac, adapter, j);
3909 * The VPD EEPROM stores the base Ethernet address for the
3910 * card. A port's address is derived from the base by adding
3911 * the port's index to the base's low octet.
3913 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3914 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3916 memcpy(adapter->port[i]->dev_addr, hw_addr,
3918 memcpy(adapter->port[i]->perm_addr, hw_addr,
3920 init_link_config(&p->link_config, p->phy.caps);
3921 p->phy.ops->power_down(&p->phy, 1);
3924 * If the PHY doesn't support interrupts for link status
3925 * changes, schedule a scan of the adapter links at least
3928 if (!(p->phy.caps & SUPPORTED_IRQ) &&
3929 adapter->params.linkpoll_period > 10)
3930 adapter->params.linkpoll_period = 10;
3936 void t3_led_ready(struct adapter *adapter)
3938 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3942 int t3_replay_prep_adapter(struct adapter *adapter)
3944 const struct adapter_info *ai = adapter->params.info;
3945 unsigned int i, j = -1;
3948 early_hw_init(adapter, ai);
3949 ret = init_parity(adapter);
3953 for_each_port(adapter, i) {
3954 const struct port_type_info *pti;
3955 struct port_info *p = adap2pinfo(adapter, i);
3957 while (!adapter->params.vpd.port_type[++j])
3960 pti = &port_types[adapter->params.vpd.port_type[j]];
3961 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3964 p->phy.ops->power_down(&p->phy, 1);