1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #include <linux/delay.h>
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4_values.h"
39 #include "t4fw_api.h"
40 #include "t4fw_version.h"
41
42 /**
43  *      t4_wait_op_done_val - wait until an operation is completed
44  *      @adapter: the adapter performing the operation
45  *      @reg: the register to check for completion
46  *      @mask: a single-bit field within @reg that indicates completion
47  *      @polarity: the value of the field when the operation is completed
48  *      @attempts: number of check iterations
49  *      @delay: delay in usecs between iterations
50  *      @valp: where to store the value of the register at completion time
51  *
52  *      Wait until an operation is completed by checking a bit in a register
53  *      up to @attempts times.  If @valp is not NULL the value of the register
54  *      at the time it indicated completion is stored there.  Returns 0 if the
55  *      operation completes and -EAGAIN otherwise.
56  */
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58                                int polarity, int attempts, int delay, u32 *valp)
59 {
60         while (1) {
61                 u32 val = t4_read_reg(adapter, reg);
62
63                 if (!!(val & mask) == polarity) {
64                         if (valp)
65                                 *valp = val;
66                         return 0;
67                 }
68                 if (--attempts == 0)
69                         return -EAGAIN;
70                 if (delay)
71                         udelay(delay);
72         }
73 }
74
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76                                   int polarity, int attempts, int delay)
77 {
78         return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
79                                    delay, NULL);
80 }
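
/* Usage sketch (illustrative only, not part of the driver): STATUS_REG_A and
 * BUSY_F below are placeholder names, not real register definitions.  A caller
 * waiting for a busy bit to clear, checking up to 5 times with 10us between
 * checks, might do something like:
 *
 *	ret = t4_wait_op_done(adap, STATUS_REG_A, BUSY_F, 0, 5, 10);
 *	if (ret)
 *		return ret;	(ret is -EAGAIN if the bit never cleared)
 */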
81
82 /**
83  *      t4_set_reg_field - set a register field to a value
84  *      @adapter: the adapter to program
85  *      @addr: the register address
86  *      @mask: specifies the portion of the register to modify
87  *      @val: the new value for the register field
88  *
89  *      Sets a register field specified by the supplied mask to the
90  *      given value.
91  */
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93                       u32 val)
94 {
95         u32 v = t4_read_reg(adapter, addr) & ~mask;
96
97         t4_write_reg(adapter, addr, v | val);
98         (void) t4_read_reg(adapter, addr);      /* flush */
99 }
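
/* Usage sketch (illustrative only): CTRL_REG_A below is a placeholder name,
 * not a real register definition.  Forcing a hypothetical 2-bit field in
 * bits 1:0 to the value 2, while leaving the rest of the register untouched,
 * would look like:
 *
 *	t4_set_reg_field(adap, CTRL_REG_A, 0x3, 0x2);
 */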
100
101 /**
102  *      t4_read_indirect - read indirectly addressed registers
103  *      @adap: the adapter
104  *      @addr_reg: register holding the indirect address
105  *      @data_reg: register holding the value of the indirect register
106  *      @vals: where the read register values are stored
107  *      @nregs: how many indirect registers to read
108  *      @start_idx: index of first indirect register to read
109  *
110  *      Reads registers that are accessed indirectly through an address/data
111  *      register pair.
112  */
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114                              unsigned int data_reg, u32 *vals,
115                              unsigned int nregs, unsigned int start_idx)
116 {
117         while (nregs--) {
118                 t4_write_reg(adap, addr_reg, start_idx);
119                 *vals++ = t4_read_reg(adap, data_reg);
120                 start_idx++;
121         }
122 }
123
124 /**
125  *      t4_write_indirect - write indirectly addressed registers
126  *      @adap: the adapter
127  *      @addr_reg: register holding the indirect addresses
128  *      @data_reg: register holding the value for the indirect registers
129  *      @vals: values to write
130  *      @nregs: how many indirect registers to write
131  *      @start_idx: address of first indirect register to write
132  *
133  *      Writes a sequential block of registers that are accessed indirectly
134  *      through an address/data register pair.
135  */
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137                        unsigned int data_reg, const u32 *vals,
138                        unsigned int nregs, unsigned int start_idx)
139 {
140         while (nregs--) {
141                 t4_write_reg(adap, addr_reg, start_idx++);
142                 t4_write_reg(adap, data_reg, *vals++);
143         }
144 }
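
/* Usage sketch (illustrative only): ADDR_REG_A and DATA_REG_A below are
 * placeholders for a real address/data register pair.  Reading four
 * consecutive indirect registers starting at index 0 might look like:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, ADDR_REG_A, DATA_REG_A, vals, 4, 0);
 */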
145
146 /*
147  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148  * mechanism.  This guarantees that we get the real value even if we're
149  * operating within a Virtual Machine and the Hypervisor is trapping our
150  * Configuration Space accesses.
151  */
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153 {
154         u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
155
156         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
157                 req |= ENABLE_F;
158         else
159                 req |= T6_ENABLE_F;
160
161         if (is_t4(adap->params.chip))
162                 req |= LOCALCFG_F;
163
164         t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165         *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
166
167         /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168          * Configuration Space read.  (None of the other fields matter when
169          * ENABLE is 0 so a simple register write is easier than a
170          * read-modify-write via t4_set_reg_field().)
171          */
172         t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
173 }
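
/* Usage sketch (illustrative only): fetching the 32-bit word at Configuration
 * Space offset 0 (Vendor ID in bits 15:0, Device ID in bits 31:16) through the
 * backdoor rather than the normal pci_read_config_dword() path might look
 * like:
 *
 *	u32 id;
 *
 *	t4_hw_pci_read_cfg4(adap, PCI_VENDOR_ID, &id);
 */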
174
175 /*
176  * t4_report_fw_error - report firmware error
177  * @adap: the adapter
178  *
179  * The adapter firmware can indicate error conditions to the host.
180  * If the firmware has indicated an error, print out the reason for
181  * the firmware error.
182  */
183 static void t4_report_fw_error(struct adapter *adap)
184 {
185         static const char *const reason[] = {
186                 "Crash",                        /* PCIE_FW_EVAL_CRASH */
187                 "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
188                 "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
189                 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190                 "Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191                 "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
192                 "Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193                 "Reserved",                     /* reserved */
194         };
195         u32 pcie_fw;
196
197         pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198         if (pcie_fw & PCIE_FW_ERR_F)
199                 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200                         reason[PCIE_FW_EVAL_G(pcie_fw)]);
201 }
202
203 /*
204  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
205  */
206 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
207                          u32 mbox_addr)
208 {
209         for ( ; nflit; nflit--, mbox_addr += 8)
210                 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
211 }
212
213 /*
214  * Handle a FW assertion reported in a mailbox.
215  */
216 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
217 {
218         struct fw_debug_cmd asrt;
219
220         get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
221         dev_alert(adap->pdev_dev,
222                   "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
223                   asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
224                   be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
225 }
226
227 /**
228  *      t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
229  *      @adapter: the adapter
230  *      @cmd: the Firmware Mailbox Command or Reply
231  *      @size: command length in bytes
232  *      @access: the time (ms) needed to access the Firmware Mailbox
233  *      @execute: the time (ms) the command spent being executed
234  */
235 static void t4_record_mbox(struct adapter *adapter,
236                            const __be64 *cmd, unsigned int size,
237                            int access, int execute)
238 {
239         struct mbox_cmd_log *log = adapter->mbox_log;
240         struct mbox_cmd *entry;
241         int i;
242
243         entry = mbox_cmd_log_entry(log, log->cursor++);
244         if (log->cursor == log->size)
245                 log->cursor = 0;
246
247         for (i = 0; i < size / 8; i++)
248                 entry->cmd[i] = be64_to_cpu(cmd[i]);
249         while (i < MBOX_LEN / 8)
250                 entry->cmd[i++] = 0;
251         entry->timestamp = jiffies;
252         entry->seqno = log->seqno++;
253         entry->access = access;
254         entry->execute = execute;
255 }
256
257 /**
258  *      t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
259  *      @adap: the adapter
260  *      @mbox: index of the mailbox to use
261  *      @cmd: the command to write
262  *      @size: command length in bytes
263  *      @rpl: where to optionally store the reply
264  *      @sleep_ok: if true we may sleep while awaiting command completion
265  *      @timeout: time to wait for command to finish before timing out
266  *
267  *      Sends the given command to FW through the selected mailbox and waits
268  *      for the FW to execute the command.  If @rpl is not %NULL it is used to
269  *      store the FW's reply to the command.  The command and its optional
270  *      reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
271  *      to respond.  @sleep_ok determines whether we may sleep while awaiting
272  *      the response.  If sleeping is allowed we use progressive backoff
273  *      otherwise we spin.
274  *
275  *      The return value is 0 on success or a negative errno on failure.  A
276  *      failure can happen either because we are not able to execute the
277  *      command or FW executes it but signals an error.  In the latter case
278  *      the return value is the error code indicated by FW (negated).
279  */
280 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
281                             int size, void *rpl, bool sleep_ok, int timeout)
282 {
283         static const int delay[] = {
284                 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
285         };
286
287         struct mbox_list entry;
288         u16 access = 0;
289         u16 execute = 0;
290         u32 v;
291         u64 res;
292         int i, ms, delay_idx, ret;
293         const __be64 *p = cmd;
294         u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
295         u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
296         __be64 cmd_rpl[MBOX_LEN / 8];
297         u32 pcie_fw;
298
299         if ((size & 15) || size > MBOX_LEN)
300                 return -EINVAL;
301
302         /*
303          * If the device is off-line, as in EEH, commands will time out.
304          * Fail them early so we don't waste time waiting.
305          */
306         if (adap->pdev->error_state != pci_channel_io_normal)
307                 return -EIO;
308
309         /* If we have a negative timeout, that implies that we can't sleep. */
310         if (timeout < 0) {
311                 sleep_ok = false;
312                 timeout = -timeout;
313         }
314
315         /* Queue ourselves onto the mailbox access list.  When our entry is at
316          * the front of the list, we have rights to access the mailbox.  So we
317          * wait [for a while] till we're at the front [or bail out with an
318          * EBUSY] ...
319          */
320         spin_lock(&adap->mbox_lock);
321         list_add_tail(&entry.list, &adap->mlist.list);
322         spin_unlock(&adap->mbox_lock);
323
324         delay_idx = 0;
325         ms = delay[0];
326
327         for (i = 0; ; i += ms) {
328                 /* If we've waited too long, return a busy indication.  This
329                  * really ought to be based on our initial position in the
330                  * mailbox access list but this is a start.  We very rarely
331                  * contend on access to the mailbox ...
332                  */
333                 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
334                 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
335                         spin_lock(&adap->mbox_lock);
336                         list_del(&entry.list);
337                         spin_unlock(&adap->mbox_lock);
338                         ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
339                         t4_record_mbox(adap, cmd, size, access, ret);
340                         return ret;
341                 }
342
343                 /* If we're at the head, break out and start the mailbox
344                  * protocol.
345                  */
346                 if (list_first_entry(&adap->mlist.list, struct mbox_list,
347                                      list) == &entry)
348                         break;
349
350                 /* Delay for a bit before checking again ... */
351                 if (sleep_ok) {
352                         ms = delay[delay_idx];  /* last element may repeat */
353                         if (delay_idx < ARRAY_SIZE(delay) - 1)
354                                 delay_idx++;
355                         msleep(ms);
356                 } else {
357                         mdelay(ms);
358                 }
359         }
360
361         /* Loop trying to get ownership of the mailbox.  Return an error
362          * if we can't gain ownership.
363          */
364         v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
365         for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
366                 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367         if (v != MBOX_OWNER_DRV) {
368                 spin_lock(&adap->mbox_lock);
369                 list_del(&entry.list);
370                 spin_unlock(&adap->mbox_lock);
371                 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
372                 t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
373                 return ret;
374         }
375
376         /* Copy in the new mailbox command and send it on its way ... */
377         t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
378         for (i = 0; i < size; i += 8)
379                 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
380
381         t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
382         t4_read_reg(adap, ctl_reg);          /* flush write */
383
384         delay_idx = 0;
385         ms = delay[0];
386
387         for (i = 0;
388              !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
389              i < timeout;
390              i += ms) {
391                 if (sleep_ok) {
392                         ms = delay[delay_idx];  /* last element may repeat */
393                         if (delay_idx < ARRAY_SIZE(delay) - 1)
394                                 delay_idx++;
395                         msleep(ms);
396                 } else
397                         mdelay(ms);
398
399                 v = t4_read_reg(adap, ctl_reg);
400                 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
401                         if (!(v & MBMSGVALID_F)) {
402                                 t4_write_reg(adap, ctl_reg, 0);
403                                 continue;
404                         }
405
406                         get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
407                         res = be64_to_cpu(cmd_rpl[0]);
408
409                         if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
410                                 fw_asrt(adap, data_reg);
411                                 res = FW_CMD_RETVAL_V(EIO);
412                         } else if (rpl) {
413                                 memcpy(rpl, cmd_rpl, size);
414                         }
415
416                         t4_write_reg(adap, ctl_reg, 0);
417
418                         execute = i + ms;
419                         t4_record_mbox(adap, cmd_rpl,
420                                        MBOX_LEN, access, execute);
421                         spin_lock(&adap->mbox_lock);
422                         list_del(&entry.list);
423                         spin_unlock(&adap->mbox_lock);
424                         return -FW_CMD_RETVAL_G((int)res);
425                 }
426         }
427
428         ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
429         t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
430         dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
431                 *(const u8 *)cmd, mbox);
432         t4_report_fw_error(adap);
433         spin_lock(&adap->mbox_lock);
434         list_del(&entry.list);
435         spin_unlock(&adap->mbox_lock);
436         t4_fatal_err(adap);
437         return ret;
438 }
439
440 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
441                     void *rpl, bool sleep_ok)
442 {
443         return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
444                                        FW_CMD_MAX_TIMEOUT);
445 }
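
/* Usage sketch (illustrative only): a typical caller builds a firmware command
 * in big-endian form and issues it through the mailbox via the t4_wr_mbox()
 * wrapper, roughly along the lines of the firmware reset helper elsewhere in
 * this file:
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
 *				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
 *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 *	c.val = cpu_to_be32(reset);
 *	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 */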
446
447 static int t4_edc_err_read(struct adapter *adap, int idx)
448 {
449         u32 edc_ecc_err_addr_reg;
450         u32 rdata_reg;
451
452         if (is_t4(adap->params.chip)) {
453                 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
454                 return 0;
455         }
456         if (idx != 0 && idx != 1) {
457                 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
458                 return 0;
459         }
460
461         edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
462         rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
463
464         CH_WARN(adap,
465                 "edc%d err addr 0x%x: 0x%x.\n",
466                 idx, edc_ecc_err_addr_reg,
467                 t4_read_reg(adap, edc_ecc_err_addr_reg));
468         CH_WARN(adap,
469                 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
470                 rdata_reg,
471                 (unsigned long long)t4_read_reg64(adap, rdata_reg),
472                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
473                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
474                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
475                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
476                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
477                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
478                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
479                 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
480
481         return 0;
482 }
483
484 /**
485  *      t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
486  *      @adap: the adapter
487  *      @win: PCI-E Memory Window to use
488  *      @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
489  *      @addr: address within indicated memory type
490  *      @len: amount of memory to transfer
491  *      @hbuf: host memory buffer
492  *      @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
493  *
494  *      Reads/writes an [almost] arbitrary memory region in the firmware: the
495  *      firmware memory address and host buffer must be aligned on 32-bit
496  *      boundaries; the length may be arbitrary.  The memory is transferred as
497  *      a raw byte sequence from/to the firmware's memory.  If this memory
498  *      contains data structures which contain multi-byte integers, it's the
499  *      caller's responsibility to perform appropriate byte order conversions.
500  */
501 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
502                  u32 len, void *hbuf, int dir)
503 {
504         u32 pos, offset, resid, memoffset;
505         u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
506         u32 *buf;
507
508         /* Argument sanity checks ...
509          */
510         if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
511                 return -EINVAL;
512         buf = (u32 *)hbuf;
513
514         /* It's convenient to be able to handle lengths which aren't a
515          * multiple of 32-bits because we often end up transferring files to
516          * the firmware.  So we'll handle that by normalizing the length here
517          * and then handling any residual transfer at the end.
518          */
519         resid = len & 0x3;
520         len -= resid;
521
522         /* Offset into the region of memory which is being accessed
523          * MEM_EDC0 = 0
524          * MEM_EDC1 = 1
525          * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
526          * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
527          */
528         edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
529         if (mtype != MEM_MC1)
530                 memoffset = (mtype * (edc_size * 1024 * 1024));
531         else {
532                 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
533                                                       MA_EXT_MEMORY0_BAR_A));
534                 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
535         }
536
537         /* Determine the PCIE_MEM_ACCESS_OFFSET */
538         addr = addr + memoffset;
539
540         /* Each PCI-E Memory Window is programmed with a window size -- or
541          * "aperture" -- which controls the granularity of its mapping onto
542          * adapter memory.  We need to grab that aperture in order to know
543          * how to use the specified window.  The window is also programmed
544          * with the base address of the Memory Window in BAR0's address
545          * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
546          * the address is relative to BAR0.
547          */
548         mem_reg = t4_read_reg(adap,
549                               PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
550                                                   win));
551         mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
552         mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
553         if (is_t4(adap->params.chip))
554                 mem_base -= adap->t4_bar0;
555         win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
556
557         /* Calculate our initial PCI-E Memory Window Position and Offset into
558          * that Window.
559          */
560         pos = addr & ~(mem_aperture-1);
561         offset = addr - pos;
562
563         /* Set up initial PCI-E Memory Window to cover the start of our
564          * transfer.  (Read it back to ensure that changes propagate before we
565          * attempt to use the new value.)
566          */
567         t4_write_reg(adap,
568                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
569                      pos | win_pf);
570         t4_read_reg(adap,
571                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
572
573         /* Transfer data to/from the adapter as long as there's an integral
574          * number of 32-bit transfers to complete.
575          *
576          * A note on Endianness issues:
577          *
578          * The "register" reads and writes below from/to the PCI-E Memory
579          * Window invoke the standard adapter Big-Endian to PCI-E Link
580          * Little-Endian "swizzel."  As a result, if we have the following
581          * Little-Endian "swizzle."  As a result, if we have the following
582          *
583          *     Memory:  ... | b0 | b1 | b2 | b3 | ...
584          *     Address:      i+0  i+1  i+2  i+3
585          *
586          * Then a read of the adapter memory via the PCI-E Memory Window
587          * will yield:
588          *
589          *     x = readl(i)
590          *         31                  0
591          *         [ b3 | b2 | b1 | b0 ]
592          *
593          * If this value is stored into local memory on a Little-Endian system
594          * it will show up correctly in local memory as:
595          *
596          *     ( ..., b0, b1, b2, b3, ... )
597          *
598          * But on a Big-Endian system, the store will show up in memory
599          * incorrectly swizzled as:
600          *
601          *     ( ..., b3, b2, b1, b0, ... )
602          *
603          * So we need to account for this in the reads and writes to the
604          * PCI-E Memory Window below by undoing the register read/write
605          * swizzles.
606          */
607         while (len > 0) {
608                 if (dir == T4_MEMORY_READ)
609                         *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
610                                                 mem_base + offset));
611                 else
612                         t4_write_reg(adap, mem_base + offset,
613                                      (__force u32)cpu_to_le32(*buf++));
614                 offset += sizeof(__be32);
615                 len -= sizeof(__be32);
616
617                 /* If we've reached the end of our current window aperture,
618                  * move the PCI-E Memory Window on to the next.  Note that
619                  * doing this here even when "len" may already be 0 lets us set up
620                  * the PCI-E Memory Window for a possible final residual
621                  * transfer below ...
622                  */
623                 if (offset == mem_aperture) {
624                         pos += mem_aperture;
625                         offset = 0;
626                         t4_write_reg(adap,
627                                 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
628                                                     win), pos | win_pf);
629                         t4_read_reg(adap,
630                                 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
631                                                     win));
632                 }
633         }
634
635         /* If the original transfer had a length which wasn't a multiple of
636          * 32-bits, now's where we need to finish off the transfer of the
637          * residual amount.  The PCI-E Memory Window has already been moved
638          * above (if necessary) to cover this final transfer.
639          */
640         if (resid) {
641                 union {
642                         u32 word;
643                         char byte[4];
644                 } last;
645                 unsigned char *bp;
646                 int i;
647
648                 if (dir == T4_MEMORY_READ) {
649                         last.word = le32_to_cpu(
650                                         (__force __le32)t4_read_reg(adap,
651                                                 mem_base + offset));
652                         for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
653                                 bp[i] = last.byte[i];
654                 } else {
655                         last.word = *buf;
656                         for (i = resid; i < 4; i++)
657                                 last.byte[i] = 0;
658                         t4_write_reg(adap, mem_base + offset,
659                                      (__force u32)cpu_to_le32(last.word));
660                 }
661         }
662
663         return 0;
664 }
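
/* Usage sketch (illustrative only): reading 128 bytes from the start of EDC0
 * through the driver's usual utility window (MEMWIN_NIC) might look like:
 *
 *	u32 data[32];
 *	int ret;
 *
 *	ret = t4_memory_rw(adap, MEMWIN_NIC, MEM_EDC0, 0, sizeof(data),
 *			   data, T4_MEMORY_READ);
 */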
665
666 /* Return the specified PCI-E Configuration Space register from our Physical
667  * Function.  We try first via a Firmware LDST Command since we prefer to let
668  * the firmware own all of these registers, but if that fails we go for it
669  * directly ourselves.
670  */
671 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
672 {
673         u32 val, ldst_addrspace;
674
675         /* Construct and send a Firmware LDST Command to retrieve the
676          * specified PCI-E Configuration Space register.
677          */
678         struct fw_ldst_cmd ldst_cmd;
679         int ret;
680
681         memset(&ldst_cmd, 0, sizeof(ldst_cmd));
682         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
683         ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
684                                                FW_CMD_REQUEST_F |
685                                                FW_CMD_READ_F |
686                                                ldst_addrspace);
687         ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
688         ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
689         ldst_cmd.u.pcie.ctrl_to_fn =
690                 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
691         ldst_cmd.u.pcie.r = reg;
692
693         /* If the LDST Command succeeds, return the result, otherwise
694          * fall through to reading it directly ourselves ...
695          */
696         ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
697                          &ldst_cmd);
698         if (ret == 0)
699                 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
700         else
701                 /* Read the desired Configuration Space register via the PCI-E
702                  * Backdoor mechanism.
703                  */
704                 t4_hw_pci_read_cfg4(adap, reg, &val);
705         return val;
706 }
707
708 /* Get the memory window base derived from the PCI base passed to it.
709  * The window aperture is currently unhandled, but there is no use case for
710  * it right now.
711  */
712 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
713                          u32 memwin_base)
714 {
715         u32 ret;
716
717         if (is_t4(adap->params.chip)) {
718                 u32 bar0;
719
720                 /* Truncation intentional: we only read the bottom 32-bits of
721                  * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
722                  * mechanism to read BAR0 instead of using
723                  * pci_resource_start() because we could be operating from
724                  * within a Virtual Machine which is trapping our accesses to
725                  * our Configuration Space and we need to set up the PCI-E
726                  * Memory Window decoders with the actual addresses which will
727                  * be coming across the PCI-E link.
728                  */
729                 bar0 = t4_read_pcie_cfg4(adap, pci_base);
730                 bar0 &= pci_mask;
731                 adap->t4_bar0 = bar0;
732
733                 ret = bar0 + memwin_base;
734         } else {
735                 /* For T5, only the relative offset inside the PCIe BAR is passed */
736                 ret = memwin_base;
737         }
738         return ret;
739 }
740
741 /* Get the default utility window (win0) used by everyone */
742 u32 t4_get_util_window(struct adapter *adap)
743 {
744         return t4_get_window(adap, PCI_BASE_ADDRESS_0,
745                              PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
746 }
747
748 /* Set up memory window for accessing adapter memory ranges.  (Read
749  * back MA register to ensure that changes propagate before we attempt
750  * to use the new values.)
751  */
752 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
753 {
754         t4_write_reg(adap,
755                      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
756                      memwin_base | BIR_V(0) |
757                      WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
758         t4_read_reg(adap,
759                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
760 }
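
/* Usage sketch (illustrative only): the two helpers above are typically paired
 * at probe time, after which t4_memory_rw() can use the window:
 *
 *	u32 memwin_base = t4_get_util_window(adap);
 *
 *	t4_setup_memwin(adap, memwin_base, MEMWIN_NIC);
 */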
761
762 /**
763  *      t4_get_regs_len - return the size of the chip's register set
764  *      @adapter: the adapter
765  *
766  *      Returns the size of the chip's BAR0 register space.
767  */
768 unsigned int t4_get_regs_len(struct adapter *adapter)
769 {
770         unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
771
772         switch (chip_version) {
773         case CHELSIO_T4:
774                 return T4_REGMAP_SIZE;
775
776         case CHELSIO_T5:
777         case CHELSIO_T6:
778                 return T5_REGMAP_SIZE;
779         }
780
781         dev_err(adapter->pdev_dev,
782                 "Unsupported chip version %d\n", chip_version);
783         return 0;
784 }
785
786 /**
787  *      t4_get_regs - read chip registers into provided buffer
788  *      @adap: the adapter
789  *      @buf: register buffer
790  *      @buf_size: size (in bytes) of register buffer
791  *
792  *      If the provided register buffer isn't large enough for the chip's
793  *      full register range, the register dump will be truncated to the
794  *      register buffer's size.
795  */
796 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
797 {
798         static const unsigned int t4_reg_ranges[] = {
799                 0x1008, 0x1108,
800                 0x1180, 0x1184,
801                 0x1190, 0x1194,
802                 0x11a0, 0x11a4,
803                 0x11b0, 0x11b4,
804                 0x11fc, 0x123c,
805                 0x1300, 0x173c,
806                 0x1800, 0x18fc,
807                 0x3000, 0x30d8,
808                 0x30e0, 0x30e4,
809                 0x30ec, 0x5910,
810                 0x5920, 0x5924,
811                 0x5960, 0x5960,
812                 0x5968, 0x5968,
813                 0x5970, 0x5970,
814                 0x5978, 0x5978,
815                 0x5980, 0x5980,
816                 0x5988, 0x5988,
817                 0x5990, 0x5990,
818                 0x5998, 0x5998,
819                 0x59a0, 0x59d4,
820                 0x5a00, 0x5ae0,
821                 0x5ae8, 0x5ae8,
822                 0x5af0, 0x5af0,
823                 0x5af8, 0x5af8,
824                 0x6000, 0x6098,
825                 0x6100, 0x6150,
826                 0x6200, 0x6208,
827                 0x6240, 0x6248,
828                 0x6280, 0x62b0,
829                 0x62c0, 0x6338,
830                 0x6370, 0x638c,
831                 0x6400, 0x643c,
832                 0x6500, 0x6524,
833                 0x6a00, 0x6a04,
834                 0x6a14, 0x6a38,
835                 0x6a60, 0x6a70,
836                 0x6a78, 0x6a78,
837                 0x6b00, 0x6b0c,
838                 0x6b1c, 0x6b84,
839                 0x6bf0, 0x6bf8,
840                 0x6c00, 0x6c0c,
841                 0x6c1c, 0x6c84,
842                 0x6cf0, 0x6cf8,
843                 0x6d00, 0x6d0c,
844                 0x6d1c, 0x6d84,
845                 0x6df0, 0x6df8,
846                 0x6e00, 0x6e0c,
847                 0x6e1c, 0x6e84,
848                 0x6ef0, 0x6ef8,
849                 0x6f00, 0x6f0c,
850                 0x6f1c, 0x6f84,
851                 0x6ff0, 0x6ff8,
852                 0x7000, 0x700c,
853                 0x701c, 0x7084,
854                 0x70f0, 0x70f8,
855                 0x7100, 0x710c,
856                 0x711c, 0x7184,
857                 0x71f0, 0x71f8,
858                 0x7200, 0x720c,
859                 0x721c, 0x7284,
860                 0x72f0, 0x72f8,
861                 0x7300, 0x730c,
862                 0x731c, 0x7384,
863                 0x73f0, 0x73f8,
864                 0x7400, 0x7450,
865                 0x7500, 0x7530,
866                 0x7600, 0x760c,
867                 0x7614, 0x761c,
868                 0x7680, 0x76cc,
869                 0x7700, 0x7798,
870                 0x77c0, 0x77fc,
871                 0x7900, 0x79fc,
872                 0x7b00, 0x7b58,
873                 0x7b60, 0x7b84,
874                 0x7b8c, 0x7c38,
875                 0x7d00, 0x7d38,
876                 0x7d40, 0x7d80,
877                 0x7d8c, 0x7ddc,
878                 0x7de4, 0x7e04,
879                 0x7e10, 0x7e1c,
880                 0x7e24, 0x7e38,
881                 0x7e40, 0x7e44,
882                 0x7e4c, 0x7e78,
883                 0x7e80, 0x7ea4,
884                 0x7eac, 0x7edc,
885                 0x7ee8, 0x7efc,
886                 0x8dc0, 0x8e04,
887                 0x8e10, 0x8e1c,
888                 0x8e30, 0x8e78,
889                 0x8ea0, 0x8eb8,
890                 0x8ec0, 0x8f6c,
891                 0x8fc0, 0x9008,
892                 0x9010, 0x9058,
893                 0x9060, 0x9060,
894                 0x9068, 0x9074,
895                 0x90fc, 0x90fc,
896                 0x9400, 0x9408,
897                 0x9410, 0x9458,
898                 0x9600, 0x9600,
899                 0x9608, 0x9638,
900                 0x9640, 0x96bc,
901                 0x9800, 0x9808,
902                 0x9820, 0x983c,
903                 0x9850, 0x9864,
904                 0x9c00, 0x9c6c,
905                 0x9c80, 0x9cec,
906                 0x9d00, 0x9d6c,
907                 0x9d80, 0x9dec,
908                 0x9e00, 0x9e6c,
909                 0x9e80, 0x9eec,
910                 0x9f00, 0x9f6c,
911                 0x9f80, 0x9fec,
912                 0xd004, 0xd004,
913                 0xd010, 0xd03c,
914                 0xdfc0, 0xdfe0,
915                 0xe000, 0xea7c,
916                 0xf000, 0x11190,
917                 0x19040, 0x1906c,
918                 0x19078, 0x19080,
919                 0x1908c, 0x190e4,
920                 0x190f0, 0x190f8,
921                 0x19100, 0x19110,
922                 0x19120, 0x19124,
923                 0x19150, 0x19194,
924                 0x1919c, 0x191b0,
925                 0x191d0, 0x191e8,
926                 0x19238, 0x1924c,
927                 0x193f8, 0x1943c,
928                 0x1944c, 0x19474,
929                 0x19490, 0x194e0,
930                 0x194f0, 0x194f8,
931                 0x19800, 0x19c08,
932                 0x19c10, 0x19c90,
933                 0x19ca0, 0x19ce4,
934                 0x19cf0, 0x19d40,
935                 0x19d50, 0x19d94,
936                 0x19da0, 0x19de8,
937                 0x19df0, 0x19e40,
938                 0x19e50, 0x19e90,
939                 0x19ea0, 0x19f4c,
940                 0x1a000, 0x1a004,
941                 0x1a010, 0x1a06c,
942                 0x1a0b0, 0x1a0e4,
943                 0x1a0ec, 0x1a0f4,
944                 0x1a100, 0x1a108,
945                 0x1a114, 0x1a120,
946                 0x1a128, 0x1a130,
947                 0x1a138, 0x1a138,
948                 0x1a190, 0x1a1c4,
949                 0x1a1fc, 0x1a1fc,
950                 0x1e040, 0x1e04c,
951                 0x1e284, 0x1e28c,
952                 0x1e2c0, 0x1e2c0,
953                 0x1e2e0, 0x1e2e0,
954                 0x1e300, 0x1e384,
955                 0x1e3c0, 0x1e3c8,
956                 0x1e440, 0x1e44c,
957                 0x1e684, 0x1e68c,
958                 0x1e6c0, 0x1e6c0,
959                 0x1e6e0, 0x1e6e0,
960                 0x1e700, 0x1e784,
961                 0x1e7c0, 0x1e7c8,
962                 0x1e840, 0x1e84c,
963                 0x1ea84, 0x1ea8c,
964                 0x1eac0, 0x1eac0,
965                 0x1eae0, 0x1eae0,
966                 0x1eb00, 0x1eb84,
967                 0x1ebc0, 0x1ebc8,
968                 0x1ec40, 0x1ec4c,
969                 0x1ee84, 0x1ee8c,
970                 0x1eec0, 0x1eec0,
971                 0x1eee0, 0x1eee0,
972                 0x1ef00, 0x1ef84,
973                 0x1efc0, 0x1efc8,
974                 0x1f040, 0x1f04c,
975                 0x1f284, 0x1f28c,
976                 0x1f2c0, 0x1f2c0,
977                 0x1f2e0, 0x1f2e0,
978                 0x1f300, 0x1f384,
979                 0x1f3c0, 0x1f3c8,
980                 0x1f440, 0x1f44c,
981                 0x1f684, 0x1f68c,
982                 0x1f6c0, 0x1f6c0,
983                 0x1f6e0, 0x1f6e0,
984                 0x1f700, 0x1f784,
985                 0x1f7c0, 0x1f7c8,
986                 0x1f840, 0x1f84c,
987                 0x1fa84, 0x1fa8c,
988                 0x1fac0, 0x1fac0,
989                 0x1fae0, 0x1fae0,
990                 0x1fb00, 0x1fb84,
991                 0x1fbc0, 0x1fbc8,
992                 0x1fc40, 0x1fc4c,
993                 0x1fe84, 0x1fe8c,
994                 0x1fec0, 0x1fec0,
995                 0x1fee0, 0x1fee0,
996                 0x1ff00, 0x1ff84,
997                 0x1ffc0, 0x1ffc8,
998                 0x20000, 0x2002c,
999                 0x20100, 0x2013c,
1000                 0x20190, 0x201a0,
1001                 0x201a8, 0x201b8,
1002                 0x201c4, 0x201c8,
1003                 0x20200, 0x20318,
1004                 0x20400, 0x204b4,
1005                 0x204c0, 0x20528,
1006                 0x20540, 0x20614,
1007                 0x21000, 0x21040,
1008                 0x2104c, 0x21060,
1009                 0x210c0, 0x210ec,
1010                 0x21200, 0x21268,
1011                 0x21270, 0x21284,
1012                 0x212fc, 0x21388,
1013                 0x21400, 0x21404,
1014                 0x21500, 0x21500,
1015                 0x21510, 0x21518,
1016                 0x2152c, 0x21530,
1017                 0x2153c, 0x2153c,
1018                 0x21550, 0x21554,
1019                 0x21600, 0x21600,
1020                 0x21608, 0x2161c,
1021                 0x21624, 0x21628,
1022                 0x21630, 0x21634,
1023                 0x2163c, 0x2163c,
1024                 0x21700, 0x2171c,
1025                 0x21780, 0x2178c,
1026                 0x21800, 0x21818,
1027                 0x21820, 0x21828,
1028                 0x21830, 0x21848,
1029                 0x21850, 0x21854,
1030                 0x21860, 0x21868,
1031                 0x21870, 0x21870,
1032                 0x21878, 0x21898,
1033                 0x218a0, 0x218a8,
1034                 0x218b0, 0x218c8,
1035                 0x218d0, 0x218d4,
1036                 0x218e0, 0x218e8,
1037                 0x218f0, 0x218f0,
1038                 0x218f8, 0x21a18,
1039                 0x21a20, 0x21a28,
1040                 0x21a30, 0x21a48,
1041                 0x21a50, 0x21a54,
1042                 0x21a60, 0x21a68,
1043                 0x21a70, 0x21a70,
1044                 0x21a78, 0x21a98,
1045                 0x21aa0, 0x21aa8,
1046                 0x21ab0, 0x21ac8,
1047                 0x21ad0, 0x21ad4,
1048                 0x21ae0, 0x21ae8,
1049                 0x21af0, 0x21af0,
1050                 0x21af8, 0x21c18,
1051                 0x21c20, 0x21c20,
1052                 0x21c28, 0x21c30,
1053                 0x21c38, 0x21c38,
1054                 0x21c80, 0x21c98,
1055                 0x21ca0, 0x21ca8,
1056                 0x21cb0, 0x21cc8,
1057                 0x21cd0, 0x21cd4,
1058                 0x21ce0, 0x21ce8,
1059                 0x21cf0, 0x21cf0,
1060                 0x21cf8, 0x21d7c,
1061                 0x21e00, 0x21e04,
1062                 0x22000, 0x2202c,
1063                 0x22100, 0x2213c,
1064                 0x22190, 0x221a0,
1065                 0x221a8, 0x221b8,
1066                 0x221c4, 0x221c8,
1067                 0x22200, 0x22318,
1068                 0x22400, 0x224b4,
1069                 0x224c0, 0x22528,
1070                 0x22540, 0x22614,
1071                 0x23000, 0x23040,
1072                 0x2304c, 0x23060,
1073                 0x230c0, 0x230ec,
1074                 0x23200, 0x23268,
1075                 0x23270, 0x23284,
1076                 0x232fc, 0x23388,
1077                 0x23400, 0x23404,
1078                 0x23500, 0x23500,
1079                 0x23510, 0x23518,
1080                 0x2352c, 0x23530,
1081                 0x2353c, 0x2353c,
1082                 0x23550, 0x23554,
1083                 0x23600, 0x23600,
1084                 0x23608, 0x2361c,
1085                 0x23624, 0x23628,
1086                 0x23630, 0x23634,
1087                 0x2363c, 0x2363c,
1088                 0x23700, 0x2371c,
1089                 0x23780, 0x2378c,
1090                 0x23800, 0x23818,
1091                 0x23820, 0x23828,
1092                 0x23830, 0x23848,
1093                 0x23850, 0x23854,
1094                 0x23860, 0x23868,
1095                 0x23870, 0x23870,
1096                 0x23878, 0x23898,
1097                 0x238a0, 0x238a8,
1098                 0x238b0, 0x238c8,
1099                 0x238d0, 0x238d4,
1100                 0x238e0, 0x238e8,
1101                 0x238f0, 0x238f0,
1102                 0x238f8, 0x23a18,
1103                 0x23a20, 0x23a28,
1104                 0x23a30, 0x23a48,
1105                 0x23a50, 0x23a54,
1106                 0x23a60, 0x23a68,
1107                 0x23a70, 0x23a70,
1108                 0x23a78, 0x23a98,
1109                 0x23aa0, 0x23aa8,
1110                 0x23ab0, 0x23ac8,
1111                 0x23ad0, 0x23ad4,
1112                 0x23ae0, 0x23ae8,
1113                 0x23af0, 0x23af0,
1114                 0x23af8, 0x23c18,
1115                 0x23c20, 0x23c20,
1116                 0x23c28, 0x23c30,
1117                 0x23c38, 0x23c38,
1118                 0x23c80, 0x23c98,
1119                 0x23ca0, 0x23ca8,
1120                 0x23cb0, 0x23cc8,
1121                 0x23cd0, 0x23cd4,
1122                 0x23ce0, 0x23ce8,
1123                 0x23cf0, 0x23cf0,
1124                 0x23cf8, 0x23d7c,
1125                 0x23e00, 0x23e04,
1126                 0x24000, 0x2402c,
1127                 0x24100, 0x2413c,
1128                 0x24190, 0x241a0,
1129                 0x241a8, 0x241b8,
1130                 0x241c4, 0x241c8,
1131                 0x24200, 0x24318,
1132                 0x24400, 0x244b4,
1133                 0x244c0, 0x24528,
1134                 0x24540, 0x24614,
1135                 0x25000, 0x25040,
1136                 0x2504c, 0x25060,
1137                 0x250c0, 0x250ec,
1138                 0x25200, 0x25268,
1139                 0x25270, 0x25284,
1140                 0x252fc, 0x25388,
1141                 0x25400, 0x25404,
1142                 0x25500, 0x25500,
1143                 0x25510, 0x25518,
1144                 0x2552c, 0x25530,
1145                 0x2553c, 0x2553c,
1146                 0x25550, 0x25554,
1147                 0x25600, 0x25600,
1148                 0x25608, 0x2561c,
1149                 0x25624, 0x25628,
1150                 0x25630, 0x25634,
1151                 0x2563c, 0x2563c,
1152                 0x25700, 0x2571c,
1153                 0x25780, 0x2578c,
1154                 0x25800, 0x25818,
1155                 0x25820, 0x25828,
1156                 0x25830, 0x25848,
1157                 0x25850, 0x25854,
1158                 0x25860, 0x25868,
1159                 0x25870, 0x25870,
1160                 0x25878, 0x25898,
1161                 0x258a0, 0x258a8,
1162                 0x258b0, 0x258c8,
1163                 0x258d0, 0x258d4,
1164                 0x258e0, 0x258e8,
1165                 0x258f0, 0x258f0,
1166                 0x258f8, 0x25a18,
1167                 0x25a20, 0x25a28,
1168                 0x25a30, 0x25a48,
1169                 0x25a50, 0x25a54,
1170                 0x25a60, 0x25a68,
1171                 0x25a70, 0x25a70,
1172                 0x25a78, 0x25a98,
1173                 0x25aa0, 0x25aa8,
1174                 0x25ab0, 0x25ac8,
1175                 0x25ad0, 0x25ad4,
1176                 0x25ae0, 0x25ae8,
1177                 0x25af0, 0x25af0,
1178                 0x25af8, 0x25c18,
1179                 0x25c20, 0x25c20,
1180                 0x25c28, 0x25c30,
1181                 0x25c38, 0x25c38,
1182                 0x25c80, 0x25c98,
1183                 0x25ca0, 0x25ca8,
1184                 0x25cb0, 0x25cc8,
1185                 0x25cd0, 0x25cd4,
1186                 0x25ce0, 0x25ce8,
1187                 0x25cf0, 0x25cf0,
1188                 0x25cf8, 0x25d7c,
1189                 0x25e00, 0x25e04,
1190                 0x26000, 0x2602c,
1191                 0x26100, 0x2613c,
1192                 0x26190, 0x261a0,
1193                 0x261a8, 0x261b8,
1194                 0x261c4, 0x261c8,
1195                 0x26200, 0x26318,
1196                 0x26400, 0x264b4,
1197                 0x264c0, 0x26528,
1198                 0x26540, 0x26614,
1199                 0x27000, 0x27040,
1200                 0x2704c, 0x27060,
1201                 0x270c0, 0x270ec,
1202                 0x27200, 0x27268,
1203                 0x27270, 0x27284,
1204                 0x272fc, 0x27388,
1205                 0x27400, 0x27404,
1206                 0x27500, 0x27500,
1207                 0x27510, 0x27518,
1208                 0x2752c, 0x27530,
1209                 0x2753c, 0x2753c,
1210                 0x27550, 0x27554,
1211                 0x27600, 0x27600,
1212                 0x27608, 0x2761c,
1213                 0x27624, 0x27628,
1214                 0x27630, 0x27634,
1215                 0x2763c, 0x2763c,
1216                 0x27700, 0x2771c,
1217                 0x27780, 0x2778c,
1218                 0x27800, 0x27818,
1219                 0x27820, 0x27828,
1220                 0x27830, 0x27848,
1221                 0x27850, 0x27854,
1222                 0x27860, 0x27868,
1223                 0x27870, 0x27870,
1224                 0x27878, 0x27898,
1225                 0x278a0, 0x278a8,
1226                 0x278b0, 0x278c8,
1227                 0x278d0, 0x278d4,
1228                 0x278e0, 0x278e8,
1229                 0x278f0, 0x278f0,
1230                 0x278f8, 0x27a18,
1231                 0x27a20, 0x27a28,
1232                 0x27a30, 0x27a48,
1233                 0x27a50, 0x27a54,
1234                 0x27a60, 0x27a68,
1235                 0x27a70, 0x27a70,
1236                 0x27a78, 0x27a98,
1237                 0x27aa0, 0x27aa8,
1238                 0x27ab0, 0x27ac8,
1239                 0x27ad0, 0x27ad4,
1240                 0x27ae0, 0x27ae8,
1241                 0x27af0, 0x27af0,
1242                 0x27af8, 0x27c18,
1243                 0x27c20, 0x27c20,
1244                 0x27c28, 0x27c30,
1245                 0x27c38, 0x27c38,
1246                 0x27c80, 0x27c98,
1247                 0x27ca0, 0x27ca8,
1248                 0x27cb0, 0x27cc8,
1249                 0x27cd0, 0x27cd4,
1250                 0x27ce0, 0x27ce8,
1251                 0x27cf0, 0x27cf0,
1252                 0x27cf8, 0x27d7c,
1253                 0x27e00, 0x27e04,
1254         };
1255
1256         static const unsigned int t5_reg_ranges[] = {
1257                 0x1008, 0x10c0,
1258                 0x10cc, 0x10f8,
1259                 0x1100, 0x1100,
1260                 0x110c, 0x1148,
1261                 0x1180, 0x1184,
1262                 0x1190, 0x1194,
1263                 0x11a0, 0x11a4,
1264                 0x11b0, 0x11b4,
1265                 0x11fc, 0x123c,
1266                 0x1280, 0x173c,
1267                 0x1800, 0x18fc,
1268                 0x3000, 0x3028,
1269                 0x3060, 0x30b0,
1270                 0x30b8, 0x30d8,
1271                 0x30e0, 0x30fc,
1272                 0x3140, 0x357c,
1273                 0x35a8, 0x35cc,
1274                 0x35ec, 0x35ec,
1275                 0x3600, 0x5624,
1276                 0x56cc, 0x56ec,
1277                 0x56f4, 0x5720,
1278                 0x5728, 0x575c,
1279                 0x580c, 0x5814,
1280                 0x5890, 0x589c,
1281                 0x58a4, 0x58ac,
1282                 0x58b8, 0x58bc,
1283                 0x5940, 0x59c8,
1284                 0x59d0, 0x59dc,
1285                 0x59fc, 0x5a18,
1286                 0x5a60, 0x5a70,
1287                 0x5a80, 0x5a9c,
1288                 0x5b94, 0x5bfc,
1289                 0x6000, 0x6020,
1290                 0x6028, 0x6040,
1291                 0x6058, 0x609c,
1292                 0x60a8, 0x614c,
1293                 0x7700, 0x7798,
1294                 0x77c0, 0x78fc,
1295                 0x7b00, 0x7b58,
1296                 0x7b60, 0x7b84,
1297                 0x7b8c, 0x7c54,
1298                 0x7d00, 0x7d38,
1299                 0x7d40, 0x7d80,
1300                 0x7d8c, 0x7ddc,
1301                 0x7de4, 0x7e04,
1302                 0x7e10, 0x7e1c,
1303                 0x7e24, 0x7e38,
1304                 0x7e40, 0x7e44,
1305                 0x7e4c, 0x7e78,
1306                 0x7e80, 0x7edc,
1307                 0x7ee8, 0x7efc,
1308                 0x8dc0, 0x8de0,
1309                 0x8df8, 0x8e04,
1310                 0x8e10, 0x8e84,
1311                 0x8ea0, 0x8f84,
1312                 0x8fc0, 0x9058,
1313                 0x9060, 0x9060,
1314                 0x9068, 0x90f8,
1315                 0x9400, 0x9408,
1316                 0x9410, 0x9470,
1317                 0x9600, 0x9600,
1318                 0x9608, 0x9638,
1319                 0x9640, 0x96f4,
1320                 0x9800, 0x9808,
1321                 0x9820, 0x983c,
1322                 0x9850, 0x9864,
1323                 0x9c00, 0x9c6c,
1324                 0x9c80, 0x9cec,
1325                 0x9d00, 0x9d6c,
1326                 0x9d80, 0x9dec,
1327                 0x9e00, 0x9e6c,
1328                 0x9e80, 0x9eec,
1329                 0x9f00, 0x9f6c,
1330                 0x9f80, 0xa020,
1331                 0xd004, 0xd004,
1332                 0xd010, 0xd03c,
1333                 0xdfc0, 0xdfe0,
1334                 0xe000, 0x1106c,
1335                 0x11074, 0x11088,
1336                 0x1109c, 0x1117c,
1337                 0x11190, 0x11204,
1338                 0x19040, 0x1906c,
1339                 0x19078, 0x19080,
1340                 0x1908c, 0x190e8,
1341                 0x190f0, 0x190f8,
1342                 0x19100, 0x19110,
1343                 0x19120, 0x19124,
1344                 0x19150, 0x19194,
1345                 0x1919c, 0x191b0,
1346                 0x191d0, 0x191e8,
1347                 0x19238, 0x19290,
1348                 0x193f8, 0x19428,
1349                 0x19430, 0x19444,
1350                 0x1944c, 0x1946c,
1351                 0x19474, 0x19474,
1352                 0x19490, 0x194cc,
1353                 0x194f0, 0x194f8,
1354                 0x19c00, 0x19c08,
1355                 0x19c10, 0x19c60,
1356                 0x19c94, 0x19ce4,
1357                 0x19cf0, 0x19d40,
1358                 0x19d50, 0x19d94,
1359                 0x19da0, 0x19de8,
1360                 0x19df0, 0x19e10,
1361                 0x19e50, 0x19e90,
1362                 0x19ea0, 0x19f24,
1363                 0x19f34, 0x19f34,
1364                 0x19f40, 0x19f50,
1365                 0x19f90, 0x19fb4,
1366                 0x19fc4, 0x19fe4,
1367                 0x1a000, 0x1a004,
1368                 0x1a010, 0x1a06c,
1369                 0x1a0b0, 0x1a0e4,
1370                 0x1a0ec, 0x1a0f8,
1371                 0x1a100, 0x1a108,
1372                 0x1a114, 0x1a120,
1373                 0x1a128, 0x1a130,
1374                 0x1a138, 0x1a138,
1375                 0x1a190, 0x1a1c4,
1376                 0x1a1fc, 0x1a1fc,
1377                 0x1e008, 0x1e00c,
1378                 0x1e040, 0x1e044,
1379                 0x1e04c, 0x1e04c,
1380                 0x1e284, 0x1e290,
1381                 0x1e2c0, 0x1e2c0,
1382                 0x1e2e0, 0x1e2e0,
1383                 0x1e300, 0x1e384,
1384                 0x1e3c0, 0x1e3c8,
1385                 0x1e408, 0x1e40c,
1386                 0x1e440, 0x1e444,
1387                 0x1e44c, 0x1e44c,
1388                 0x1e684, 0x1e690,
1389                 0x1e6c0, 0x1e6c0,
1390                 0x1e6e0, 0x1e6e0,
1391                 0x1e700, 0x1e784,
1392                 0x1e7c0, 0x1e7c8,
1393                 0x1e808, 0x1e80c,
1394                 0x1e840, 0x1e844,
1395                 0x1e84c, 0x1e84c,
1396                 0x1ea84, 0x1ea90,
1397                 0x1eac0, 0x1eac0,
1398                 0x1eae0, 0x1eae0,
1399                 0x1eb00, 0x1eb84,
1400                 0x1ebc0, 0x1ebc8,
1401                 0x1ec08, 0x1ec0c,
1402                 0x1ec40, 0x1ec44,
1403                 0x1ec4c, 0x1ec4c,
1404                 0x1ee84, 0x1ee90,
1405                 0x1eec0, 0x1eec0,
1406                 0x1eee0, 0x1eee0,
1407                 0x1ef00, 0x1ef84,
1408                 0x1efc0, 0x1efc8,
1409                 0x1f008, 0x1f00c,
1410                 0x1f040, 0x1f044,
1411                 0x1f04c, 0x1f04c,
1412                 0x1f284, 0x1f290,
1413                 0x1f2c0, 0x1f2c0,
1414                 0x1f2e0, 0x1f2e0,
1415                 0x1f300, 0x1f384,
1416                 0x1f3c0, 0x1f3c8,
1417                 0x1f408, 0x1f40c,
1418                 0x1f440, 0x1f444,
1419                 0x1f44c, 0x1f44c,
1420                 0x1f684, 0x1f690,
1421                 0x1f6c0, 0x1f6c0,
1422                 0x1f6e0, 0x1f6e0,
1423                 0x1f700, 0x1f784,
1424                 0x1f7c0, 0x1f7c8,
1425                 0x1f808, 0x1f80c,
1426                 0x1f840, 0x1f844,
1427                 0x1f84c, 0x1f84c,
1428                 0x1fa84, 0x1fa90,
1429                 0x1fac0, 0x1fac0,
1430                 0x1fae0, 0x1fae0,
1431                 0x1fb00, 0x1fb84,
1432                 0x1fbc0, 0x1fbc8,
1433                 0x1fc08, 0x1fc0c,
1434                 0x1fc40, 0x1fc44,
1435                 0x1fc4c, 0x1fc4c,
1436                 0x1fe84, 0x1fe90,
1437                 0x1fec0, 0x1fec0,
1438                 0x1fee0, 0x1fee0,
1439                 0x1ff00, 0x1ff84,
1440                 0x1ffc0, 0x1ffc8,
1441                 0x30000, 0x30030,
1442                 0x30038, 0x30038,
1443                 0x30040, 0x30040,
1444                 0x30100, 0x30144,
1445                 0x30190, 0x301a0,
1446                 0x301a8, 0x301b8,
1447                 0x301c4, 0x301c8,
1448                 0x301d0, 0x301d0,
1449                 0x30200, 0x30318,
1450                 0x30400, 0x304b4,
1451                 0x304c0, 0x3052c,
1452                 0x30540, 0x3061c,
1453                 0x30800, 0x30828,
1454                 0x30834, 0x30834,
1455                 0x308c0, 0x30908,
1456                 0x30910, 0x309ac,
1457                 0x30a00, 0x30a14,
1458                 0x30a1c, 0x30a2c,
1459                 0x30a44, 0x30a50,
1460                 0x30a74, 0x30a74,
1461                 0x30a7c, 0x30afc,
1462                 0x30b08, 0x30c24,
1463                 0x30d00, 0x30d00,
1464                 0x30d08, 0x30d14,
1465                 0x30d1c, 0x30d20,
1466                 0x30d3c, 0x30d3c,
1467                 0x30d48, 0x30d50,
1468                 0x31200, 0x3120c,
1469                 0x31220, 0x31220,
1470                 0x31240, 0x31240,
1471                 0x31600, 0x3160c,
1472                 0x31a00, 0x31a1c,
1473                 0x31e00, 0x31e20,
1474                 0x31e38, 0x31e3c,
1475                 0x31e80, 0x31e80,
1476                 0x31e88, 0x31ea8,
1477                 0x31eb0, 0x31eb4,
1478                 0x31ec8, 0x31ed4,
1479                 0x31fb8, 0x32004,
1480                 0x32200, 0x32200,
1481                 0x32208, 0x32240,
1482                 0x32248, 0x32280,
1483                 0x32288, 0x322c0,
1484                 0x322c8, 0x322fc,
1485                 0x32600, 0x32630,
1486                 0x32a00, 0x32abc,
1487                 0x32b00, 0x32b10,
1488                 0x32b20, 0x32b30,
1489                 0x32b40, 0x32b50,
1490                 0x32b60, 0x32b70,
1491                 0x33000, 0x33028,
1492                 0x33030, 0x33048,
1493                 0x33060, 0x33068,
1494                 0x33070, 0x3309c,
1495                 0x330f0, 0x33128,
1496                 0x33130, 0x33148,
1497                 0x33160, 0x33168,
1498                 0x33170, 0x3319c,
1499                 0x331f0, 0x33238,
1500                 0x33240, 0x33240,
1501                 0x33248, 0x33250,
1502                 0x3325c, 0x33264,
1503                 0x33270, 0x332b8,
1504                 0x332c0, 0x332e4,
1505                 0x332f8, 0x33338,
1506                 0x33340, 0x33340,
1507                 0x33348, 0x33350,
1508                 0x3335c, 0x33364,
1509                 0x33370, 0x333b8,
1510                 0x333c0, 0x333e4,
1511                 0x333f8, 0x33428,
1512                 0x33430, 0x33448,
1513                 0x33460, 0x33468,
1514                 0x33470, 0x3349c,
1515                 0x334f0, 0x33528,
1516                 0x33530, 0x33548,
1517                 0x33560, 0x33568,
1518                 0x33570, 0x3359c,
1519                 0x335f0, 0x33638,
1520                 0x33640, 0x33640,
1521                 0x33648, 0x33650,
1522                 0x3365c, 0x33664,
1523                 0x33670, 0x336b8,
1524                 0x336c0, 0x336e4,
1525                 0x336f8, 0x33738,
1526                 0x33740, 0x33740,
1527                 0x33748, 0x33750,
1528                 0x3375c, 0x33764,
1529                 0x33770, 0x337b8,
1530                 0x337c0, 0x337e4,
1531                 0x337f8, 0x337fc,
1532                 0x33814, 0x33814,
1533                 0x3382c, 0x3382c,
1534                 0x33880, 0x3388c,
1535                 0x338e8, 0x338ec,
1536                 0x33900, 0x33928,
1537                 0x33930, 0x33948,
1538                 0x33960, 0x33968,
1539                 0x33970, 0x3399c,
1540                 0x339f0, 0x33a38,
1541                 0x33a40, 0x33a40,
1542                 0x33a48, 0x33a50,
1543                 0x33a5c, 0x33a64,
1544                 0x33a70, 0x33ab8,
1545                 0x33ac0, 0x33ae4,
1546                 0x33af8, 0x33b10,
1547                 0x33b28, 0x33b28,
1548                 0x33b3c, 0x33b50,
1549                 0x33bf0, 0x33c10,
1550                 0x33c28, 0x33c28,
1551                 0x33c3c, 0x33c50,
1552                 0x33cf0, 0x33cfc,
1553                 0x34000, 0x34030,
1554                 0x34038, 0x34038,
1555                 0x34040, 0x34040,
1556                 0x34100, 0x34144,
1557                 0x34190, 0x341a0,
1558                 0x341a8, 0x341b8,
1559                 0x341c4, 0x341c8,
1560                 0x341d0, 0x341d0,
1561                 0x34200, 0x34318,
1562                 0x34400, 0x344b4,
1563                 0x344c0, 0x3452c,
1564                 0x34540, 0x3461c,
1565                 0x34800, 0x34828,
1566                 0x34834, 0x34834,
1567                 0x348c0, 0x34908,
1568                 0x34910, 0x349ac,
1569                 0x34a00, 0x34a14,
1570                 0x34a1c, 0x34a2c,
1571                 0x34a44, 0x34a50,
1572                 0x34a74, 0x34a74,
1573                 0x34a7c, 0x34afc,
1574                 0x34b08, 0x34c24,
1575                 0x34d00, 0x34d00,
1576                 0x34d08, 0x34d14,
1577                 0x34d1c, 0x34d20,
1578                 0x34d3c, 0x34d3c,
1579                 0x34d48, 0x34d50,
1580                 0x35200, 0x3520c,
1581                 0x35220, 0x35220,
1582                 0x35240, 0x35240,
1583                 0x35600, 0x3560c,
1584                 0x35a00, 0x35a1c,
1585                 0x35e00, 0x35e20,
1586                 0x35e38, 0x35e3c,
1587                 0x35e80, 0x35e80,
1588                 0x35e88, 0x35ea8,
1589                 0x35eb0, 0x35eb4,
1590                 0x35ec8, 0x35ed4,
1591                 0x35fb8, 0x36004,
1592                 0x36200, 0x36200,
1593                 0x36208, 0x36240,
1594                 0x36248, 0x36280,
1595                 0x36288, 0x362c0,
1596                 0x362c8, 0x362fc,
1597                 0x36600, 0x36630,
1598                 0x36a00, 0x36abc,
1599                 0x36b00, 0x36b10,
1600                 0x36b20, 0x36b30,
1601                 0x36b40, 0x36b50,
1602                 0x36b60, 0x36b70,
1603                 0x37000, 0x37028,
1604                 0x37030, 0x37048,
1605                 0x37060, 0x37068,
1606                 0x37070, 0x3709c,
1607                 0x370f0, 0x37128,
1608                 0x37130, 0x37148,
1609                 0x37160, 0x37168,
1610                 0x37170, 0x3719c,
1611                 0x371f0, 0x37238,
1612                 0x37240, 0x37240,
1613                 0x37248, 0x37250,
1614                 0x3725c, 0x37264,
1615                 0x37270, 0x372b8,
1616                 0x372c0, 0x372e4,
1617                 0x372f8, 0x37338,
1618                 0x37340, 0x37340,
1619                 0x37348, 0x37350,
1620                 0x3735c, 0x37364,
1621                 0x37370, 0x373b8,
1622                 0x373c0, 0x373e4,
1623                 0x373f8, 0x37428,
1624                 0x37430, 0x37448,
1625                 0x37460, 0x37468,
1626                 0x37470, 0x3749c,
1627                 0x374f0, 0x37528,
1628                 0x37530, 0x37548,
1629                 0x37560, 0x37568,
1630                 0x37570, 0x3759c,
1631                 0x375f0, 0x37638,
1632                 0x37640, 0x37640,
1633                 0x37648, 0x37650,
1634                 0x3765c, 0x37664,
1635                 0x37670, 0x376b8,
1636                 0x376c0, 0x376e4,
1637                 0x376f8, 0x37738,
1638                 0x37740, 0x37740,
1639                 0x37748, 0x37750,
1640                 0x3775c, 0x37764,
1641                 0x37770, 0x377b8,
1642                 0x377c0, 0x377e4,
1643                 0x377f8, 0x377fc,
1644                 0x37814, 0x37814,
1645                 0x3782c, 0x3782c,
1646                 0x37880, 0x3788c,
1647                 0x378e8, 0x378ec,
1648                 0x37900, 0x37928,
1649                 0x37930, 0x37948,
1650                 0x37960, 0x37968,
1651                 0x37970, 0x3799c,
1652                 0x379f0, 0x37a38,
1653                 0x37a40, 0x37a40,
1654                 0x37a48, 0x37a50,
1655                 0x37a5c, 0x37a64,
1656                 0x37a70, 0x37ab8,
1657                 0x37ac0, 0x37ae4,
1658                 0x37af8, 0x37b10,
1659                 0x37b28, 0x37b28,
1660                 0x37b3c, 0x37b50,
1661                 0x37bf0, 0x37c10,
1662                 0x37c28, 0x37c28,
1663                 0x37c3c, 0x37c50,
1664                 0x37cf0, 0x37cfc,
1665                 0x38000, 0x38030,
1666                 0x38038, 0x38038,
1667                 0x38040, 0x38040,
1668                 0x38100, 0x38144,
1669                 0x38190, 0x381a0,
1670                 0x381a8, 0x381b8,
1671                 0x381c4, 0x381c8,
1672                 0x381d0, 0x381d0,
1673                 0x38200, 0x38318,
1674                 0x38400, 0x384b4,
1675                 0x384c0, 0x3852c,
1676                 0x38540, 0x3861c,
1677                 0x38800, 0x38828,
1678                 0x38834, 0x38834,
1679                 0x388c0, 0x38908,
1680                 0x38910, 0x389ac,
1681                 0x38a00, 0x38a14,
1682                 0x38a1c, 0x38a2c,
1683                 0x38a44, 0x38a50,
1684                 0x38a74, 0x38a74,
1685                 0x38a7c, 0x38afc,
1686                 0x38b08, 0x38c24,
1687                 0x38d00, 0x38d00,
1688                 0x38d08, 0x38d14,
1689                 0x38d1c, 0x38d20,
1690                 0x38d3c, 0x38d3c,
1691                 0x38d48, 0x38d50,
1692                 0x39200, 0x3920c,
1693                 0x39220, 0x39220,
1694                 0x39240, 0x39240,
1695                 0x39600, 0x3960c,
1696                 0x39a00, 0x39a1c,
1697                 0x39e00, 0x39e20,
1698                 0x39e38, 0x39e3c,
1699                 0x39e80, 0x39e80,
1700                 0x39e88, 0x39ea8,
1701                 0x39eb0, 0x39eb4,
1702                 0x39ec8, 0x39ed4,
1703                 0x39fb8, 0x3a004,
1704                 0x3a200, 0x3a200,
1705                 0x3a208, 0x3a240,
1706                 0x3a248, 0x3a280,
1707                 0x3a288, 0x3a2c0,
1708                 0x3a2c8, 0x3a2fc,
1709                 0x3a600, 0x3a630,
1710                 0x3aa00, 0x3aabc,
1711                 0x3ab00, 0x3ab10,
1712                 0x3ab20, 0x3ab30,
1713                 0x3ab40, 0x3ab50,
1714                 0x3ab60, 0x3ab70,
1715                 0x3b000, 0x3b028,
1716                 0x3b030, 0x3b048,
1717                 0x3b060, 0x3b068,
1718                 0x3b070, 0x3b09c,
1719                 0x3b0f0, 0x3b128,
1720                 0x3b130, 0x3b148,
1721                 0x3b160, 0x3b168,
1722                 0x3b170, 0x3b19c,
1723                 0x3b1f0, 0x3b238,
1724                 0x3b240, 0x3b240,
1725                 0x3b248, 0x3b250,
1726                 0x3b25c, 0x3b264,
1727                 0x3b270, 0x3b2b8,
1728                 0x3b2c0, 0x3b2e4,
1729                 0x3b2f8, 0x3b338,
1730                 0x3b340, 0x3b340,
1731                 0x3b348, 0x3b350,
1732                 0x3b35c, 0x3b364,
1733                 0x3b370, 0x3b3b8,
1734                 0x3b3c0, 0x3b3e4,
1735                 0x3b3f8, 0x3b428,
1736                 0x3b430, 0x3b448,
1737                 0x3b460, 0x3b468,
1738                 0x3b470, 0x3b49c,
1739                 0x3b4f0, 0x3b528,
1740                 0x3b530, 0x3b548,
1741                 0x3b560, 0x3b568,
1742                 0x3b570, 0x3b59c,
1743                 0x3b5f0, 0x3b638,
1744                 0x3b640, 0x3b640,
1745                 0x3b648, 0x3b650,
1746                 0x3b65c, 0x3b664,
1747                 0x3b670, 0x3b6b8,
1748                 0x3b6c0, 0x3b6e4,
1749                 0x3b6f8, 0x3b738,
1750                 0x3b740, 0x3b740,
1751                 0x3b748, 0x3b750,
1752                 0x3b75c, 0x3b764,
1753                 0x3b770, 0x3b7b8,
1754                 0x3b7c0, 0x3b7e4,
1755                 0x3b7f8, 0x3b7fc,
1756                 0x3b814, 0x3b814,
1757                 0x3b82c, 0x3b82c,
1758                 0x3b880, 0x3b88c,
1759                 0x3b8e8, 0x3b8ec,
1760                 0x3b900, 0x3b928,
1761                 0x3b930, 0x3b948,
1762                 0x3b960, 0x3b968,
1763                 0x3b970, 0x3b99c,
1764                 0x3b9f0, 0x3ba38,
1765                 0x3ba40, 0x3ba40,
1766                 0x3ba48, 0x3ba50,
1767                 0x3ba5c, 0x3ba64,
1768                 0x3ba70, 0x3bab8,
1769                 0x3bac0, 0x3bae4,
1770                 0x3baf8, 0x3bb10,
1771                 0x3bb28, 0x3bb28,
1772                 0x3bb3c, 0x3bb50,
1773                 0x3bbf0, 0x3bc10,
1774                 0x3bc28, 0x3bc28,
1775                 0x3bc3c, 0x3bc50,
1776                 0x3bcf0, 0x3bcfc,
1777                 0x3c000, 0x3c030,
1778                 0x3c038, 0x3c038,
1779                 0x3c040, 0x3c040,
1780                 0x3c100, 0x3c144,
1781                 0x3c190, 0x3c1a0,
1782                 0x3c1a8, 0x3c1b8,
1783                 0x3c1c4, 0x3c1c8,
1784                 0x3c1d0, 0x3c1d0,
1785                 0x3c200, 0x3c318,
1786                 0x3c400, 0x3c4b4,
1787                 0x3c4c0, 0x3c52c,
1788                 0x3c540, 0x3c61c,
1789                 0x3c800, 0x3c828,
1790                 0x3c834, 0x3c834,
1791                 0x3c8c0, 0x3c908,
1792                 0x3c910, 0x3c9ac,
1793                 0x3ca00, 0x3ca14,
1794                 0x3ca1c, 0x3ca2c,
1795                 0x3ca44, 0x3ca50,
1796                 0x3ca74, 0x3ca74,
1797                 0x3ca7c, 0x3cafc,
1798                 0x3cb08, 0x3cc24,
1799                 0x3cd00, 0x3cd00,
1800                 0x3cd08, 0x3cd14,
1801                 0x3cd1c, 0x3cd20,
1802                 0x3cd3c, 0x3cd3c,
1803                 0x3cd48, 0x3cd50,
1804                 0x3d200, 0x3d20c,
1805                 0x3d220, 0x3d220,
1806                 0x3d240, 0x3d240,
1807                 0x3d600, 0x3d60c,
1808                 0x3da00, 0x3da1c,
1809                 0x3de00, 0x3de20,
1810                 0x3de38, 0x3de3c,
1811                 0x3de80, 0x3de80,
1812                 0x3de88, 0x3dea8,
1813                 0x3deb0, 0x3deb4,
1814                 0x3dec8, 0x3ded4,
1815                 0x3dfb8, 0x3e004,
1816                 0x3e200, 0x3e200,
1817                 0x3e208, 0x3e240,
1818                 0x3e248, 0x3e280,
1819                 0x3e288, 0x3e2c0,
1820                 0x3e2c8, 0x3e2fc,
1821                 0x3e600, 0x3e630,
1822                 0x3ea00, 0x3eabc,
1823                 0x3eb00, 0x3eb10,
1824                 0x3eb20, 0x3eb30,
1825                 0x3eb40, 0x3eb50,
1826                 0x3eb60, 0x3eb70,
1827                 0x3f000, 0x3f028,
1828                 0x3f030, 0x3f048,
1829                 0x3f060, 0x3f068,
1830                 0x3f070, 0x3f09c,
1831                 0x3f0f0, 0x3f128,
1832                 0x3f130, 0x3f148,
1833                 0x3f160, 0x3f168,
1834                 0x3f170, 0x3f19c,
1835                 0x3f1f0, 0x3f238,
1836                 0x3f240, 0x3f240,
1837                 0x3f248, 0x3f250,
1838                 0x3f25c, 0x3f264,
1839                 0x3f270, 0x3f2b8,
1840                 0x3f2c0, 0x3f2e4,
1841                 0x3f2f8, 0x3f338,
1842                 0x3f340, 0x3f340,
1843                 0x3f348, 0x3f350,
1844                 0x3f35c, 0x3f364,
1845                 0x3f370, 0x3f3b8,
1846                 0x3f3c0, 0x3f3e4,
1847                 0x3f3f8, 0x3f428,
1848                 0x3f430, 0x3f448,
1849                 0x3f460, 0x3f468,
1850                 0x3f470, 0x3f49c,
1851                 0x3f4f0, 0x3f528,
1852                 0x3f530, 0x3f548,
1853                 0x3f560, 0x3f568,
1854                 0x3f570, 0x3f59c,
1855                 0x3f5f0, 0x3f638,
1856                 0x3f640, 0x3f640,
1857                 0x3f648, 0x3f650,
1858                 0x3f65c, 0x3f664,
1859                 0x3f670, 0x3f6b8,
1860                 0x3f6c0, 0x3f6e4,
1861                 0x3f6f8, 0x3f738,
1862                 0x3f740, 0x3f740,
1863                 0x3f748, 0x3f750,
1864                 0x3f75c, 0x3f764,
1865                 0x3f770, 0x3f7b8,
1866                 0x3f7c0, 0x3f7e4,
1867                 0x3f7f8, 0x3f7fc,
1868                 0x3f814, 0x3f814,
1869                 0x3f82c, 0x3f82c,
1870                 0x3f880, 0x3f88c,
1871                 0x3f8e8, 0x3f8ec,
1872                 0x3f900, 0x3f928,
1873                 0x3f930, 0x3f948,
1874                 0x3f960, 0x3f968,
1875                 0x3f970, 0x3f99c,
1876                 0x3f9f0, 0x3fa38,
1877                 0x3fa40, 0x3fa40,
1878                 0x3fa48, 0x3fa50,
1879                 0x3fa5c, 0x3fa64,
1880                 0x3fa70, 0x3fab8,
1881                 0x3fac0, 0x3fae4,
1882                 0x3faf8, 0x3fb10,
1883                 0x3fb28, 0x3fb28,
1884                 0x3fb3c, 0x3fb50,
1885                 0x3fbf0, 0x3fc10,
1886                 0x3fc28, 0x3fc28,
1887                 0x3fc3c, 0x3fc50,
1888                 0x3fcf0, 0x3fcfc,
1889                 0x40000, 0x4000c,
1890                 0x40040, 0x40050,
1891                 0x40060, 0x40068,
1892                 0x4007c, 0x4008c,
1893                 0x40094, 0x400b0,
1894                 0x400c0, 0x40144,
1895                 0x40180, 0x4018c,
1896                 0x40200, 0x40254,
1897                 0x40260, 0x40264,
1898                 0x40270, 0x40288,
1899                 0x40290, 0x40298,
1900                 0x402ac, 0x402c8,
1901                 0x402d0, 0x402e0,
1902                 0x402f0, 0x402f0,
1903                 0x40300, 0x4033c,
1904                 0x403f8, 0x403fc,
1905                 0x41304, 0x413c4,
1906                 0x41400, 0x4140c,
1907                 0x41414, 0x4141c,
1908                 0x41480, 0x414d0,
1909                 0x44000, 0x44054,
1910                 0x4405c, 0x44078,
1911                 0x440c0, 0x44174,
1912                 0x44180, 0x441ac,
1913                 0x441b4, 0x441b8,
1914                 0x441c0, 0x44254,
1915                 0x4425c, 0x44278,
1916                 0x442c0, 0x44374,
1917                 0x44380, 0x443ac,
1918                 0x443b4, 0x443b8,
1919                 0x443c0, 0x44454,
1920                 0x4445c, 0x44478,
1921                 0x444c0, 0x44574,
1922                 0x44580, 0x445ac,
1923                 0x445b4, 0x445b8,
1924                 0x445c0, 0x44654,
1925                 0x4465c, 0x44678,
1926                 0x446c0, 0x44774,
1927                 0x44780, 0x447ac,
1928                 0x447b4, 0x447b8,
1929                 0x447c0, 0x44854,
1930                 0x4485c, 0x44878,
1931                 0x448c0, 0x44974,
1932                 0x44980, 0x449ac,
1933                 0x449b4, 0x449b8,
1934                 0x449c0, 0x449fc,
1935                 0x45000, 0x45004,
1936                 0x45010, 0x45030,
1937                 0x45040, 0x45060,
1938                 0x45068, 0x45068,
1939                 0x45080, 0x45084,
1940                 0x450a0, 0x450b0,
1941                 0x45200, 0x45204,
1942                 0x45210, 0x45230,
1943                 0x45240, 0x45260,
1944                 0x45268, 0x45268,
1945                 0x45280, 0x45284,
1946                 0x452a0, 0x452b0,
1947                 0x460c0, 0x460e4,
1948                 0x47000, 0x4703c,
1949                 0x47044, 0x4708c,
1950                 0x47200, 0x47250,
1951                 0x47400, 0x47408,
1952                 0x47414, 0x47420,
1953                 0x47600, 0x47618,
1954                 0x47800, 0x47814,
1955                 0x48000, 0x4800c,
1956                 0x48040, 0x48050,
1957                 0x48060, 0x48068,
1958                 0x4807c, 0x4808c,
1959                 0x48094, 0x480b0,
1960                 0x480c0, 0x48144,
1961                 0x48180, 0x4818c,
1962                 0x48200, 0x48254,
1963                 0x48260, 0x48264,
1964                 0x48270, 0x48288,
1965                 0x48290, 0x48298,
1966                 0x482ac, 0x482c8,
1967                 0x482d0, 0x482e0,
1968                 0x482f0, 0x482f0,
1969                 0x48300, 0x4833c,
1970                 0x483f8, 0x483fc,
1971                 0x49304, 0x493c4,
1972                 0x49400, 0x4940c,
1973                 0x49414, 0x4941c,
1974                 0x49480, 0x494d0,
1975                 0x4c000, 0x4c054,
1976                 0x4c05c, 0x4c078,
1977                 0x4c0c0, 0x4c174,
1978                 0x4c180, 0x4c1ac,
1979                 0x4c1b4, 0x4c1b8,
1980                 0x4c1c0, 0x4c254,
1981                 0x4c25c, 0x4c278,
1982                 0x4c2c0, 0x4c374,
1983                 0x4c380, 0x4c3ac,
1984                 0x4c3b4, 0x4c3b8,
1985                 0x4c3c0, 0x4c454,
1986                 0x4c45c, 0x4c478,
1987                 0x4c4c0, 0x4c574,
1988                 0x4c580, 0x4c5ac,
1989                 0x4c5b4, 0x4c5b8,
1990                 0x4c5c0, 0x4c654,
1991                 0x4c65c, 0x4c678,
1992                 0x4c6c0, 0x4c774,
1993                 0x4c780, 0x4c7ac,
1994                 0x4c7b4, 0x4c7b8,
1995                 0x4c7c0, 0x4c854,
1996                 0x4c85c, 0x4c878,
1997                 0x4c8c0, 0x4c974,
1998                 0x4c980, 0x4c9ac,
1999                 0x4c9b4, 0x4c9b8,
2000                 0x4c9c0, 0x4c9fc,
2001                 0x4d000, 0x4d004,
2002                 0x4d010, 0x4d030,
2003                 0x4d040, 0x4d060,
2004                 0x4d068, 0x4d068,
2005                 0x4d080, 0x4d084,
2006                 0x4d0a0, 0x4d0b0,
2007                 0x4d200, 0x4d204,
2008                 0x4d210, 0x4d230,
2009                 0x4d240, 0x4d260,
2010                 0x4d268, 0x4d268,
2011                 0x4d280, 0x4d284,
2012                 0x4d2a0, 0x4d2b0,
2013                 0x4e0c0, 0x4e0e4,
2014                 0x4f000, 0x4f03c,
2015                 0x4f044, 0x4f08c,
2016                 0x4f200, 0x4f250,
2017                 0x4f400, 0x4f408,
2018                 0x4f414, 0x4f420,
2019                 0x4f600, 0x4f618,
2020                 0x4f800, 0x4f814,
2021                 0x50000, 0x50084,
2022                 0x50090, 0x500cc,
2023                 0x50400, 0x50400,
2024                 0x50800, 0x50884,
2025                 0x50890, 0x508cc,
2026                 0x50c00, 0x50c00,
2027                 0x51000, 0x5101c,
2028                 0x51300, 0x51308,
2029         };
2030
2031         static const unsigned int t6_reg_ranges[] = {
2032                 0x1008, 0x101c,
2033                 0x1024, 0x10a8,
2034                 0x10b4, 0x10f8,
2035                 0x1100, 0x1114,
2036                 0x111c, 0x112c,
2037                 0x1138, 0x113c,
2038                 0x1144, 0x114c,
2039                 0x1180, 0x1184,
2040                 0x1190, 0x1194,
2041                 0x11a0, 0x11a4,
2042                 0x11b0, 0x11b4,
2043                 0x11fc, 0x1258,
2044                 0x1280, 0x12d4,
2045                 0x12d9, 0x12d9,
2046                 0x12de, 0x12de,
2047                 0x12e3, 0x12e3,
2048                 0x12e8, 0x133c,
2049                 0x1800, 0x18fc,
2050                 0x3000, 0x302c,
2051                 0x3060, 0x30b0,
2052                 0x30b8, 0x30d8,
2053                 0x30e0, 0x30fc,
2054                 0x3140, 0x357c,
2055                 0x35a8, 0x35cc,
2056                 0x35ec, 0x35ec,
2057                 0x3600, 0x5624,
2058                 0x56cc, 0x56ec,
2059                 0x56f4, 0x5720,
2060                 0x5728, 0x575c,
2061                 0x580c, 0x5814,
2062                 0x5890, 0x589c,
2063                 0x58a4, 0x58ac,
2064                 0x58b8, 0x58bc,
2065                 0x5940, 0x595c,
2066                 0x5980, 0x598c,
2067                 0x59b0, 0x59c8,
2068                 0x59d0, 0x59dc,
2069                 0x59fc, 0x5a18,
2070                 0x5a60, 0x5a6c,
2071                 0x5a80, 0x5a8c,
2072                 0x5a94, 0x5a9c,
2073                 0x5b94, 0x5bfc,
2074                 0x5c10, 0x5e48,
2075                 0x5e50, 0x5e94,
2076                 0x5ea0, 0x5eb0,
2077                 0x5ec0, 0x5ec0,
2078                 0x5ec8, 0x5ed0,
2079                 0x6000, 0x6020,
2080                 0x6028, 0x6040,
2081                 0x6058, 0x609c,
2082                 0x60a8, 0x619c,
2083                 0x7700, 0x7798,
2084                 0x77c0, 0x7880,
2085                 0x78cc, 0x78fc,
2086                 0x7b00, 0x7b58,
2087                 0x7b60, 0x7b84,
2088                 0x7b8c, 0x7c54,
2089                 0x7d00, 0x7d38,
2090                 0x7d40, 0x7d84,
2091                 0x7d8c, 0x7ddc,
2092                 0x7de4, 0x7e04,
2093                 0x7e10, 0x7e1c,
2094                 0x7e24, 0x7e38,
2095                 0x7e40, 0x7e44,
2096                 0x7e4c, 0x7e78,
2097                 0x7e80, 0x7edc,
2098                 0x7ee8, 0x7efc,
2099                 0x8dc0, 0x8de4,
2100                 0x8df8, 0x8e04,
2101                 0x8e10, 0x8e84,
2102                 0x8ea0, 0x8f88,
2103                 0x8fb8, 0x9058,
2104                 0x9060, 0x9060,
2105                 0x9068, 0x90f8,
2106                 0x9100, 0x9124,
2107                 0x9400, 0x9470,
2108                 0x9600, 0x9600,
2109                 0x9608, 0x9638,
2110                 0x9640, 0x9704,
2111                 0x9710, 0x971c,
2112                 0x9800, 0x9808,
2113                 0x9820, 0x983c,
2114                 0x9850, 0x9864,
2115                 0x9c00, 0x9c6c,
2116                 0x9c80, 0x9cec,
2117                 0x9d00, 0x9d6c,
2118                 0x9d80, 0x9dec,
2119                 0x9e00, 0x9e6c,
2120                 0x9e80, 0x9eec,
2121                 0x9f00, 0x9f6c,
2122                 0x9f80, 0xa020,
2123                 0xd004, 0xd03c,
2124                 0xd100, 0xd118,
2125                 0xd200, 0xd214,
2126                 0xd220, 0xd234,
2127                 0xd240, 0xd254,
2128                 0xd260, 0xd274,
2129                 0xd280, 0xd294,
2130                 0xd2a0, 0xd2b4,
2131                 0xd2c0, 0xd2d4,
2132                 0xd2e0, 0xd2f4,
2133                 0xd300, 0xd31c,
2134                 0xdfc0, 0xdfe0,
2135                 0xe000, 0xf008,
2136                 0x11000, 0x11014,
2137                 0x11048, 0x1106c,
2138                 0x11074, 0x11088,
2139                 0x11098, 0x11120,
2140                 0x1112c, 0x1117c,
2141                 0x11190, 0x112e0,
2142                 0x11300, 0x1130c,
2143                 0x12000, 0x1206c,
2144                 0x19040, 0x1906c,
2145                 0x19078, 0x19080,
2146                 0x1908c, 0x190e8,
2147                 0x190f0, 0x190f8,
2148                 0x19100, 0x19110,
2149                 0x19120, 0x19124,
2150                 0x19150, 0x19194,
2151                 0x1919c, 0x191b0,
2152                 0x191d0, 0x191e8,
2153                 0x19238, 0x19290,
2154                 0x192a4, 0x192b0,
2155                 0x192bc, 0x192bc,
2156                 0x19348, 0x1934c,
2157                 0x193f8, 0x19418,
2158                 0x19420, 0x19428,
2159                 0x19430, 0x19444,
2160                 0x1944c, 0x1946c,
2161                 0x19474, 0x19474,
2162                 0x19490, 0x194cc,
2163                 0x194f0, 0x194f8,
2164                 0x19c00, 0x19c48,
2165                 0x19c50, 0x19c80,
2166                 0x19c94, 0x19c98,
2167                 0x19ca0, 0x19cbc,
2168                 0x19ce4, 0x19ce4,
2169                 0x19cf0, 0x19cf8,
2170                 0x19d00, 0x19d28,
2171                 0x19d50, 0x19d78,
2172                 0x19d94, 0x19d98,
2173                 0x19da0, 0x19dc8,
2174                 0x19df0, 0x19e10,
2175                 0x19e50, 0x19e6c,
2176                 0x19ea0, 0x19ebc,
2177                 0x19ec4, 0x19ef4,
2178                 0x19f04, 0x19f2c,
2179                 0x19f34, 0x19f34,
2180                 0x19f40, 0x19f50,
2181                 0x19f90, 0x19fac,
2182                 0x19fc4, 0x19fc8,
2183                 0x19fd0, 0x19fe4,
2184                 0x1a000, 0x1a004,
2185                 0x1a010, 0x1a06c,
2186                 0x1a0b0, 0x1a0e4,
2187                 0x1a0ec, 0x1a0f8,
2188                 0x1a100, 0x1a108,
2189                 0x1a114, 0x1a120,
2190                 0x1a128, 0x1a130,
2191                 0x1a138, 0x1a138,
2192                 0x1a190, 0x1a1c4,
2193                 0x1a1fc, 0x1a1fc,
2194                 0x1e008, 0x1e00c,
2195                 0x1e040, 0x1e044,
2196                 0x1e04c, 0x1e04c,
2197                 0x1e284, 0x1e290,
2198                 0x1e2c0, 0x1e2c0,
2199                 0x1e2e0, 0x1e2e0,
2200                 0x1e300, 0x1e384,
2201                 0x1e3c0, 0x1e3c8,
2202                 0x1e408, 0x1e40c,
2203                 0x1e440, 0x1e444,
2204                 0x1e44c, 0x1e44c,
2205                 0x1e684, 0x1e690,
2206                 0x1e6c0, 0x1e6c0,
2207                 0x1e6e0, 0x1e6e0,
2208                 0x1e700, 0x1e784,
2209                 0x1e7c0, 0x1e7c8,
2210                 0x1e808, 0x1e80c,
2211                 0x1e840, 0x1e844,
2212                 0x1e84c, 0x1e84c,
2213                 0x1ea84, 0x1ea90,
2214                 0x1eac0, 0x1eac0,
2215                 0x1eae0, 0x1eae0,
2216                 0x1eb00, 0x1eb84,
2217                 0x1ebc0, 0x1ebc8,
2218                 0x1ec08, 0x1ec0c,
2219                 0x1ec40, 0x1ec44,
2220                 0x1ec4c, 0x1ec4c,
2221                 0x1ee84, 0x1ee90,
2222                 0x1eec0, 0x1eec0,
2223                 0x1eee0, 0x1eee0,
2224                 0x1ef00, 0x1ef84,
2225                 0x1efc0, 0x1efc8,
2226                 0x1f008, 0x1f00c,
2227                 0x1f040, 0x1f044,
2228                 0x1f04c, 0x1f04c,
2229                 0x1f284, 0x1f290,
2230                 0x1f2c0, 0x1f2c0,
2231                 0x1f2e0, 0x1f2e0,
2232                 0x1f300, 0x1f384,
2233                 0x1f3c0, 0x1f3c8,
2234                 0x1f408, 0x1f40c,
2235                 0x1f440, 0x1f444,
2236                 0x1f44c, 0x1f44c,
2237                 0x1f684, 0x1f690,
2238                 0x1f6c0, 0x1f6c0,
2239                 0x1f6e0, 0x1f6e0,
2240                 0x1f700, 0x1f784,
2241                 0x1f7c0, 0x1f7c8,
2242                 0x1f808, 0x1f80c,
2243                 0x1f840, 0x1f844,
2244                 0x1f84c, 0x1f84c,
2245                 0x1fa84, 0x1fa90,
2246                 0x1fac0, 0x1fac0,
2247                 0x1fae0, 0x1fae0,
2248                 0x1fb00, 0x1fb84,
2249                 0x1fbc0, 0x1fbc8,
2250                 0x1fc08, 0x1fc0c,
2251                 0x1fc40, 0x1fc44,
2252                 0x1fc4c, 0x1fc4c,
2253                 0x1fe84, 0x1fe90,
2254                 0x1fec0, 0x1fec0,
2255                 0x1fee0, 0x1fee0,
2256                 0x1ff00, 0x1ff84,
2257                 0x1ffc0, 0x1ffc8,
2258                 0x30000, 0x30030,
2259                 0x30038, 0x30038,
2260                 0x30040, 0x30040,
2261                 0x30048, 0x30048,
2262                 0x30050, 0x30050,
2263                 0x3005c, 0x30060,
2264                 0x30068, 0x30068,
2265                 0x30070, 0x30070,
2266                 0x30100, 0x30168,
2267                 0x30190, 0x301a0,
2268                 0x301a8, 0x301b8,
2269                 0x301c4, 0x301c8,
2270                 0x301d0, 0x301d0,
2271                 0x30200, 0x30320,
2272                 0x30400, 0x304b4,
2273                 0x304c0, 0x3052c,
2274                 0x30540, 0x3061c,
2275                 0x30800, 0x308a0,
2276                 0x308c0, 0x30908,
2277                 0x30910, 0x309b8,
2278                 0x30a00, 0x30a04,
2279                 0x30a0c, 0x30a14,
2280                 0x30a1c, 0x30a2c,
2281                 0x30a44, 0x30a50,
2282                 0x30a74, 0x30a74,
2283                 0x30a7c, 0x30afc,
2284                 0x30b08, 0x30c24,
2285                 0x30d00, 0x30d14,
2286                 0x30d1c, 0x30d3c,
2287                 0x30d44, 0x30d4c,
2288                 0x30d54, 0x30d74,
2289                 0x30d7c, 0x30d7c,
2290                 0x30de0, 0x30de0,
2291                 0x30e00, 0x30ed4,
2292                 0x30f00, 0x30fa4,
2293                 0x30fc0, 0x30fc4,
2294                 0x31000, 0x31004,
2295                 0x31080, 0x310fc,
2296                 0x31208, 0x31220,
2297                 0x3123c, 0x31254,
2298                 0x31300, 0x31300,
2299                 0x31308, 0x3131c,
2300                 0x31338, 0x3133c,
2301                 0x31380, 0x31380,
2302                 0x31388, 0x313a8,
2303                 0x313b4, 0x313b4,
2304                 0x31400, 0x31420,
2305                 0x31438, 0x3143c,
2306                 0x31480, 0x31480,
2307                 0x314a8, 0x314a8,
2308                 0x314b0, 0x314b4,
2309                 0x314c8, 0x314d4,
2310                 0x31a40, 0x31a4c,
2311                 0x31af0, 0x31b20,
2312                 0x31b38, 0x31b3c,
2313                 0x31b80, 0x31b80,
2314                 0x31ba8, 0x31ba8,
2315                 0x31bb0, 0x31bb4,
2316                 0x31bc8, 0x31bd4,
2317                 0x32140, 0x3218c,
2318                 0x321f0, 0x321f4,
2319                 0x32200, 0x32200,
2320                 0x32218, 0x32218,
2321                 0x32400, 0x32400,
2322                 0x32408, 0x3241c,
2323                 0x32618, 0x32620,
2324                 0x32664, 0x32664,
2325                 0x326a8, 0x326a8,
2326                 0x326ec, 0x326ec,
2327                 0x32a00, 0x32abc,
2328                 0x32b00, 0x32b38,
2329                 0x32b40, 0x32b58,
2330                 0x32b60, 0x32b78,
2331                 0x32c00, 0x32c00,
2332                 0x32c08, 0x32c3c,
2333                 0x32e00, 0x32e2c,
2334                 0x32f00, 0x32f2c,
2335                 0x33000, 0x3302c,
2336                 0x33034, 0x33050,
2337                 0x33058, 0x33058,
2338                 0x33060, 0x3308c,
2339                 0x3309c, 0x330ac,
2340                 0x330c0, 0x330c0,
2341                 0x330c8, 0x330d0,
2342                 0x330d8, 0x330e0,
2343                 0x330ec, 0x3312c,
2344                 0x33134, 0x33150,
2345                 0x33158, 0x33158,
2346                 0x33160, 0x3318c,
2347                 0x3319c, 0x331ac,
2348                 0x331c0, 0x331c0,
2349                 0x331c8, 0x331d0,
2350                 0x331d8, 0x331e0,
2351                 0x331ec, 0x33290,
2352                 0x33298, 0x332c4,
2353                 0x332e4, 0x33390,
2354                 0x33398, 0x333c4,
2355                 0x333e4, 0x3342c,
2356                 0x33434, 0x33450,
2357                 0x33458, 0x33458,
2358                 0x33460, 0x3348c,
2359                 0x3349c, 0x334ac,
2360                 0x334c0, 0x334c0,
2361                 0x334c8, 0x334d0,
2362                 0x334d8, 0x334e0,
2363                 0x334ec, 0x3352c,
2364                 0x33534, 0x33550,
2365                 0x33558, 0x33558,
2366                 0x33560, 0x3358c,
2367                 0x3359c, 0x335ac,
2368                 0x335c0, 0x335c0,
2369                 0x335c8, 0x335d0,
2370                 0x335d8, 0x335e0,
2371                 0x335ec, 0x33690,
2372                 0x33698, 0x336c4,
2373                 0x336e4, 0x33790,
2374                 0x33798, 0x337c4,
2375                 0x337e4, 0x337fc,
2376                 0x33814, 0x33814,
2377                 0x33854, 0x33868,
2378                 0x33880, 0x3388c,
2379                 0x338c0, 0x338d0,
2380                 0x338e8, 0x338ec,
2381                 0x33900, 0x3392c,
2382                 0x33934, 0x33950,
2383                 0x33958, 0x33958,
2384                 0x33960, 0x3398c,
2385                 0x3399c, 0x339ac,
2386                 0x339c0, 0x339c0,
2387                 0x339c8, 0x339d0,
2388                 0x339d8, 0x339e0,
2389                 0x339ec, 0x33a90,
2390                 0x33a98, 0x33ac4,
2391                 0x33ae4, 0x33b10,
2392                 0x33b24, 0x33b28,
2393                 0x33b38, 0x33b50,
2394                 0x33bf0, 0x33c10,
2395                 0x33c24, 0x33c28,
2396                 0x33c38, 0x33c50,
2397                 0x33cf0, 0x33cfc,
2398                 0x34000, 0x34030,
2399                 0x34038, 0x34038,
2400                 0x34040, 0x34040,
2401                 0x34048, 0x34048,
2402                 0x34050, 0x34050,
2403                 0x3405c, 0x34060,
2404                 0x34068, 0x34068,
2405                 0x34070, 0x34070,
2406                 0x34100, 0x34168,
2407                 0x34190, 0x341a0,
2408                 0x341a8, 0x341b8,
2409                 0x341c4, 0x341c8,
2410                 0x341d0, 0x341d0,
2411                 0x34200, 0x34320,
2412                 0x34400, 0x344b4,
2413                 0x344c0, 0x3452c,
2414                 0x34540, 0x3461c,
2415                 0x34800, 0x348a0,
2416                 0x348c0, 0x34908,
2417                 0x34910, 0x349b8,
2418                 0x34a00, 0x34a04,
2419                 0x34a0c, 0x34a14,
2420                 0x34a1c, 0x34a2c,
2421                 0x34a44, 0x34a50,
2422                 0x34a74, 0x34a74,
2423                 0x34a7c, 0x34afc,
2424                 0x34b08, 0x34c24,
2425                 0x34d00, 0x34d14,
2426                 0x34d1c, 0x34d3c,
2427                 0x34d44, 0x34d4c,
2428                 0x34d54, 0x34d74,
2429                 0x34d7c, 0x34d7c,
2430                 0x34de0, 0x34de0,
2431                 0x34e00, 0x34ed4,
2432                 0x34f00, 0x34fa4,
2433                 0x34fc0, 0x34fc4,
2434                 0x35000, 0x35004,
2435                 0x35080, 0x350fc,
2436                 0x35208, 0x35220,
2437                 0x3523c, 0x35254,
2438                 0x35300, 0x35300,
2439                 0x35308, 0x3531c,
2440                 0x35338, 0x3533c,
2441                 0x35380, 0x35380,
2442                 0x35388, 0x353a8,
2443                 0x353b4, 0x353b4,
2444                 0x35400, 0x35420,
2445                 0x35438, 0x3543c,
2446                 0x35480, 0x35480,
2447                 0x354a8, 0x354a8,
2448                 0x354b0, 0x354b4,
2449                 0x354c8, 0x354d4,
2450                 0x35a40, 0x35a4c,
2451                 0x35af0, 0x35b20,
2452                 0x35b38, 0x35b3c,
2453                 0x35b80, 0x35b80,
2454                 0x35ba8, 0x35ba8,
2455                 0x35bb0, 0x35bb4,
2456                 0x35bc8, 0x35bd4,
2457                 0x36140, 0x3618c,
2458                 0x361f0, 0x361f4,
2459                 0x36200, 0x36200,
2460                 0x36218, 0x36218,
2461                 0x36400, 0x36400,
2462                 0x36408, 0x3641c,
2463                 0x36618, 0x36620,
2464                 0x36664, 0x36664,
2465                 0x366a8, 0x366a8,
2466                 0x366ec, 0x366ec,
2467                 0x36a00, 0x36abc,
2468                 0x36b00, 0x36b38,
2469                 0x36b40, 0x36b58,
2470                 0x36b60, 0x36b78,
2471                 0x36c00, 0x36c00,
2472                 0x36c08, 0x36c3c,
2473                 0x36e00, 0x36e2c,
2474                 0x36f00, 0x36f2c,
2475                 0x37000, 0x3702c,
2476                 0x37034, 0x37050,
2477                 0x37058, 0x37058,
2478                 0x37060, 0x3708c,
2479                 0x3709c, 0x370ac,
2480                 0x370c0, 0x370c0,
2481                 0x370c8, 0x370d0,
2482                 0x370d8, 0x370e0,
2483                 0x370ec, 0x3712c,
2484                 0x37134, 0x37150,
2485                 0x37158, 0x37158,
2486                 0x37160, 0x3718c,
2487                 0x3719c, 0x371ac,
2488                 0x371c0, 0x371c0,
2489                 0x371c8, 0x371d0,
2490                 0x371d8, 0x371e0,
2491                 0x371ec, 0x37290,
2492                 0x37298, 0x372c4,
2493                 0x372e4, 0x37390,
2494                 0x37398, 0x373c4,
2495                 0x373e4, 0x3742c,
2496                 0x37434, 0x37450,
2497                 0x37458, 0x37458,
2498                 0x37460, 0x3748c,
2499                 0x3749c, 0x374ac,
2500                 0x374c0, 0x374c0,
2501                 0x374c8, 0x374d0,
2502                 0x374d8, 0x374e0,
2503                 0x374ec, 0x3752c,
2504                 0x37534, 0x37550,
2505                 0x37558, 0x37558,
2506                 0x37560, 0x3758c,
2507                 0x3759c, 0x375ac,
2508                 0x375c0, 0x375c0,
2509                 0x375c8, 0x375d0,
2510                 0x375d8, 0x375e0,
2511                 0x375ec, 0x37690,
2512                 0x37698, 0x376c4,
2513                 0x376e4, 0x37790,
2514                 0x37798, 0x377c4,
2515                 0x377e4, 0x377fc,
2516                 0x37814, 0x37814,
2517                 0x37854, 0x37868,
2518                 0x37880, 0x3788c,
2519                 0x378c0, 0x378d0,
2520                 0x378e8, 0x378ec,
2521                 0x37900, 0x3792c,
2522                 0x37934, 0x37950,
2523                 0x37958, 0x37958,
2524                 0x37960, 0x3798c,
2525                 0x3799c, 0x379ac,
2526                 0x379c0, 0x379c0,
2527                 0x379c8, 0x379d0,
2528                 0x379d8, 0x379e0,
2529                 0x379ec, 0x37a90,
2530                 0x37a98, 0x37ac4,
2531                 0x37ae4, 0x37b10,
2532                 0x37b24, 0x37b28,
2533                 0x37b38, 0x37b50,
2534                 0x37bf0, 0x37c10,
2535                 0x37c24, 0x37c28,
2536                 0x37c38, 0x37c50,
2537                 0x37cf0, 0x37cfc,
2538                 0x40040, 0x40040,
2539                 0x40080, 0x40084,
2540                 0x40100, 0x40100,
2541                 0x40140, 0x401bc,
2542                 0x40200, 0x40214,
2543                 0x40228, 0x40228,
2544                 0x40240, 0x40258,
2545                 0x40280, 0x40280,
2546                 0x40304, 0x40304,
2547                 0x40330, 0x4033c,
2548                 0x41304, 0x413b8,
2549                 0x413c0, 0x413c8,
2550                 0x413d0, 0x413dc,
2551                 0x413f0, 0x413f0,
2552                 0x41400, 0x4140c,
2553                 0x41414, 0x4141c,
2554                 0x41480, 0x414d0,
2555                 0x44000, 0x4407c,
2556                 0x440c0, 0x441ac,
2557                 0x441b4, 0x4427c,
2558                 0x442c0, 0x443ac,
2559                 0x443b4, 0x4447c,
2560                 0x444c0, 0x445ac,
2561                 0x445b4, 0x4467c,
2562                 0x446c0, 0x447ac,
2563                 0x447b4, 0x4487c,
2564                 0x448c0, 0x449ac,
2565                 0x449b4, 0x44a7c,
2566                 0x44ac0, 0x44bac,
2567                 0x44bb4, 0x44c7c,
2568                 0x44cc0, 0x44dac,
2569                 0x44db4, 0x44e7c,
2570                 0x44ec0, 0x44fac,
2571                 0x44fb4, 0x4507c,
2572                 0x450c0, 0x451ac,
2573                 0x451b4, 0x451fc,
2574                 0x45800, 0x45804,
2575                 0x45810, 0x45830,
2576                 0x45840, 0x45860,
2577                 0x45868, 0x45868,
2578                 0x45880, 0x45884,
2579                 0x458a0, 0x458b0,
2580                 0x45a00, 0x45a04,
2581                 0x45a10, 0x45a30,
2582                 0x45a40, 0x45a60,
2583                 0x45a68, 0x45a68,
2584                 0x45a80, 0x45a84,
2585                 0x45aa0, 0x45ab0,
2586                 0x460c0, 0x460e4,
2587                 0x47000, 0x4703c,
2588                 0x47044, 0x4708c,
2589                 0x47200, 0x47250,
2590                 0x47400, 0x47408,
2591                 0x47414, 0x47420,
2592                 0x47600, 0x47618,
2593                 0x47800, 0x47814,
2594                 0x47820, 0x4782c,
2595                 0x50000, 0x50084,
2596                 0x50090, 0x500cc,
2597                 0x50300, 0x50384,
2598                 0x50400, 0x50400,
2599                 0x50800, 0x50884,
2600                 0x50890, 0x508cc,
2601                 0x50b00, 0x50b84,
2602                 0x50c00, 0x50c00,
2603                 0x51000, 0x51020,
2604                 0x51028, 0x510b0,
2605                 0x51300, 0x51324,
2606         };
2607
2608         u32 *buf_end = (u32 *)((char *)buf + buf_size);
2609         const unsigned int *reg_ranges;
2610         int reg_ranges_size, range;
2611         unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2612
2613         /* Select the right set of register ranges to dump depending on the
2614          * adapter chip type.
2615          */
2616         switch (chip_version) {
2617         case CHELSIO_T4:
2618                 reg_ranges = t4_reg_ranges;
2619                 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2620                 break;
2621
2622         case CHELSIO_T5:
2623                 reg_ranges = t5_reg_ranges;
2624                 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2625                 break;
2626
2627         case CHELSIO_T6:
2628                 reg_ranges = t6_reg_ranges;
2629                 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2630                 break;
2631
2632         default:
2633                 dev_err(adap->pdev_dev,
2634                         "Unsupported chip version %d\n", chip_version);
2635                 return;
2636         }
2637
2638         /* Clear the register buffer and insert the appropriate register
2639          * values selected by the above register ranges.
2640          */
2641         memset(buf, 0, buf_size);
2642         for (range = 0; range < reg_ranges_size; range += 2) {
2643                 unsigned int reg = reg_ranges[range];
2644                 unsigned int last_reg = reg_ranges[range + 1];
2645                 u32 *bufp = (u32 *)((char *)buf + reg);
2646
2647                 /* Iterate across the register range filling in the register
2648                  * buffer but don't write past the end of the register buffer.
2649                  */
2650                 while (reg <= last_reg && bufp < buf_end) {
2651                         *bufp++ = t4_read_reg(adap, reg);
2652                         reg += sizeof(u32);
2653                 }
2654         }
2655 }
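/* Editorial sketch, not part of the driver: one plausible way the register
 * dump routine above could be driven.  The function name t4_get_regs() and
 * the buffer size below are assumptions for illustration; in practice the
 * ethtool get_regs path supplies a buffer sized for the whole register map.
 */
#if 0
static void example_dump_registers(struct adapter *adap)
{
	size_t buf_size = 0x60000;		/* assumed size covering the map */
	u32 *buf = vzalloc(buf_size);		/* zeroed, vmalloc-backed buffer */

	if (!buf)
		return;
	t4_get_regs(adap, buf, buf_size);	/* fill buf from the range tables */
	/* ... hand buf to user space or a debug facility here ... */
	vfree(buf);
}
#endif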
2656
2657 #define EEPROM_STAT_ADDR   0x7bfc
2658 #define VPD_SIZE           0x800
2659 #define VPD_BASE           0x400
2660 #define VPD_BASE_OLD       0
2661 #define VPD_LEN            1024
2662 #define CHELSIO_VPD_UNIQUE_ID 0x82
2663
2664 /**
2665  *      t4_seeprom_wp - enable/disable EEPROM write protection
2666  *      @adapter: the adapter
2667  *      @enable: whether to enable or disable write protection
2668  *
2669  *      Enables or disables write protection on the serial EEPROM.
2670  */
2671 int t4_seeprom_wp(struct adapter *adapter, bool enable)
2672 {
2673         unsigned int v = enable ? 0xc : 0;
2674         int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2675         return ret < 0 ? ret : 0;
2676 }
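/* Editorial usage sketch, an assumption rather than driver code: the helper
 * above is naturally used to bracket an EEPROM/VPD update, dropping write
 * protection first and re-arming it once the writes are done.
 */
#if 0
static int example_update_eeprom(struct adapter *adapter)
{
	int ret = t4_seeprom_wp(adapter, false);	/* disable write protection */

	if (ret)
		return ret;
	/* ... perform the VPD/EEPROM writes here ... */
	return t4_seeprom_wp(adapter, true);		/* re-enable write protection */
}
#endif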
2677
2678 /**
2679  *      t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2680  *      @adapter: adapter to read
2681  *      @p: where to store the parameters
2682  *
2683  *      Reads card parameters stored in VPD EEPROM.
2684  */
2685 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2686 {
2687         int i, ret = 0, addr;
2688         int ec, sn, pn, na;
2689         u8 *vpd, csum;
2690         unsigned int vpdr_len, kw_offset, id_len;
2691
2692         vpd = vmalloc(VPD_LEN);
2693         if (!vpd)
2694                 return -ENOMEM;
2695
2696         /* We have two VPD data structures stored in the adapter VPD area.
2697          * By default, Linux calculates the size of the VPD area by traversing
2698          * the first VPD area at offset 0x0, so we need to tell the OS what
2699          * our real VPD size is.
2700          */
2701         ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2702         if (ret < 0)
2703                 goto out;
2704
2705         /* Card information normally starts at VPD_BASE but early cards had
2706          * it at 0.
2707          */
2708         ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2709         if (ret < 0)
2710                 goto out;
2711
2712         /* The VPD shall have a unique identifier specified by the PCI SIG.
2713          * For Chelsio adapters that identifier is 0x82, so the first byte of a
2714          * valid VPD is CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
2715          * software is expected to place this entry at the beginning of the
2716          * VPD automatically.
2717          */
2718         addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2719
2720         ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2721         if (ret < 0)
2722                 goto out;
2723
2724         if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2725                 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2726                 ret = -EINVAL;
2727                 goto out;
2728         }
2729
2730         id_len = pci_vpd_lrdt_size(vpd);
2731         if (id_len > ID_LEN)
2732                 id_len = ID_LEN;
2733
2734         i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2735         if (i < 0) {
2736                 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2737                 ret = -EINVAL;
2738                 goto out;
2739         }
2740
2741         vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2742         kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2743         if (vpdr_len + kw_offset > VPD_LEN) {
2744                 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2745                 ret = -EINVAL;
2746                 goto out;
2747         }
2748
2749 #define FIND_VPD_KW(var, name) do { \
2750         var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2751         if (var < 0) { \
2752                 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2753                 ret = -EINVAL; \
2754                 goto out; \
2755         } \
2756         var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2757 } while (0)
2758
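        /* Per the PCI VPD specification, the first data byte of the "RV"
         * keyword is a checksum chosen so that all bytes from the start of
         * the VPD up to and including that byte sum to zero (mod 256); the
         * loop below verifies exactly that before the other fields are used.
         */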
2759         FIND_VPD_KW(i, "RV");
2760         for (csum = 0; i >= 0; i--)
2761                 csum += vpd[i];
2762
2763         if (csum) {
2764                 dev_err(adapter->pdev_dev,
2765                         "corrupted VPD EEPROM, actual csum %u\n", csum);
2766                 ret = -EINVAL;
2767                 goto out;
2768         }
2769
2770         FIND_VPD_KW(ec, "EC");
2771         FIND_VPD_KW(sn, "SN");
2772         FIND_VPD_KW(pn, "PN");
2773         FIND_VPD_KW(na, "NA");
2774 #undef FIND_VPD_KW
2775
2776         memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2777         strim(p->id);
2778         memcpy(p->ec, vpd + ec, EC_LEN);
2779         strim(p->ec);
2780         i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2781         memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2782         strim(p->sn);
2783         i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2784         memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2785         strim(p->pn);
2786         memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2787         strim((char *)p->na);
2788
2789 out:
2790         vfree(vpd);
2791         return ret < 0 ? ret : 0;
2792 }
2793
2794 /**
2795  *      t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2796  *      @adapter: adapter to read
2797  *      @p: where to store the parameters
2798  *
2799  *      Reads card parameters stored in VPD EEPROM and retrieves the Core
2800  *      Clock.  This can only be called after a connection to the firmware
2801  *      is established.
2802  */
2803 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2804 {
2805         u32 cclk_param, cclk_val;
2806         int ret;
2807
2808         /* Grab the raw VPD parameters.
2809          */
2810         ret = t4_get_raw_vpd_params(adapter, p);
2811         if (ret)
2812                 return ret;
2813
2814         /* Ask firmware for the Core Clock since it knows how to translate the
2815          * Reference Clock ('V2') VPD field into a Core Clock value ...
2816          */
2817         cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2818                       FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2819         ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2820                               1, &cclk_param, &cclk_val);
2821
2822         if (ret)
2823                 return ret;
2824         p->cclk = cclk_val;
2825
2826         return 0;
2827 }
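/* Editorial note, stated as an assumption: p->cclk is expected to hold the
 * Core Clock in kHz, which the rest of the driver uses for time conversions,
 * roughly core_ticks = microseconds * (p->cclk / 1000).
 */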
2828
2829 /* serial flash and firmware constants */
2830 enum {
2831         SF_ATTEMPTS = 10,             /* max retries for SF operations */
2832
2833         /* flash command opcodes */
2834         SF_PROG_PAGE    = 2,          /* program page */
2835         SF_WR_DISABLE   = 4,          /* disable writes */
2836         SF_RD_STATUS    = 5,          /* read status register */
2837         SF_WR_ENABLE    = 6,          /* enable writes */
2838         SF_RD_DATA_FAST = 0xb,        /* read flash */
2839         SF_RD_ID        = 0x9f,       /* read ID */
2840         SF_ERASE_SECTOR = 0xd8,       /* erase sector */
2841
2842         FW_MAX_SIZE = 16 * SF_SEC_SIZE,
2843 };
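/* Editorial note: these opcodes follow the de facto standard SPI NOR command
 * set (0x06 write enable, 0x05 read status, 0x9f JEDEC read ID, 0x0b fast
 * read, 0x02 page program, 0xd8 sector erase), so the routines below should
 * work with any serial flash part that implements that command set.
 */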
2844
2845 /**
2846  *      sf1_read - read data from the serial flash
2847  *      @adapter: the adapter
2848  *      @byte_cnt: number of bytes to read
2849  *      @cont: whether another operation will be chained
2850  *      @lock: whether to lock SF for PL access only
2851  *      @valp: where to store the read data
2852  *
2853  *      Reads up to 4 bytes of data from the serial flash.  The location of
2854  *      the read needs to be specified prior to calling this by issuing the
2855  *      appropriate commands to the serial flash.
2856  */
2857 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2858                     int lock, u32 *valp)
2859 {
2860         int ret;
2861
2862         if (!byte_cnt || byte_cnt > 4)
2863                 return -EINVAL;
2864         if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2865                 return -EBUSY;
2866         t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2867                      SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2868         ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2869         if (!ret)
2870                 *valp = t4_read_reg(adapter, SF_DATA_A);
2871         return ret;
2872 }
2873
2874 /**
2875  *      sf1_write - write data to the serial flash
2876  *      @adapter: the adapter
2877  *      @byte_cnt: number of bytes to write
2878  *      @cont: whether another operation will be chained
2879  *      @lock: whether to lock SF for PL access only
2880  *      @val: value to write
2881  *
2882  *      Writes up to 4 bytes of data to the serial flash.  The location of
2883  *      the write needs to be specified prior to calling this by issuing the
2884  *      appropriate commands to the serial flash.
2885  */
2886 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2887                      int lock, u32 val)
2888 {
2889         if (!byte_cnt || byte_cnt > 4)
2890                 return -EINVAL;
2891         if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2892                 return -EBUSY;
2893         t4_write_reg(adapter, SF_DATA_A, val);
2894         t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2895                      SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2896         return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2897 }
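
/*
 * Example (editorial sketch): sf1_write() and sf1_read() are chained to form
 * complete serial flash commands.  Reading the flash ID, for instance, could
 * look like the following -- issue SF_RD_ID with @cont set so the command
 * stays open, read the ID bytes, then release the flash lock by clearing
 * SF_OP:
 *
 *	u32 flashid = 0;
 *	int ret;
 *
 *	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
 *	if (!ret)
 *		ret = sf1_read(adapter, 3, 0, 1, &flashid);
 *	t4_write_reg(adapter, SF_OP_A, 0);
 */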
2898
2899 /**
2900  *      flash_wait_op - wait for a flash operation to complete
2901  *      @adapter: the adapter
2902  *      @attempts: max number of polls of the status register
2903  *      @delay: delay between polls in ms
2904  *
2905  *      Wait for a flash operation to complete by polling the status register.
2906  */
2907 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
2908 {
2909         int ret;
2910         u32 status;
2911
2912         while (1) {
2913                 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2914                     (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
2915                         return ret;
2916                 if (!(status & 1))
2917                         return 0;
2918                 if (--attempts == 0)
2919                         return -EAGAIN;
2920                 if (delay)
2921                         msleep(delay);
2922         }
2923 }
2924
2925 /**
2926  *      t4_read_flash - read words from serial flash
2927  *      @adapter: the adapter
2928  *      @addr: the start address for the read
2929  *      @nwords: how many 32-bit words to read
2930  *      @data: where to store the read data
2931  *      @byte_oriented: whether to store data as bytes or as words
2932  *
2933  *      Read the specified number of 32-bit words from the serial flash.
2934  *      If @byte_oriented is set the read data is stored as a byte array
2935  *      (i.e., big-endian), otherwise as 32-bit words in the platform's
2936  *      natural endianness.
2937  */
2938 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2939                   unsigned int nwords, u32 *data, int byte_oriented)
2940 {
2941         int ret;
2942
2943         if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
2944                 return -EINVAL;
2945
2946         addr = swab32(addr) | SF_RD_DATA_FAST;
2947
2948         if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
2949             (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
2950                 return ret;
2951
2952         for ( ; nwords; nwords--, data++) {
2953                 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2954                 if (nwords == 1)
2955                         t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
2956                 if (ret)
2957                         return ret;
2958                 if (byte_oriented)
2959                         *data = (__force __u32)(cpu_to_be32(*data));
2960         }
2961         return 0;
2962 }
2963
2964 /**
2965  *      t4_write_flash - write up to a page of data to the serial flash
2966  *      @adapter: the adapter
2967  *      @addr: the start address to write
2968  *      @n: length of data to write in bytes
2969  *      @data: the data to write
2970  *
2971  *      Writes up to a page of data (256 bytes) to the serial flash starting
2972  *      at the given address.  All the data must be written to the same page.
2973  */
2974 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
2975                           unsigned int n, const u8 *data)
2976 {
2977         int ret;
2978         u32 buf[64];
2979         unsigned int i, c, left, val, offset = addr & 0xff;
2980
2981         if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
2982                 return -EINVAL;
2983
2984         val = swab32(addr) | SF_PROG_PAGE;
2985
2986         if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2987             (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
2988                 goto unlock;
2989
2990         for (left = n; left; left -= c) {
2991                 c = min(left, 4U);
2992                 for (val = 0, i = 0; i < c; ++i)
2993                         val = (val << 8) + *data++;
2994
2995                 ret = sf1_write(adapter, c, c != left, 1, val);
2996                 if (ret)
2997                         goto unlock;
2998         }
2999         ret = flash_wait_op(adapter, 8, 1);
3000         if (ret)
3001                 goto unlock;
3002
3003         t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3004
3005         /* Read the page to verify the write succeeded */
3006         ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
3007         if (ret)
3008                 return ret;
3009
3010         if (memcmp(data - n, (u8 *)buf + offset, n)) {
3011                 dev_err(adapter->pdev_dev,
3012                         "failed to correctly write the flash page at %#x\n",
3013                         addr);
3014                 return -EIO;
3015         }
3016         return 0;
3017
3018 unlock:
3019         t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3020         return ret;
3021 }
3022
3023 /**
3024  *      t4_get_fw_version - read the firmware version
3025  *      @adapter: the adapter
3026  *      @vers: where to place the version
3027  *
3028  *      Reads the FW version from flash.
3029  */
3030 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3031 {
3032         return t4_read_flash(adapter, FLASH_FW_START +
3033                              offsetof(struct fw_hdr, fw_ver), 1,
3034                              vers, 0);
3035 }
3036
3037 /**
3038  *      t4_get_bs_version - read the firmware bootstrap version
3039  *      @adapter: the adapter
3040  *      @vers: where to place the version
3041  *
3042  *      Reads the FW Bootstrap version from flash.
3043  */
3044 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3045 {
3046         return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3047                              offsetof(struct fw_hdr, fw_ver), 1,
3048                              vers, 0);
3049 }
3050
3051 /**
3052  *      t4_get_tp_version - read the TP microcode version
3053  *      @adapter: the adapter
3054  *      @vers: where to place the version
3055  *
3056  *      Reads the TP microcode version from flash.
3057  */
3058 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3059 {
3060         return t4_read_flash(adapter, FLASH_FW_START +
3061                              offsetof(struct fw_hdr, tp_microcode_ver),
3062                              1, vers, 0);
3063 }
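
/*
 * Example (editorial sketch): the version words returned by the helpers above
 * pack major/minor/micro/build fields that can be unpacked with the
 * FW_HDR_FW_VER_*_G accessors, e.g.:
 *
 *	u32 vers;
 *
 *	if (!t4_get_fw_version(adapter, &vers))
 *		dev_info(adapter->pdev_dev, "FW version %u.%u.%u.%u\n",
 *			 FW_HDR_FW_VER_MAJOR_G(vers),
 *			 FW_HDR_FW_VER_MINOR_G(vers),
 *			 FW_HDR_FW_VER_MICRO_G(vers),
 *			 FW_HDR_FW_VER_BUILD_G(vers));
 */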
3064
3065 /**
3066  *      t4_get_exprom_version - return the Expansion ROM version (if any)
3067  *      @adap: the adapter
3068  *      @vers: where to place the version
3069  *
3070  *      Reads the Expansion ROM header from FLASH and returns the version
3071  *      number (if present) through the @vers return value pointer.  We return
3072  *      this in the Firmware Version Format since it's convenient.  Return
3073  *      0 on success, -ENOENT if no Expansion ROM is present.
3074  */
3075 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3076 {
3077         struct exprom_header {
3078                 unsigned char hdr_arr[16];      /* must start with 0x55aa */
3079                 unsigned char hdr_ver[4];       /* Expansion ROM version */
3080         } *hdr;
3081         u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3082                                            sizeof(u32))];
3083         int ret;
3084
3085         ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3086                             ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3087                             0);
3088         if (ret)
3089                 return ret;
3090
3091         hdr = (struct exprom_header *)exprom_header_buf;
3092         if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3093                 return -ENOENT;
3094
3095         *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3096                  FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3097                  FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3098                  FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3099         return 0;
3100 }
3101
3102 /**
3103  *      t4_check_fw_version - check if the FW is supported with this driver
3104  *      @adap: the adapter
3105  *
3106  *      Checks if an adapter's FW is compatible with the driver.  Returns 0
3107  *      if there's an exact match, or a negative error if the version could
3108  *      not be read or there's a major version mismatch.
3109  */
3110 int t4_check_fw_version(struct adapter *adap)
3111 {
3112         int i, ret, major, minor, micro;
3113         int exp_major, exp_minor, exp_micro;
3114         unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3115
3116         ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3117         /* Try multiple times before returning an error */
3118         for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3119                 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3120
3121         if (ret)
3122                 return ret;
3123
3124         major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3125         minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3126         micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3127
3128         switch (chip_version) {
3129         case CHELSIO_T4:
3130                 exp_major = T4FW_MIN_VERSION_MAJOR;
3131                 exp_minor = T4FW_MIN_VERSION_MINOR;
3132                 exp_micro = T4FW_MIN_VERSION_MICRO;
3133                 break;
3134         case CHELSIO_T5:
3135                 exp_major = T5FW_MIN_VERSION_MAJOR;
3136                 exp_minor = T5FW_MIN_VERSION_MINOR;
3137                 exp_micro = T5FW_MIN_VERSION_MICRO;
3138                 break;
3139         case CHELSIO_T6:
3140                 exp_major = T6FW_MIN_VERSION_MAJOR;
3141                 exp_minor = T6FW_MIN_VERSION_MINOR;
3142                 exp_micro = T6FW_MIN_VERSION_MICRO;
3143                 break;
3144         default:
3145                 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3146                         adap->chip);
3147                 return -EINVAL;
3148         }
3149
3150         if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3151             (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3152                 dev_err(adap->pdev_dev,
3153                         "Card has firmware version %u.%u.%u, minimum "
3154                         "supported firmware is %u.%u.%u.\n", major, minor,
3155                         micro, exp_major, exp_minor, exp_micro);
3156                 return -EFAULT;
3157         }
3158         return 0;
3159 }
3160
3161 /* Is the given firmware API compatible with the one the driver was compiled
3162  * with?
3163  */
3164 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3165 {
3166
3167         /* short circuit if it's the exact same firmware version */
3168         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3169                 return 1;
3170
3171 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3172         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3173             SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3174                 return 1;
3175 #undef SAME_INTF
3176
3177         return 0;
3178 }
3179
3180 /* The firmware in the filesystem is usable, but should it be installed?
3181  * This routine explains itself in detail if it indicates the filesystem
3182  * firmware should be installed.
3183  */
3184 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3185                                 int k, int c)
3186 {
3187         const char *reason;
3188
3189         if (!card_fw_usable) {
3190                 reason = "incompatible or unusable";
3191                 goto install;
3192         }
3193
3194         if (k > c) {
3195                 reason = "older than the version supported with this driver";
3196                 goto install;
3197         }
3198
3199         return 0;
3200
3201 install:
3202         dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3203                 "installing firmware %u.%u.%u.%u on card.\n",
3204                 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3205                 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3206                 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3207                 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3208
3209         return 1;
3210 }
3211
3212 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3213                const u8 *fw_data, unsigned int fw_size,
3214                struct fw_hdr *card_fw, enum dev_state state,
3215                int *reset)
3216 {
3217         int ret, card_fw_usable, fs_fw_usable;
3218         const struct fw_hdr *fs_fw;
3219         const struct fw_hdr *drv_fw;
3220
3221         drv_fw = &fw_info->fw_hdr;
3222
3223         /* Read the header of the firmware on the card */
3224         ret = -t4_read_flash(adap, FLASH_FW_START,
3225                             sizeof(*card_fw) / sizeof(uint32_t),
3226                             (uint32_t *)card_fw, 1);
3227         if (ret == 0) {
3228                 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3229         } else {
3230                 dev_err(adap->pdev_dev,
3231                         "Unable to read card's firmware header: %d\n", ret);
3232                 card_fw_usable = 0;
3233         }
3234
3235         if (fw_data != NULL) {
3236                 fs_fw = (const void *)fw_data;
3237                 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3238         } else {
3239                 fs_fw = NULL;
3240                 fs_fw_usable = 0;
3241         }
3242
3243         if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3244             (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3245                 /* Common case: the firmware on the card is an exact match and
3246                  * the filesystem one is an exact match too, or the filesystem
3247                  * one is absent/incompatible.
3248                  */
3249         } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3250                    should_install_fs_fw(adap, card_fw_usable,
3251                                         be32_to_cpu(fs_fw->fw_ver),
3252                                         be32_to_cpu(card_fw->fw_ver))) {
3253                 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
3254                                      fw_size, 0);
3255                 if (ret != 0) {
3256                         dev_err(adap->pdev_dev,
3257                                 "failed to install firmware: %d\n", ret);
3258                         goto bye;
3259                 }
3260
3261                 /* Installed successfully, update the cached header too. */
3262                 *card_fw = *fs_fw;
3263                 card_fw_usable = 1;
3264                 *reset = 0;     /* already reset as part of load_fw */
3265         }
3266
3267         if (!card_fw_usable) {
3268                 uint32_t d, c, k;
3269
3270                 d = be32_to_cpu(drv_fw->fw_ver);
3271                 c = be32_to_cpu(card_fw->fw_ver);
3272                 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3273
3274                 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3275                         "chip state %d, "
3276                         "driver compiled with %d.%d.%d.%d, "
3277                         "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3278                         state,
3279                         FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3280                         FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3281                         FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3282                         FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3283                         FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3284                         FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3285                 ret = EINVAL;
3286                 goto bye;
3287         }
3288
3289         /* We're using whatever's on the card and it's known to be good. */
3290         adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3291         adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3292
3293 bye:
3294         return ret;
3295 }
3296
3297 /**
3298  *      t4_flash_erase_sectors - erase a range of flash sectors
3299  *      @adapter: the adapter
3300  *      @start: the first sector to erase
3301  *      @end: the last sector to erase
3302  *
3303  *      Erases the sectors in the given inclusive range.
3304  */
3305 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3306 {
3307         int ret = 0;
3308
3309         if (end >= adapter->params.sf_nsec)
3310                 return -EINVAL;
3311
3312         while (start <= end) {
3313                 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3314                     (ret = sf1_write(adapter, 4, 0, 1,
3315                                      SF_ERASE_SECTOR | (start << 8))) != 0 ||
3316                     (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3317                         dev_err(adapter->pdev_dev,
3318                                 "erase of flash sector %d failed, error %d\n",
3319                                 start, ret);
3320                         break;
3321                 }
3322                 start++;
3323         }
3324         t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3325         return ret;
3326 }
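
/*
 * Example (editorial sketch): callers convert a byte region into an inclusive
 * sector range before erasing, along these lines for a hypothetical region of
 * @len bytes starting at flash offset @start:
 *
 *	unsigned int sec_size = adapter->params.sf_size /
 *				adapter->params.sf_nsec;
 *	int first = start / sec_size;
 *	int last = (start + len - 1) / sec_size;
 *	int ret = t4_flash_erase_sectors(adapter, first, last);
 */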
3327
3328 /**
3329  *      t4_flash_cfg_addr - return the address of the flash configuration file
3330  *      @adapter: the adapter
3331  *
3332  *      Return the address within the flash where the Firmware Configuration
3333  *      File is stored.
3334  */
3335 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3336 {
3337         if (adapter->params.sf_size == 0x100000)
3338                 return FLASH_FPGA_CFG_START;
3339         else
3340                 return FLASH_CFG_START;
3341 }
3342
3343 /* Return TRUE if the specified firmware matches the adapter.  I.e. T4
3344  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
3345  * and emit an error message for mismatched firmware to save our caller the
3346  * effort ...
3347  */
3348 static bool t4_fw_matches_chip(const struct adapter *adap,
3349                                const struct fw_hdr *hdr)
3350 {
3351         /* The expression below will return FALSE for any unsupported adapter
3352          * which will keep us "honest" in the future ...
3353          */
3354         if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3355             (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3356             (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3357                 return true;
3358
3359         dev_err(adap->pdev_dev,
3360                 "FW image (%d) is not suitable for this adapter (%d)\n",
3361                 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3362         return false;
3363 }
3364
3365 /**
3366  *      t4_load_fw - download firmware
3367  *      @adap: the adapter
3368  *      @fw_data: the firmware image to write
3369  *      @size: image size
3370  *
3371  *      Write the supplied firmware image to the card's serial flash.
3372  */
3373 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3374 {
3375         u32 csum;
3376         int ret, addr;
3377         unsigned int i;
3378         u8 first_page[SF_PAGE_SIZE];
3379         const __be32 *p = (const __be32 *)fw_data;
3380         const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3381         unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3382         unsigned int fw_img_start = adap->params.sf_fw_start;
3383         unsigned int fw_start_sec = fw_img_start / sf_sec_size;
3384
3385         if (!size) {
3386                 dev_err(adap->pdev_dev, "FW image has no data\n");
3387                 return -EINVAL;
3388         }
3389         if (size & 511) {
3390                 dev_err(adap->pdev_dev,
3391                         "FW image size not multiple of 512 bytes\n");
3392                 return -EINVAL;
3393         }
3394         if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3395                 dev_err(adap->pdev_dev,
3396                         "FW image size differs from size in FW header\n");
3397                 return -EINVAL;
3398         }
3399         if (size > FW_MAX_SIZE) {
3400                 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3401                         FW_MAX_SIZE);
3402                 return -EFBIG;
3403         }
3404         if (!t4_fw_matches_chip(adap, hdr))
3405                 return -EINVAL;
3406
3407         for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3408                 csum += be32_to_cpu(p[i]);
3409
3410         if (csum != 0xffffffff) {
3411                 dev_err(adap->pdev_dev,
3412                         "corrupted firmware image, checksum %#x\n", csum);
3413                 return -EINVAL;
3414         }
3415
3416         i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
3417         ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3418         if (ret)
3419                 goto out;
3420
3421         /*
3422          * We write the correct version at the end so the driver can see a bad
3423          * version if the FW write fails.  Start by writing a copy of the
3424          * first page with a bad version.
3425          */
3426         memcpy(first_page, fw_data, SF_PAGE_SIZE);
3427         ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3428         ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
3429         if (ret)
3430                 goto out;
3431
3432         addr = fw_img_start;
3433         for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3434                 addr += SF_PAGE_SIZE;
3435                 fw_data += SF_PAGE_SIZE;
3436                 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
3437                 if (ret)
3438                         goto out;
3439         }
3440
3441         ret = t4_write_flash(adap,
3442                              fw_img_start + offsetof(struct fw_hdr, fw_ver),
3443                              sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3444 out:
3445         if (ret)
3446                 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3447                         ret);
3448         else
3449                 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3450         return ret;
3451 }
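
/*
 * Example (editorial sketch): the image integrity rule enforced above -- the
 * 32-bit big-endian words of a valid firmware image sum to 0xffffffff -- can
 * be pre-checked by a caller with the same loop.  fw_image_csum_ok() is a
 * hypothetical helper, not part of the driver:
 *
 *	static bool fw_image_csum_ok(const __be32 *p, size_t size)
 *	{
 *		u32 csum = 0;
 *		size_t i;
 *
 *		for (i = 0; i < size / sizeof(*p); i++)
 *			csum += be32_to_cpu(p[i]);
 *		return csum == 0xffffffff;
 *	}
 */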
3452
3453 /**
3454  *      t4_phy_fw_ver - return current PHY firmware version
3455  *      @adap: the adapter
3456  *      @phy_fw_ver: return value buffer for PHY firmware version
3457  *
3458  *      Returns the current version of external PHY firmware on the
3459  *      adapter.
3460  */
3461 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3462 {
3463         u32 param, val;
3464         int ret;
3465
3466         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3467                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3468                  FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3469                  FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3470         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3471                               &param, &val);
3472         if (ret < 0)
3473                 return ret;
3474         *phy_fw_ver = val;
3475         return 0;
3476 }
3477
3478 /**
3479  *      t4_load_phy_fw - download port PHY firmware
3480  *      @adap: the adapter
3481  *      @win: the PCI-E Memory Window index to use for t4_memory_rw()
3482  *      @win_lock: the lock to use to guard the memory copy
3483  *      @phy_fw_version: function to check PHY firmware versions
3484  *      @phy_fw_data: the PHY firmware image to write
3485  *      @phy_fw_size: image size
3486  *
3487  *      Transfer the specified PHY firmware to the adapter.  If a non-NULL
3488  *      @phy_fw_version is supplied, then it will be used to determine if
3489  *      it's necessary to perform the transfer by comparing the version
3490  *      of any existing adapter PHY firmware with that of the passed in
3491  *      PHY firmware image.  If @win_lock is non-NULL then it will be used
3492  *      around the call to t4_memory_rw() which transfers the PHY firmware
3493  *      to the adapter.
3494  *
3495  *      A negative error number will be returned if an error occurs.  If
3496  *      version number support is available and there's no need to upgrade
3497  *      the firmware, 0 will be returned.  If firmware is successfully
3498  *      transferred to the adapter, 1 will be returned.
3499  *
3500  *      NOTE: some adapters only have local RAM to store the PHY firmware.  As
3501  *      a result, a RESET of the adapter would cause that RAM to lose its
3502  *      contents.  Thus, loading PHY firmware on such adapters must happen
3503  *      after any FW_RESET_CMDs ...
3504  */
3505 int t4_load_phy_fw(struct adapter *adap,
3506                    int win, spinlock_t *win_lock,
3507                    int (*phy_fw_version)(const u8 *, size_t),
3508                    const u8 *phy_fw_data, size_t phy_fw_size)
3509 {
3510         unsigned long mtype = 0, maddr = 0;
3511         u32 param, val;
3512         int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3513         int ret;
3514
3515         /* If we have version number support, then check to see if the adapter
3516          * already has up-to-date PHY firmware loaded.
3517          */
3518         if (phy_fw_version) {
3519                 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3520                 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3521                 if (ret < 0)
3522                         return ret;
3523
3524                 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3525                         CH_WARN(adap, "PHY Firmware already up-to-date, "
3526                                 "version %#x\n", cur_phy_fw_ver);
3527                         return 0;
3528                 }
3529         }
3530
3531         /* Ask the firmware where it wants us to copy the PHY firmware image.
3532          * The size of the file requires a special version of the READ command
3533          * which passes the file size via the values field of PARAMS_CMD and
3534          * retrieves the return value from the firmware, placing it in the same
3535          * values buffer.
3536          */
3537         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3538                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3539                  FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3540                  FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3541         val = phy_fw_size;
3542         ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3543                                  &param, &val, 1);
3544         if (ret < 0)
3545                 return ret;
3546         mtype = val >> 8;
3547         maddr = (val & 0xff) << 16;
3548
3549         /* Copy the supplied PHY Firmware image to the adapter memory location
3550          * allocated by the adapter firmware.
3551          */
3552         if (win_lock)
3553                 spin_lock_bh(win_lock);
3554         ret = t4_memory_rw(adap, win, mtype, maddr,
3555                            phy_fw_size, (__be32 *)phy_fw_data,
3556                            T4_MEMORY_WRITE);
3557         if (win_lock)
3558                 spin_unlock_bh(win_lock);
3559         if (ret)
3560                 return ret;
3561
3562         /* Tell the firmware that the PHY firmware image has been written to
3563          * RAM and it can now start copying it over to the PHYs.  The chip
3564          * firmware will RESET the affected PHYs as part of this operation
3565          * leaving them running the new PHY firmware image.
3566          */
3567         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3568                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3569                  FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3570                  FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3571         ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3572                                     &param, &val, 30000);
3573
3574         /* If we have version number support, then check to see that the new
3575          * firmware got loaded properly.
3576          */
3577         if (phy_fw_version) {
3578                 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3579                 if (ret < 0)
3580                         return ret;
3581
3582                 if (cur_phy_fw_ver != new_phy_fw_vers) {
3583                         CH_WARN(adap, "PHY Firmware did not update: "
3584                                 "version on adapter %#x, "
3585                                 "version flashed %#x\n",
3586                                 cur_phy_fw_ver, new_phy_fw_vers);
3587                         return -ENXIO;
3588                 }
3589         }
3590
3591         return 1;
3592 }
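
/*
 * Example (editorial sketch): a @phy_fw_version callback only needs to extract
 * a comparable version number from the raw image.  For a hypothetical PHY
 * image that keeps a 32-bit big-endian version word at a fixed offset (the
 * offset and helper below are illustrative, not a real image layout), it
 * could be:
 *
 *	#define MY_PHY_FW_VER_OFFSET	0x10
 *
 *	static int my_phy_fw_version(const u8 *data, size_t size)
 *	{
 *		if (size < MY_PHY_FW_VER_OFFSET + sizeof(__be32))
 *			return 0;	// treat short images as version 0
 *		return be32_to_cpu(*(const __be32 *)
 *				   (data + MY_PHY_FW_VER_OFFSET));
 *	}
 *
 * which would then be passed to t4_load_phy_fw() together with the image.
 */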
3593
3594 /**
3595  *      t4_fwcache - firmware cache operation
3596  *      @adap: the adapter
3597  *      @op: the operation (flush or flush and invalidate)
3598  */
3599 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3600 {
3601         struct fw_params_cmd c;
3602
3603         memset(&c, 0, sizeof(c));
3604         c.op_to_vfn =
3605                 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3606                             FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3607                             FW_PARAMS_CMD_PFN_V(adap->pf) |
3608                             FW_PARAMS_CMD_VFN_V(0));
3609         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3610         c.param[0].mnem =
3611                 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3612                             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3613         c.param[0].val = (__force __be32)op;
3614
3615         return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3616 }
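
/*
 * Example (editorial sketch): flushing the firmware cache before reading
 * adapter memory could look like this, assuming the FW_PARAM_DEV_FWCACHE_FLUSH
 * enumerator from t4fw_api.h:
 *
 *	ret = t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
 *	if (ret)
 *		dev_warn(adap->pdev_dev, "FW cache flush failed: %d\n", ret);
 */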
3617
3618 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3619                         unsigned int *pif_req_wrptr,
3620                         unsigned int *pif_rsp_wrptr)
3621 {
3622         int i, j;
3623         u32 cfg, val, req, rsp;
3624
3625         cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3626         if (cfg & LADBGEN_F)
3627                 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3628
3629         val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3630         req = POLADBGWRPTR_G(val);
3631         rsp = PILADBGWRPTR_G(val);
3632         if (pif_req_wrptr)
3633                 *pif_req_wrptr = req;
3634         if (pif_rsp_wrptr)
3635                 *pif_rsp_wrptr = rsp;
3636
3637         for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3638                 for (j = 0; j < 6; j++) {
3639                         t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3640                                      PILADBGRDPTR_V(rsp));
3641                         *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3642                         *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3643                         req++;
3644                         rsp++;
3645                 }
3646                 req = (req + 2) & POLADBGRDPTR_M;
3647                 rsp = (rsp + 2) & PILADBGRDPTR_M;
3648         }
3649         t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3650 }
3651
3652 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3653 {
3654         u32 cfg;
3655         int i, j, idx;
3656
3657         cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3658         if (cfg & LADBGEN_F)
3659                 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3660
3661         for (i = 0; i < CIM_MALA_SIZE; i++) {
3662                 for (j = 0; j < 5; j++) {
3663                         idx = 8 * i + j;
3664                         t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3665                                      PILADBGRDPTR_V(idx));
3666                         *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3667                         *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
3668                 }
3669         }
3670         t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3671 }
3672
3673 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3674 {
3675         unsigned int i, j;
3676
3677         for (i = 0; i < 8; i++) {
3678                 u32 *p = la_buf + i;
3679
3680                 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3681                 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3682                 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3683                 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3684                         *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3685         }
3686 }
3687
3688 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
3689                      FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
3690                      FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
3691                      FW_PORT_CAP_ANEG)
3692
3693 /**
3694  *      t4_link_l1cfg - apply link configuration to MAC/PHY
3695  *      @adap: the adapter
3696  *      @mbox: mbox to use for the FW command
3697  *      @port: the port id
      *      @lc: the requested link configuration
3698  *
3699  *      Set up a port's MAC and PHY according to a desired link configuration.
3700  *      - If the PHY can auto-negotiate first decide what to advertise, then
3701  *        enable/disable auto-negotiation as desired, and reset.
3702  *      - If the PHY does not auto-negotiate just reset it.
3703  *      - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3704  *        otherwise do it later based on the outcome of auto-negotiation.
3705  */
3706 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3707                   struct link_config *lc)
3708 {
3709         struct fw_port_cmd c;
3710         unsigned int mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
3711         unsigned int fc = 0, fec = 0, fw_fec = 0;
3712
3713         lc->link_ok = 0;
3714         if (lc->requested_fc & PAUSE_RX)
3715                 fc |= FW_PORT_CAP_FC_RX;
3716         if (lc->requested_fc & PAUSE_TX)
3717                 fc |= FW_PORT_CAP_FC_TX;
3718
3719         fec = lc->requested_fec & FEC_AUTO ? lc->auto_fec : lc->requested_fec;
3720
3721         if (fec & FEC_RS)
3722                 fw_fec |= FW_PORT_CAP_FEC_RS;
3723         if (fec & FEC_BASER_RS)
3724                 fw_fec |= FW_PORT_CAP_FEC_BASER_RS;
3725
3726         memset(&c, 0, sizeof(c));
3727         c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
3728                                      FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
3729                                      FW_PORT_CMD_PORTID_V(port));
3730         c.action_to_len16 =
3731                 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
3732                             FW_LEN16(c));
3733
3734         if (!(lc->supported & FW_PORT_CAP_ANEG)) {
3735                 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
3736                                              fc | fw_fec);
3737                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3738         } else if (lc->autoneg == AUTONEG_DISABLE) {
3739                 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
3740                                              fw_fec | mdi);
3741                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3742         } else
3743                 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc |
3744                                              fw_fec | mdi);
3745
3746         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3747 }
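
/*
 * Example (editorial sketch): a caller typically fills in the requested
 * fields of the port's link_config and then re-applies it.  Here @lc points
 * at the port's link_config and @port is the port id, both hypothetical names
 * for illustration; requesting RX/TX pause could look like:
 *
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	ret = t4_link_l1cfg(adap, adap->mbox, port, lc);
 */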
3748
3749 /**
3750  *      t4_restart_aneg - restart autonegotiation
3751  *      @adap: the adapter
3752  *      @mbox: mbox to use for the FW command
3753  *      @port: the port id
3754  *
3755  *      Restarts autonegotiation for the selected port.
3756  */
3757 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3758 {
3759         struct fw_port_cmd c;
3760
3761         memset(&c, 0, sizeof(c));
3762         c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
3763                                      FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
3764                                      FW_PORT_CMD_PORTID_V(port));
3765         c.action_to_len16 =
3766                 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
3767                             FW_LEN16(c));
3768         c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3769         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3770 }
3771
3772 typedef void (*int_handler_t)(struct adapter *adap);
3773
3774 struct intr_info {
3775         unsigned int mask;       /* bits to check in interrupt status */
3776         const char *msg;         /* message to print or NULL */
3777         short stat_idx;          /* stat counter to increment or -1 */
3778         unsigned short fatal;    /* whether the condition reported is fatal */
3779         int_handler_t int_handler; /* platform-specific int handler */
3780 };
3781
3782 /**
3783  *      t4_handle_intr_status - table driven interrupt handler
3784  *      @adapter: the adapter that generated the interrupt
3785  *      @reg: the interrupt status register to process
3786  *      @acts: table of interrupt actions
3787  *
3788  *      A table driven interrupt handler that applies a set of masks to an
3789  *      interrupt status word and performs the corresponding actions if the
3790  *      interrupts described by the mask have occurred.  The actions include
3791  *      optionally emitting a warning or alert message.  The table is terminated
3792  *      by an entry specifying mask 0.  Returns the number of fatal interrupt
3793  *      conditions.
3794  */
3795 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3796                                  const struct intr_info *acts)
3797 {
3798         int fatal = 0;
3799         unsigned int mask = 0;
3800         unsigned int status = t4_read_reg(adapter, reg);
3801
3802         for ( ; acts->mask; ++acts) {
3803                 if (!(status & acts->mask))
3804                         continue;
3805                 if (acts->fatal) {
3806                         fatal++;
3807                         dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
3808                                   status & acts->mask);
3809                 } else if (acts->msg && printk_ratelimit())
3810                         dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
3811                                  status & acts->mask);
3812                 if (acts->int_handler)
3813                         acts->int_handler(adapter);
3814                 mask |= acts->mask;
3815         }
3816         status &= mask;
3817         if (status)                           /* clear processed interrupts */
3818                 t4_write_reg(adapter, reg, status);
3819         return fatal;
3820 }
3821
3822 /*
3823  * Interrupt handler for the PCIE module.
3824  */
3825 static void pcie_intr_handler(struct adapter *adapter)
3826 {
3827         static const struct intr_info sysbus_intr_info[] = {
3828                 { RNPP_F, "RXNP array parity error", -1, 1 },
3829                 { RPCP_F, "RXPC array parity error", -1, 1 },
3830                 { RCIP_F, "RXCIF array parity error", -1, 1 },
3831                 { RCCP_F, "Rx completions control array parity error", -1, 1 },
3832                 { RFTP_F, "RXFT array parity error", -1, 1 },
3833                 { 0 }
3834         };
3835         static const struct intr_info pcie_port_intr_info[] = {
3836                 { TPCP_F, "TXPC array parity error", -1, 1 },
3837                 { TNPP_F, "TXNP array parity error", -1, 1 },
3838                 { TFTP_F, "TXFT array parity error", -1, 1 },
3839                 { TCAP_F, "TXCA array parity error", -1, 1 },
3840                 { TCIP_F, "TXCIF array parity error", -1, 1 },
3841                 { RCAP_F, "RXCA array parity error", -1, 1 },
3842                 { OTDD_F, "outbound request TLP discarded", -1, 1 },
3843                 { RDPE_F, "Rx data parity error", -1, 1 },
3844                 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
3845                 { 0 }
3846         };
3847         static const struct intr_info pcie_intr_info[] = {
3848                 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
3849                 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
3850                 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
3851                 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
3852                 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
3853                 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
3854                 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
3855                 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
3856                 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
3857                 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
3858                 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
3859                 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
3860                 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
3861                 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
3862                 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
3863                 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
3864                 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
3865                 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
3866                 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
3867                 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
3868                 { FIDPERR_F, "PCI FID parity error", -1, 1 },
3869                 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
3870                 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
3871                 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
3872                 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
3873                 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
3874                 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
3875                 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
3876                 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
3877                 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
3878                   -1, 0 },
3879                 { 0 }
3880         };
3881
3882         static struct intr_info t5_pcie_intr_info[] = {
3883                 { MSTGRPPERR_F, "Master Response Read Queue parity error",
3884                   -1, 1 },
3885                 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
3886                 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
3887                 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
3888                 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
3889                 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
3890                 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
3891                 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
3892                   -1, 1 },
3893                 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
3894                   -1, 1 },
3895                 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
3896                 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
3897                 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
3898                 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
3899                 { DREQWRPERR_F, "PCI DMA channel write request parity error",
3900                   -1, 1 },
3901                 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
3902                 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
3903                 { HREQWRPERR_F, "PCI HMA channel write request parity error",
                       -1, 1 },
3904                 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
3905                 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
3906                 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
3907                 { FIDPERR_F, "PCI FID parity error", -1, 1 },
3908                 { VFIDPERR_F, "PCI VFID parity error", -1, 1 },
3909                 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
3910                 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
3911                 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
3912                   -1, 1 },
3913                 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
3914                   -1, 1 },
3915                 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
3916                 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
3917                 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3918                 { READRSPERR_F, "Outbound read error", -1, 0 },
3919                 { 0 }
3920         };
3921
3922         int fat;
3923
3924         if (is_t4(adapter->params.chip))
3925                 fat = t4_handle_intr_status(adapter,
3926                                 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
3927                                 sysbus_intr_info) +
3928                         t4_handle_intr_status(adapter,
3929                                         PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
3930                                         pcie_port_intr_info) +
3931                         t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
3932                                               pcie_intr_info);
3933         else
3934                 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
3935                                             t5_pcie_intr_info);
3936
3937         if (fat)
3938                 t4_fatal_err(adapter);
3939 }
3940
3941 /*
3942  * TP interrupt handler.
3943  */
3944 static void tp_intr_handler(struct adapter *adapter)
3945 {
3946         static const struct intr_info tp_intr_info[] = {
3947                 { 0x3fffffff, "TP parity error", -1, 1 },
3948                 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
3949                 { 0 }
3950         };
3951
3952         if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
3953                 t4_fatal_err(adapter);
3954 }
3955
3956 /*
3957  * SGE interrupt handler.
3958  */
3959 static void sge_intr_handler(struct adapter *adapter)
3960 {
3961         u64 v;
3962         u32 err;
3963
3964         static const struct intr_info sge_intr_info[] = {
3965                 { ERR_CPL_EXCEED_IQE_SIZE_F,
3966                   "SGE received CPL exceeding IQE size", -1, 1 },
3967                 { ERR_INVALID_CIDX_INC_F,
3968                   "SGE GTS CIDX increment too large", -1, 0 },
3969                 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
3970                 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
3971                 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
3972                   "SGE IQID > 1023 received CPL for FL", -1, 0 },
3973                 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
3974                   0 },
3975                 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
3976                   0 },
3977                 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
3978                   0 },
3979                 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
3980                   0 },
3981                 { ERR_ING_CTXT_PRIO_F,
3982                   "SGE too many priority ingress contexts", -1, 0 },
3983                 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
3984                 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
3985                 { 0 }
3986         };
3987
3988         static struct intr_info t4t5_sge_intr_info[] = {
3989                 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
3990                 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
3991                 { ERR_EGR_CTXT_PRIO_F,
3992                   "SGE too many priority egress contexts", -1, 0 },
3993                 { 0 }
3994         };
3995
3996         v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
3997                 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
3998         if (v) {
3999                 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
4000                                 (unsigned long long)v);
4001                 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
4002                 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
4003         }
4004
4005         v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4006         if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4007                 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4008                                            t4t5_sge_intr_info);
4009
4010         err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4011         if (err & ERROR_QID_VALID_F) {
4012                 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4013                         ERROR_QID_G(err));
4014                 if (err & UNCAPTURED_ERROR_F)
4015                         dev_err(adapter->pdev_dev,
4016                                 "SGE UNCAPTURED_ERROR set (clearing)\n");
4017                 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4018                              UNCAPTURED_ERROR_F);
4019         }
4020
4021         if (v != 0)
4022                 t4_fatal_err(adapter);
4023 }
4024
4025 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4026                       OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4027 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4028                       IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4029
4030 /*
4031  * CIM interrupt handler.
4032  */
4033 static void cim_intr_handler(struct adapter *adapter)
4034 {
4035         static const struct intr_info cim_intr_info[] = {
4036                 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4037                 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4038                 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4039                 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4040                 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4041                 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4042                 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4043                 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
4044                 { 0 }
4045         };
4046         static const struct intr_info cim_upintr_info[] = {
4047                 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4048                 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4049                 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4050                 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4051                 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4052                 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4053                 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4054                 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4055                 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4056                 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4057                 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4058                 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4059                 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4060                 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4061                 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4062                 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4063                 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4064                 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4065                 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4066                 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4067                 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4068                 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4069                 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4070                 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4071                 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4072                 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4073                 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4074                 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4075                 { 0 }
4076         };
4077
4078         u32 val, fw_err;
4079         int fat;
4080
4081         fw_err = t4_read_reg(adapter, PCIE_FW_A);
4082         if (fw_err & PCIE_FW_ERR_F)
4083                 t4_report_fw_error(adapter);
4084
4085         /* When the Firmware detects an internal error which normally
4086          * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4087          * in order to make sure the Host sees the Firmware Crash.  So
4088          * if we have a Timer0 interrupt and don't see a Firmware Crash,
4089          * ignore the Timer0 interrupt.
4090          */
4091
4092         val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4093         if (val & TIMER0INT_F)
4094                 if (!(fw_err & PCIE_FW_ERR_F) ||
4095                     (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4096                         t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4097                                      TIMER0INT_F);
4098
4099         fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4100                                     cim_intr_info) +
4101               t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4102                                     cim_upintr_info);
4103         if (fat)
4104                 t4_fatal_err(adapter);
4105 }
4106
4107 /*
4108  * ULP RX interrupt handler.
4109  */
4110 static void ulprx_intr_handler(struct adapter *adapter)
4111 {
4112         static const struct intr_info ulprx_intr_info[] = {
4113                 { 0x1800000, "ULPRX context error", -1, 1 },
4114                 { 0x7fffff, "ULPRX parity error", -1, 1 },
4115                 { 0 }
4116         };
4117
4118         if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4119                 t4_fatal_err(adapter);
4120 }
4121
4122 /*
4123  * ULP TX interrupt handler.
4124  */
4125 static void ulptx_intr_handler(struct adapter *adapter)
4126 {
4127         static const struct intr_info ulptx_intr_info[] = {
4128                 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4129                   0 },
4130                 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4131                   0 },
4132                 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4133                   0 },
4134                 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4135                   0 },
4136                 { 0xfffffff, "ULPTX parity error", -1, 1 },
4137                 { 0 }
4138         };
4139
4140         if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4141                 t4_fatal_err(adapter);
4142 }
4143
4144 /*
4145  * PM TX interrupt handler.
4146  */
4147 static void pmtx_intr_handler(struct adapter *adapter)
4148 {
4149         static const struct intr_info pmtx_intr_info[] = {
4150                 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4151                 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4152                 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4153                 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4154                 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4155                 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4156                 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4157                   -1, 1 },
4158                 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4159                 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4160                 { 0 }
4161         };
4162
4163         if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4164                 t4_fatal_err(adapter);
4165 }
4166
4167 /*
4168  * PM RX interrupt handler.
4169  */
4170 static void pmrx_intr_handler(struct adapter *adapter)
4171 {
4172         static const struct intr_info pmrx_intr_info[] = {
4173                 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4174                 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4175                 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4176                 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4177                   -1, 1 },
4178                 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4179                 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4180                 { 0 }
4181         };
4182
4183         if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4184                 t4_fatal_err(adapter);
4185 }
4186
4187 /*
4188  * CPL switch interrupt handler.
4189  */
4190 static void cplsw_intr_handler(struct adapter *adapter)
4191 {
4192         static const struct intr_info cplsw_intr_info[] = {
4193                 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4194                 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4195                 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4196                 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4197                 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4198                 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4199                 { 0 }
4200         };
4201
4202         if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4203                 t4_fatal_err(adapter);
4204 }
4205
4206 /*
4207  * LE interrupt handler.
4208  */
4209 static void le_intr_handler(struct adapter *adap)
4210 {
4211         enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4212         static const struct intr_info le_intr_info[] = {
4213                 { LIPMISS_F, "LE LIP miss", -1, 0 },
4214                 { LIP0_F, "LE 0 LIP error", -1, 0 },
4215                 { PARITYERR_F, "LE parity error", -1, 1 },
4216                 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4217                 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4218                 { 0 }
4219         };
4220
4221         static const struct intr_info t6_le_intr_info[] = {
4222                 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4223                 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4224                 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4225                 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4226                 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4227                 { 0 }
4228         };
4229
4230         if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4231                                   (chip <= CHELSIO_T5) ?
4232                                   le_intr_info : t6_le_intr_info))
4233                 t4_fatal_err(adap);
4234 }
4235
4236 /*
4237  * MPS interrupt handler.
4238  */
4239 static void mps_intr_handler(struct adapter *adapter)
4240 {
4241         static const struct intr_info mps_rx_intr_info[] = {
4242                 { 0xffffff, "MPS Rx parity error", -1, 1 },
4243                 { 0 }
4244         };
4245         static const struct intr_info mps_tx_intr_info[] = {
4246                 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4247                 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4248                 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4249                   -1, 1 },
4250                 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4251                   -1, 1 },
4252                 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4253                 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4254                 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4255                 { 0 }
4256         };
4257         static const struct intr_info mps_trc_intr_info[] = {
4258                 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4259                 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4260                   -1, 1 },
4261                 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4262                 { 0 }
4263         };
4264         static const struct intr_info mps_stat_sram_intr_info[] = {
4265                 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4266                 { 0 }
4267         };
4268         static const struct intr_info mps_stat_tx_intr_info[] = {
4269                 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4270                 { 0 }
4271         };
4272         static const struct intr_info mps_stat_rx_intr_info[] = {
4273                 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4274                 { 0 }
4275         };
4276         static const struct intr_info mps_cls_intr_info[] = {
4277                 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4278                 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4279                 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4280                 { 0 }
4281         };
4282
4283         int fat;
4284
4285         fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4286                                     mps_rx_intr_info) +
4287               t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4288                                     mps_tx_intr_info) +
4289               t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4290                                     mps_trc_intr_info) +
4291               t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4292                                     mps_stat_sram_intr_info) +
4293               t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4294                                     mps_stat_tx_intr_info) +
4295               t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4296                                     mps_stat_rx_intr_info) +
4297               t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4298                                     mps_cls_intr_info);
4299
4300         t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4301         t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
4302         if (fat)
4303                 t4_fatal_err(adapter);
4304 }
4305
4306 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4307                       ECC_UE_INT_CAUSE_F)
4308
4309 /*
4310  * EDC/MC interrupt handler.
4311  */
4312 static void mem_intr_handler(struct adapter *adapter, int idx)
4313 {
4314         static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4315
4316         unsigned int addr, cnt_addr, v;
4317
4318         if (idx <= MEM_EDC1) {
4319                 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4320                 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4321         } else if (idx == MEM_MC) {
4322                 if (is_t4(adapter->params.chip)) {
4323                         addr = MC_INT_CAUSE_A;
4324                         cnt_addr = MC_ECC_STATUS_A;
4325                 } else {
4326                         addr = MC_P_INT_CAUSE_A;
4327                         cnt_addr = MC_P_ECC_STATUS_A;
4328                 }
4329         } else {
4330                 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4331                 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4332         }
4333
4334         v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4335         if (v & PERR_INT_CAUSE_F)
4336                 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4337                           name[idx]);
4338         if (v & ECC_CE_INT_CAUSE_F) {
4339                 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4340
4341                 t4_edc_err_read(adapter, idx);
4342
4343                 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4344                 if (printk_ratelimit())
4345                         dev_warn(adapter->pdev_dev,
4346                                  "%u %s correctable ECC data error%s\n",
4347                                  cnt, name[idx], cnt > 1 ? "s" : "");
4348         }
4349         if (v & ECC_UE_INT_CAUSE_F)
4350                 dev_alert(adapter->pdev_dev,
4351                           "%s uncorrectable ECC data error\n", name[idx]);
4352
4353         t4_write_reg(adapter, addr, v);
4354         if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4355                 t4_fatal_err(adapter);
4356 }
4357
4358 /*
4359  * MA interrupt handler.
4360  */
4361 static void ma_intr_handler(struct adapter *adap)
4362 {
4363         u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4364
4365         if (status & MEM_PERR_INT_CAUSE_F) {
4366                 dev_alert(adap->pdev_dev,
4367                           "MA parity error, parity status %#x\n",
4368                           t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4369                 if (is_t5(adap->params.chip))
4370                         dev_alert(adap->pdev_dev,
4371                                   "MA parity error, parity status %#x\n",
4372                                   t4_read_reg(adap,
4373                                               MA_PARITY_ERROR_STATUS2_A));
4374         }
4375         if (status & MEM_WRAP_INT_CAUSE_F) {
4376                 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4377                 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4378                           "client %u to address %#x\n",
4379                           MEM_WRAP_CLIENT_NUM_G(v),
4380                           MEM_WRAP_ADDRESS_G(v) << 4);
4381         }
4382         t4_write_reg(adap, MA_INT_CAUSE_A, status);
4383         t4_fatal_err(adap);
4384 }
4385
4386 /*
4387  * SMB interrupt handler.
4388  */
4389 static void smb_intr_handler(struct adapter *adap)
4390 {
4391         static const struct intr_info smb_intr_info[] = {
4392                 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4393                 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4394                 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4395                 { 0 }
4396         };
4397
4398         if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4399                 t4_fatal_err(adap);
4400 }
4401
4402 /*
4403  * NC-SI interrupt handler.
4404  */
4405 static void ncsi_intr_handler(struct adapter *adap)
4406 {
4407         static const struct intr_info ncsi_intr_info[] = {
4408                 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4409                 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4410                 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4411                 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4412                 { 0 }
4413         };
4414
4415         if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4416                 t4_fatal_err(adap);
4417 }
4418
4419 /*
4420  * XGMAC interrupt handler.
4421  */
4422 static void xgmac_intr_handler(struct adapter *adap, int port)
4423 {
4424         u32 v, int_cause_reg;
4425
4426         if (is_t4(adap->params.chip))
4427                 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4428         else
4429                 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4430
4431         v = t4_read_reg(adap, int_cause_reg);
4432
4433         v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4434         if (!v)
4435                 return;
4436
4437         if (v & TXFIFO_PRTY_ERR_F)
4438                 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4439                           port);
4440         if (v & RXFIFO_PRTY_ERR_F)
4441                 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4442                           port);
4443         t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4444         t4_fatal_err(adap);
4445 }
4446
4447 /*
4448  * PL interrupt handler.
4449  */
4450 static void pl_intr_handler(struct adapter *adap)
4451 {
4452         static const struct intr_info pl_intr_info[] = {
4453                 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4454                 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4455                 { 0 }
4456         };
4457
4458         if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
4459                 t4_fatal_err(adap);
4460 }
4461
4462 #define PF_INTR_MASK (PFSW_F)
4463 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4464                 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4465                 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
4466
4467 /**
4468  *      t4_slow_intr_handler - control path interrupt handler
4469  *      @adapter: the adapter
4470  *
4471  *      T4 interrupt handler for non-data global interrupt events, e.g., errors.
4472  *      The designation 'slow' is because it involves register reads, while
4473  *      data interrupts typically don't involve any MMIOs.
4474  */
4475 int t4_slow_intr_handler(struct adapter *adapter)
4476 {
4477         u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
4478
4479         if (!(cause & GLBL_INTR_MASK))
4480                 return 0;
4481         if (cause & CIM_F)
4482                 cim_intr_handler(adapter);
4483         if (cause & MPS_F)
4484                 mps_intr_handler(adapter);
4485         if (cause & NCSI_F)
4486                 ncsi_intr_handler(adapter);
4487         if (cause & PL_F)
4488                 pl_intr_handler(adapter);
4489         if (cause & SMB_F)
4490                 smb_intr_handler(adapter);
4491         if (cause & XGMAC0_F)
4492                 xgmac_intr_handler(adapter, 0);
4493         if (cause & XGMAC1_F)
4494                 xgmac_intr_handler(adapter, 1);
4495         if (cause & XGMAC_KR0_F)
4496                 xgmac_intr_handler(adapter, 2);
4497         if (cause & XGMAC_KR1_F)
4498                 xgmac_intr_handler(adapter, 3);
4499         if (cause & PCIE_F)
4500                 pcie_intr_handler(adapter);
4501         if (cause & MC_F)
4502                 mem_intr_handler(adapter, MEM_MC);
4503         if (is_t5(adapter->params.chip) && (cause & MC1_F))
4504                 mem_intr_handler(adapter, MEM_MC1);
4505         if (cause & EDC0_F)
4506                 mem_intr_handler(adapter, MEM_EDC0);
4507         if (cause & EDC1_F)
4508                 mem_intr_handler(adapter, MEM_EDC1);
4509         if (cause & LE_F)
4510                 le_intr_handler(adapter);
4511         if (cause & TP_F)
4512                 tp_intr_handler(adapter);
4513         if (cause & MA_F)
4514                 ma_intr_handler(adapter);
4515         if (cause & PM_TX_F)
4516                 pmtx_intr_handler(adapter);
4517         if (cause & PM_RX_F)
4518                 pmrx_intr_handler(adapter);
4519         if (cause & ULP_RX_F)
4520                 ulprx_intr_handler(adapter);
4521         if (cause & CPL_SWITCH_F)
4522                 cplsw_intr_handler(adapter);
4523         if (cause & SGE_F)
4524                 sge_intr_handler(adapter);
4525         if (cause & ULP_TX_F)
4526                 ulptx_intr_handler(adapter);
4527
4528         /* Clear the interrupts just processed for which we are the master. */
4529         t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
4530         (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
4531         return 1;
4532 }
4533
4534 /**
4535  *      t4_intr_enable - enable interrupts
4536  *      @adapter: the adapter whose interrupts should be enabled
4537  *
4538  *      Enable PF-specific interrupts for the calling function and the top-level
4539  *      interrupt concentrator for global interrupts.  Interrupts are already
4540  *      enabled at each module, here we just enable the roots of the interrupt
4541  *      hierarchies.
4542  *
4543  *      Note: this function should be called only when the driver manages
4544  *      non-PF-specific interrupts from the various HW modules.  Only one PCI
4545  *      function at a time should be doing this.
4546  */
4547 void t4_intr_enable(struct adapter *adapter)
4548 {
4549         u32 val = 0;
4550         u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4551         u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4552                         SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4553
4554         if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4555                 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
4556         t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
4557                      ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
4558                      ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
4559                      ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
4560                      ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
4561                      ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
4562                      DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
4563         t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
4564         t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
4565 }
4566
4567 /**
4568  *      t4_intr_disable - disable interrupts
4569  *      @adapter: the adapter whose interrupts should be disabled
4570  *
4571  *      Disable interrupts.  We only disable the top-level interrupt
4572  *      concentrators.  The caller must be a PCI function managing global
4573  *      interrupts.
4574  */
4575 void t4_intr_disable(struct adapter *adapter)
4576 {
4577         u32 whoami, pf;
4578
4579         if (pci_channel_offline(adapter->pdev))
4580                 return;
4581
4582         whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4583         pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4584                         SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4585
4586         t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
4587         t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
4588 }
4589
4590 /**
4591  *      t4_config_rss_range - configure a portion of the RSS mapping table
4592  *      @adapter: the adapter
4593  *      @mbox: mbox to use for the FW command
4594  *      @viid: virtual interface whose RSS subtable is to be written
4595  *      @start: start entry in the table to write
4596  *      @n: how many table entries to write
4597  *      @rspq: values for the response queue lookup table
4598  *      @nrspq: number of values in @rspq
4599  *
4600  *      Programs the selected part of the VI's RSS mapping table with the
4601  *      provided values.  If @nrspq < @n the supplied values are used repeatedly
4602  *      until the full table range is populated.
4603  *
4604  *      The caller must ensure the values in @rspq are in the range allowed for
4605  *      @viid.
4606  */
4607 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4608                         int start, int n, const u16 *rspq, unsigned int nrspq)
4609 {
4610         int ret;
4611         const u16 *rsp = rspq;
4612         const u16 *rsp_end = rspq + nrspq;
4613         struct fw_rss_ind_tbl_cmd cmd;
4614
4615         memset(&cmd, 0, sizeof(cmd));
4616         cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
4617                                FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4618                                FW_RSS_IND_TBL_CMD_VIID_V(viid));
4619         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4620
4621         /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
4622         while (n > 0) {
4623                 int nq = min(n, 32);
4624                 __be32 *qp = &cmd.iq0_to_iq2;
4625
4626                 cmd.niqid = cpu_to_be16(nq);
4627                 cmd.startidx = cpu_to_be16(start);
4628
4629                 start += nq;
4630                 n -= nq;
4631
4632                 while (nq > 0) {
4633                         unsigned int v;
4634
4635                         v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
4636                         if (++rsp >= rsp_end)
4637                                 rsp = rspq;
4638                         v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
4639                         if (++rsp >= rsp_end)
4640                                 rsp = rspq;
4641                         v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
4642                         if (++rsp >= rsp_end)
4643                                 rsp = rspq;
4644
4645                         *qp++ = cpu_to_be32(v);
4646                         nq -= 3;
4647                 }
4648
4649                 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4650                 if (ret)
4651                         return ret;
4652         }
4653         return 0;
4654 }
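/* Illustrative usage (editor's sketch, not part of the driver): spread a
 * VI's 128-entry RSS indirection table across four ingress queues.  The
 * queue ids, VI id and table size below are hypothetical values.
 *
 *	u16 rspq[] = { 16, 17, 18, 19 };   // absolute ingress queue ids
 *	int err;
 *
 *	err = t4_config_rss_range(adap, adap->mbox, viid, 0, 128,
 *				  rspq, ARRAY_SIZE(rspq));
 *	if (err)
 *		dev_err(adap->pdev_dev, "RSS table write failed: %d\n", err);
 *
 * Because nrspq (4) is smaller than n (128), the four ids are replayed
 * until all 128 slots are written, at most 32 slots per mailbox command.
 */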
4655
4656 /**
4657  *      t4_config_glbl_rss - configure the global RSS mode
4658  *      @adapter: the adapter
4659  *      @mbox: mbox to use for the FW command
4660  *      @mode: global RSS mode
4661  *      @flags: mode-specific flags
4662  *
4663  *      Sets the global RSS mode.
4664  */
4665 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4666                        unsigned int flags)
4667 {
4668         struct fw_rss_glb_config_cmd c;
4669
4670         memset(&c, 0, sizeof(c));
4671         c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
4672                                     FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4673         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4674         if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4675                 c.u.manual.mode_pkd =
4676                         cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
4677         } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4678                 c.u.basicvirtual.mode_pkd =
4679                         cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
4680                 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4681         } else
4682                 return -EINVAL;
4683         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4684 }
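/* Illustrative usage (editor's sketch): select the basic-virtual global RSS
 * mode.  Here "flags" stands for an OR of FW_RSS_GLB_CONFIG_CMD_* enable
 * bits appropriate to the deployment; no particular set is assumed.
 *
 *	err = t4_config_glbl_rss(adap, adap->mbox,
 *				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
 *				 flags);
 */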
4685
4686 /**
4687  *      t4_config_vi_rss - configure per VI RSS settings
4688  *      @adapter: the adapter
4689  *      @mbox: mbox to use for the FW command
4690  *      @viid: the VI id
4691  *      @flags: RSS flags
4692  *      @defq: id of the default RSS queue for the VI.
4693  *
4694  *      Configures VI-specific RSS properties.
4695  */
4696 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4697                      unsigned int flags, unsigned int defq)
4698 {
4699         struct fw_rss_vi_config_cmd c;
4700
4701         memset(&c, 0, sizeof(c));
4702         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
4703                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4704                                    FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
4705         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4706         c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4707                                         FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
4708         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4709 }
4710
4711 /* Read an RSS table row */
4712 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4713 {
4714         t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
4715         return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
4716                                    5, 0, val);
4717 }
4718
4719 /**
4720  *      t4_read_rss - read the contents of the RSS mapping table
4721  *      @adapter: the adapter
4722  *      @map: holds the contents of the RSS mapping table
4723  *
4724  *      Reads the contents of the RSS hash->queue mapping table.
4725  */
4726 int t4_read_rss(struct adapter *adapter, u16 *map)
4727 {
4728         u32 val;
4729         int i, ret;
4730
4731         for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4732                 ret = rd_rss_row(adapter, i, &val);
4733                 if (ret)
4734                         return ret;
4735                 *map++ = LKPTBLQUEUE0_G(val);
4736                 *map++ = LKPTBLQUEUE1_G(val);
4737         }
4738         return 0;
4739 }
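/* Illustrative usage (editor's sketch): read back the whole RSS lookup
 * table.  RSS_NENTRIES is the table size defined by the driver.
 *
 *	u16 map[RSS_NENTRIES];
 *
 *	if (t4_read_rss(adap, map) == 0)
 *		pr_info("RSS entry 0 -> ingress queue %u\n", map[0]);
 *
 * Each rd_rss_row() read returns one register value holding two adjacent
 * table entries, which is why the loop above covers RSS_NENTRIES / 2 rows.
 */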
4740
4741 static unsigned int t4_use_ldst(struct adapter *adap)
4742 {
4743         return (adap->flags & FW_OK) || !adap->use_bd;
4744 }
4745
4746 /**
4747  *      t4_fw_tp_pio_rw - Access TP PIO through LDST
4748  *      @adap: the adapter
4749  *      @vals: where the indirect register values are stored/written
4750  *      @nregs: how many indirect registers to read/write
4751  *      @start_index: index of first indirect register to read/write
4752  *      @rw: Read (1) or Write (0)
4753  *
4754  *      Access TP PIO registers through LDST
4755  */
4756 static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4757                             unsigned int start_index, unsigned int rw)
4758 {
4759         int ret, i;
4760         int cmd = FW_LDST_ADDRSPC_TP_PIO;
4761         struct fw_ldst_cmd c;
4762
4763         for (i = 0; i < nregs; i++) {
4764                 memset(&c, 0, sizeof(c));
4765                 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4766                                                 FW_CMD_REQUEST_F |
4767                                                 (rw ? FW_CMD_READ_F :
4768                                                       FW_CMD_WRITE_F) |
4769                                                 FW_LDST_CMD_ADDRSPACE_V(cmd));
4770                 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4771
4772                 c.u.addrval.addr = cpu_to_be32(start_index + i);
4773                 c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
4774                 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4775                 if (!ret && rw)
4776                         vals[i] = be32_to_cpu(c.u.addrval.val);
4777         }
4778 }
4779
4780 /**
4781  *      t4_read_rss_key - read the global RSS key
4782  *      @adap: the adapter
4783  *      @key: 10-entry array holding the 320-bit RSS key
4784  *
4785  *      Reads the global 320-bit RSS key.
4786  */
4787 void t4_read_rss_key(struct adapter *adap, u32 *key)
4788 {
4789         if (t4_use_ldst(adap))
4790                 t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
4791         else
4792                 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
4793                                  TP_RSS_SECRET_KEY0_A);
4794 }
4795
4796 /**
4797  *      t4_write_rss_key - program one of the RSS keys
4798  *      @adap: the adapter
4799  *      @key: 10-entry array holding the 320-bit RSS key
4800  *      @idx: which RSS key to write
4801  *
4802  *      Writes one of the RSS keys with the given 320-bit value.  If @idx is
4803  *      0..15 the corresponding entry in the RSS key table is written,
4804  *      otherwise the global RSS key is written.
4805  */
4806 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
4807 {
4808         u8 rss_key_addr_cnt = 16;
4809         u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
4810
4811         /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
4812          * key addresses 16-63 are accessible by using KeyWrAddrX as
4813          * index[5:4] (the upper two bits) into the key table.
4814          */
4815         if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
4816             (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
4817                 rss_key_addr_cnt = 32;
4818
4819         if (t4_use_ldst(adap))
4820                 t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
4821         else
4822                 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
4823                                   TP_RSS_SECRET_KEY0_A);
4824
4825         if (idx >= 0 && idx < rss_key_addr_cnt) {
4826                 if (rss_key_addr_cnt > 16)
4827                         t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
4828                                      KEYWRADDRX_V(idx >> 4) |
4829                                      T6_VFWRADDR_V(idx) | KEYWREN_F);
4830                 else
4831                         t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
4832                                      KEYWRADDR_V(idx) | KEYWREN_F);
4833         }
4834 }
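/* Illustrative usage (editor's sketch): program the global 320-bit RSS
 * secret key with an arbitrary value.  Passing a negative index skips the
 * per-index key-table write, so only the global key registers are updated.
 *
 *	u32 key[10];
 *
 *	get_random_bytes(key, sizeof(key));
 *	t4_write_rss_key(adap, key, -1);
 */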
4835
4836 /**
4837  *      t4_read_rss_pf_config - read PF RSS Configuration Table
4838  *      @adapter: the adapter
4839  *      @index: the entry in the PF RSS table to read
4840  *      @valp: where to store the returned value
4841  *
4842  *      Reads the PF RSS Configuration Table at the specified index and returns
4843  *      the value found there.
4844  */
4845 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4846                            u32 *valp)
4847 {
4848         if (t4_use_ldst(adapter))
4849                 t4_fw_tp_pio_rw(adapter, valp, 1,
4850                                 TP_RSS_PF0_CONFIG_A + index, 1);
4851         else
4852                 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4853                                  valp, 1, TP_RSS_PF0_CONFIG_A + index);
4854 }
4855
4856 /**
4857  *      t4_read_rss_vf_config - read VF RSS Configuration Table
4858  *      @adapter: the adapter
4859  *      @index: the entry in the VF RSS table to read
4860  *      @vfl: where to store the returned VFL
4861  *      @vfh: where to store the returned VFH
4862  *
4863  *      Reads the VF RSS Configuration Table at the specified index and returns
4864  *      the (VFL, VFH) values found there.
4865  */
4866 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4867                            u32 *vfl, u32 *vfh)
4868 {
4869         u32 vrt, mask, data;
4870
4871         if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
4872                 mask = VFWRADDR_V(VFWRADDR_M);
4873                 data = VFWRADDR_V(index);
4874         } else {
4875                 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
4876                 data = T6_VFWRADDR_V(index);
4877         }
4878
4879         /* Request that the index'th VF Table values be read into VFL/VFH.
4880          */
4881         vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
4882         vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
4883         vrt |= data | VFRDEN_F;
4884         t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
4885
4886         /* Grab the VFL/VFH values ...
4887          */
4888         if (t4_use_ldst(adapter)) {
4889                 t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
4890                 t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
4891         } else {
4892                 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4893                                  vfl, 1, TP_RSS_VFL_CONFIG_A);
4894                 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4895                                  vfh, 1, TP_RSS_VFH_CONFIG_A);
4896         }
4897 }
4898
4899 /**
4900  *      t4_read_rss_pf_map - read PF RSS Map
4901  *      @adapter: the adapter
4902  *
4903  *      Reads the PF RSS Map register and returns its value.
4904  */
4905 u32 t4_read_rss_pf_map(struct adapter *adapter)
4906 {
4907         u32 pfmap;
4908
4909         if (t4_use_ldst(adapter))
4910                 t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
4911         else
4912                 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4913                                  &pfmap, 1, TP_RSS_PF_MAP_A);
4914         return pfmap;
4915 }
4916
4917 /**
4918  *      t4_read_rss_pf_mask - read PF RSS Mask
4919  *      @adapter: the adapter
4920  *
4921  *      Reads the PF RSS Mask register and returns its value.
4922  */
4923 u32 t4_read_rss_pf_mask(struct adapter *adapter)
4924 {
4925         u32 pfmask;
4926
4927         if (t4_use_ldst(adapter))
4928                 t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
4929         else
4930                 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4931                                  &pfmask, 1, TP_RSS_PF_MSK_A);
4932         return pfmask;
4933 }
4934
4935 /**
4936  *      t4_tp_get_tcp_stats - read TP's TCP MIB counters
4937  *      @adap: the adapter
4938  *      @v4: holds the TCP/IP counter values
4939  *      @v6: holds the TCP/IPv6 counter values
4940  *
4941  *      Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
4942  *      Either @v4 or @v6 may be %NULL to skip the corresponding stats.
4943  */
4944 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
4945                          struct tp_tcp_stats *v6)
4946 {
4947         u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
4948
4949 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
4950 #define STAT(x)     val[STAT_IDX(x)]
4951 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
4952
4953         if (v4) {
4954                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
4955                                  ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
4956                 v4->tcp_out_rsts = STAT(OUT_RST);
4957                 v4->tcp_in_segs  = STAT64(IN_SEG);
4958                 v4->tcp_out_segs = STAT64(OUT_SEG);
4959                 v4->tcp_retrans_segs = STAT64(RXT_SEG);
4960         }
4961         if (v6) {
4962                 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
4963                                  ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
4964                 v6->tcp_out_rsts = STAT(OUT_RST);
4965                 v6->tcp_in_segs  = STAT64(IN_SEG);
4966                 v6->tcp_out_segs = STAT64(OUT_SEG);
4967                 v6->tcp_retrans_segs = STAT64(RXT_SEG);
4968         }
4969 #undef STAT64
4970 #undef STAT
4971 #undef STAT_IDX
4972 }
4973
4974 /**
4975  *      t4_tp_get_err_stats - read TP's error MIB counters
4976  *      @adap: the adapter
4977  *      @st: holds the counter values
4978  *
4979  *      Returns the values of TP's error counters.
4980  */
4981 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
4982 {
4983         int nchan = adap->params.arch.nchan;
4984
4985         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4986                          st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
4987         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4988                          st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
4989         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4990                          st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
4991         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4992                          st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
4993         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4994                          st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
4995         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4996                          st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
4997         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4998                          st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
4999         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
5000                          st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
5001
5002         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
5003                          &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
5004 }
5005
5006 /**
5007  *      t4_tp_get_cpl_stats - read TP's CPL MIB counters
5008  *      @adap: the adapter
5009  *      @st: holds the counter values
5010  *
5011  *      Returns the values of TP's CPL counters.
5012  */
5013 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
5014 {
5015         int nchan = adap->params.arch.nchan;
5016
5017         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
5018                          nchan, TP_MIB_CPL_IN_REQ_0_A);
5019         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
5020                          nchan, TP_MIB_CPL_OUT_RSP_0_A);
5021
5022 }
5023
5024 /**
5025  *      t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5026  *      @adap: the adapter
5027  *      @st: holds the counter values
5028  *
5029  *      Returns the values of TP's RDMA counters.
5030  */
5031 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
5032 {
5033         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
5034                          2, TP_MIB_RQE_DFR_PKT_A);
5035 }
5036
5037 /**
5038  *      t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5039  *      @adap: the adapter
5040  *      @idx: the port index
5041  *      @st: holds the counter values
5042  *
5043  *      Returns the values of TP's FCoE counters for the selected port.
5044  */
5045 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5046                        struct tp_fcoe_stats *st)
5047 {
5048         u32 val[2];
5049
5050         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
5051                          1, TP_MIB_FCOE_DDP_0_A + idx);
5052         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
5053                          1, TP_MIB_FCOE_DROP_0_A + idx);
5054         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
5055                          2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
5056         st->octets_ddp = ((u64)val[0] << 32) | val[1];
5057 }
5058
5059 /**
5060  *      t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5061  *      @adap: the adapter
5062  *      @st: holds the counter values
5063  *
5064  *      Returns the values of TP's counters for non-TCP directly-placed packets.
5065  */
5066 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
5067 {
5068         u32 val[4];
5069
5070         t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
5071                          TP_MIB_USM_PKTS_A);
5072         st->frames = val[0];
5073         st->drops = val[1];
5074         st->octets = ((u64)val[2] << 32) | val[3];
5075 }
5076
5077 /**
5078  *      t4_read_mtu_tbl - returns the values in the HW path MTU table
5079  *      @adap: the adapter
5080  *      @mtus: where to store the MTU values
5081  *      @mtu_log: where to store the MTU base-2 log (may be %NULL)
5082  *
5083  *      Reads the HW path MTU table.
5084  */
5085 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5086 {
5087         u32 v;
5088         int i;
5089
5090         for (i = 0; i < NMTUS; ++i) {
5091                 t4_write_reg(adap, TP_MTU_TABLE_A,
5092                              MTUINDEX_V(0xff) | MTUVALUE_V(i));
5093                 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5094                 mtus[i] = MTUVALUE_G(v);
5095                 if (mtu_log)
5096                         mtu_log[i] = MTUWIDTH_G(v);
5097         }
5098 }
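/* Illustrative usage (editor's sketch): dump the hardware path-MTU table.
 *
 *	u16 mtus[NMTUS];
 *	u8 mtu_log[NMTUS];
 *	int i;
 *
 *	t4_read_mtu_tbl(adap, mtus, mtu_log);
 *	for (i = 0; i < NMTUS; i++)
 *		dev_info(adap->pdev_dev, "MTU[%d] = %u (log2 width %u)\n",
 *			 i, mtus[i], mtu_log[i]);
 */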
5099
5100 /**
5101  *      t4_read_cong_tbl - reads the congestion control table
5102  *      @adap: the adapter
5103  *      @incr: where to store the alpha values
5104  *
5105  *      Reads the additive increments programmed into the HW congestion
5106  *      control table.
5107  */
5108 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5109 {
5110         unsigned int mtu, w;
5111
5112         for (mtu = 0; mtu < NMTUS; ++mtu)
5113                 for (w = 0; w < NCCTRL_WIN; ++w) {
5114                         t4_write_reg(adap, TP_CCTRL_TABLE_A,
5115                                      ROWINDEX_V(0xffff) | (mtu << 5) | w);
5116                         incr[mtu][w] = (u16)t4_read_reg(adap,
5117                                                 TP_CCTRL_TABLE_A) & 0x1fff;
5118                 }
5119 }
5120
5121 /**
5122  *      t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5123  *      @adap: the adapter
5124  *      @addr: the indirect TP register address
5125  *      @mask: specifies the field within the register to modify
5126  *      @val: new value for the field
5127  *
5128  *      Sets a field of an indirect TP register to the given value.
5129  */
5130 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5131                             unsigned int mask, unsigned int val)
5132 {
5133         t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5134         val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5135         t4_write_reg(adap, TP_PIO_DATA_A, val);
5136 }
5137
5138 /**
5139  *      init_cong_ctrl - initialize congestion control parameters
5140  *      @a: the alpha values for congestion control
5141  *      @b: the beta values for congestion control
5142  *
5143  *      Initialize the congestion control parameters.
5144  */
5145 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5146 {
5147         a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5148         a[9] = 2;
5149         a[10] = 3;
5150         a[11] = 4;
5151         a[12] = 5;
5152         a[13] = 6;
5153         a[14] = 7;
5154         a[15] = 8;
5155         a[16] = 9;
5156         a[17] = 10;
5157         a[18] = 14;
5158         a[19] = 17;
5159         a[20] = 21;
5160         a[21] = 25;
5161         a[22] = 30;
5162         a[23] = 35;
5163         a[24] = 45;
5164         a[25] = 60;
5165         a[26] = 80;
5166         a[27] = 100;
5167         a[28] = 200;
5168         a[29] = 300;
5169         a[30] = 400;
5170         a[31] = 500;
5171
5172         b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5173         b[9] = b[10] = 1;
5174         b[11] = b[12] = 2;
5175         b[13] = b[14] = b[15] = b[16] = 3;
5176         b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5177         b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5178         b[28] = b[29] = 6;
5179         b[30] = b[31] = 7;
5180 }
5181
5182 /* The minimum additive increment value for the congestion control table */
5183 #define CC_MIN_INCR 2U
5184
5185 /**
5186  *      t4_load_mtus - write the MTU and congestion control HW tables
5187  *      @adap: the adapter
5188  *      @mtus: the values for the MTU table
5189  *      @alpha: the values for the congestion control alpha parameter
5190  *      @beta: the values for the congestion control beta parameter
5191  *
5192  *      Write the HW MTU table with the supplied MTUs and the high-speed
5193  *      congestion control table with the supplied alpha, beta, and MTUs.
5194  *      We write the two tables together because the additive increments
5195  *      depend on the MTUs.
5196  */
5197 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5198                   const unsigned short *alpha, const unsigned short *beta)
5199 {
5200         static const unsigned int avg_pkts[NCCTRL_WIN] = {
5201                 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5202                 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5203                 28672, 40960, 57344, 81920, 114688, 163840, 229376
5204         };
5205
5206         unsigned int i, w;
5207
5208         for (i = 0; i < NMTUS; ++i) {
5209                 unsigned int mtu = mtus[i];
5210                 unsigned int log2 = fls(mtu);
5211
5212                 if (!(mtu & ((1 << log2) >> 2)))     /* round */
5213                         log2--;
5214                 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5215                              MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5216
5217                 for (w = 0; w < NCCTRL_WIN; ++w) {
5218                         unsigned int inc;
5219
5220                         inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5221                                   CC_MIN_INCR);
5222
5223                         t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5224                                      (w << 16) | (beta[w] << 13) | inc);
5225                 }
5226         }
5227 }
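/* Worked example of the "round" step above (editor's note): for an MTU of
 * 1500, fls(1500) = 11 and bit 9 (512) is clear in 1500, so log2 is
 * decremented to 10, i.e. 1500 is encoded with a width of 2^10.  For an
 * MTU of 1600, bit 9 is set and the width stays at 2^11.  In effect the
 * width is log2 of the MTU rounded at the arithmetic midpoint between the
 * two neighbouring powers of two (1536 in this case).
 */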
5228
5229 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5230  * clocks.  The formula is
5231  *
5232  * bytes/s = bytes256 * 256 * ClkFreq / 4096
5233  *
5234  * which is equivalent to
5235  *
5236  * bytes/s = 62.5 * bytes256 * ClkFreq_ms  (ClkFreq_ms = core-clock cycles per ms)
5237  */
5238 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5239 {
5240         u64 v = bytes256 * adap->params.vpd.cclk;
5241
5242         return v * 62 + v / 2;
5243 }
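/* Worked example (editor's note, assuming adap->params.vpd.cclk holds the
 * core clock in kHz, i.e. cycles per millisecond): with a 250 MHz core
 * clock (cclk = 250000) and bytes256 = 4,
 *
 *	v       = 4 * 250000       = 1000000
 *	bytes/s = v * 62 + v / 2   = 62500000
 *
 * which matches bytes256 * 256 * ClkFreq / 4096 = 4 * 256 * 250e6 / 4096.
 */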
5244
5245 /**
5246  *      t4_get_chan_txrate - get the current per channel Tx rates
5247  *      @adap: the adapter
5248  *      @nic_rate: rates for NIC traffic
5249  *      @ofld_rate: rates for offloaded traffic
5250  *
5251  *      Return the current Tx rates in bytes/s for NIC and offloaded traffic
5252  *      for each channel.
5253  */
5254 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5255 {
5256         u32 v;
5257
5258         v = t4_read_reg(adap, TP_TX_TRATE_A);
5259         nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5260         nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5261         if (adap->params.arch.nchan == NCHAN) {
5262                 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5263                 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5264         }
5265
5266         v = t4_read_reg(adap, TP_TX_ORATE_A);
5267         ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5268         ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5269         if (adap->params.arch.nchan == NCHAN) {
5270                 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5271                 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5272         }
5273 }
5274
5275 /**
5276  *      t4_set_trace_filter - configure one of the tracing filters
5277  *      @adap: the adapter
5278  *      @tp: the desired trace filter parameters
5279  *      @idx: which filter to configure
5280  *      @enable: whether to enable or disable the filter
5281  *
5282  *      Configures one of the tracing filters available in HW.  If @enable is
5283  *      %0, @tp is not examined and may be %NULL.  The user is responsible for
5284  *      setting the single/multiple trace mode via the MPS_TRC_CFG_A register.
5285  */
5286 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5287                         int idx, int enable)
5288 {
5289         int i, ofst = idx * 4;
5290         u32 data_reg, mask_reg, cfg;
5291         u32 multitrc = TRCMULTIFILTER_F;
5292
5293         if (!enable) {
5294                 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5295                 return 0;
5296         }
5297
5298         cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5299         if (cfg & TRCMULTIFILTER_F) {
5300                 /* If multiple tracers are enabled, then maximum
5301                  * capture size is 2.5KB (FIFO size of a single channel)
5302                  * minus 2 flits for CPL_TRACE_PKT header.
5303                  */
5304                 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5305                         return -EINVAL;
5306         } else {
5307                 /* If multiple tracers are disabled, to avoid deadlocks
5308                  * maximum packet capture size of 9600 bytes is recommended.
5309                  * Also in this mode, only trace0 can be enabled and running.
5310                  */
5311                 multitrc = 0;
5312                 if (tp->snap_len > 9600 || idx)
5313                         return -EINVAL;
5314         }
5315
5316         if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5317             tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5318             tp->min_len > TFMINPKTSIZE_M)
5319                 return -EINVAL;
5320
5321         /* stop the tracer we'll be changing */
5322         t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5323
5324         idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5325         data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5326         mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5327
5328         for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5329                 t4_write_reg(adap, data_reg, tp->data[i]);
5330                 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5331         }
5332         t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5333                      TFCAPTUREMAX_V(tp->snap_len) |
5334                      TFMINPKTSIZE_V(tp->min_len));
5335         t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5336                      TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5337                      (is_t4(adap->params.chip) ?
5338                      TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5339                      T5_TFPORT_V(tp->port) | T5_TFEN_F |
5340                      T5_TFINVERTMATCH_V(tp->invert)));
5341
5342         return 0;
5343 }
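/* Illustrative usage (editor's sketch, hypothetical values): have tracer 0
 * capture the first 128 bytes of every packet on port 0.  With data[] and
 * mask[] left zero the DONT_CARE registers are written as all-ones, so
 * every packet matches.  The single/multiple trace mode must already have
 * been chosen via MPS_TRC_CFG_A, as noted above.
 *
 *	struct trace_params tp = { };
 *
 *	tp.port = 0;
 *	tp.snap_len = 128;
 *	err = t4_set_trace_filter(adap, &tp, 0, 1);
 */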
5344
5345 /**
5346  *      t4_get_trace_filter - query one of the tracing filters
5347  *      @adap: the adapter
5348  *      @tp: the current trace filter parameters
5349  *      @idx: which trace filter to query
5350  *      @enabled: where to store whether the filter is enabled
5351  *
5352  *      Returns the current settings of one of the HW tracing filters.
5353  */
5354 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5355                          int *enabled)
5356 {
5357         u32 ctla, ctlb;
5358         int i, ofst = idx * 4;
5359         u32 data_reg, mask_reg;
5360
5361         ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5362         ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
5363
5364         if (is_t4(adap->params.chip)) {
5365                 *enabled = !!(ctla & TFEN_F);
5366                 tp->port =  TFPORT_G(ctla);
5367                 tp->invert = !!(ctla & TFINVERTMATCH_F);
5368         } else {
5369                 *enabled = !!(ctla & T5_TFEN_F);
5370                 tp->port = T5_TFPORT_G(ctla);
5371                 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5372         }
5373         tp->snap_len = TFCAPTUREMAX_G(ctlb);
5374         tp->min_len = TFMINPKTSIZE_G(ctlb);
5375         tp->skip_ofst = TFOFFSET_G(ctla);
5376         tp->skip_len = TFLENGTH_G(ctla);
5377
5378         ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5379         data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5380         mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5381
5382         for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5383                 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5384                 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5385         }
5386 }
5387
5388 /**
5389  *      t4_pmtx_get_stats - returns the HW stats from PMTX
5390  *      @adap: the adapter
5391  *      @cnt: where to store the count statistics
5392  *      @cycles: where to store the cycle statistics
5393  *
5394  *      Returns performance statistics from PMTX.
5395  */
5396 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5397 {
5398         int i;
5399         u32 data[2];
5400
5401         for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5402                 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5403                 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5404                 if (is_t4(adap->params.chip)) {
5405                         cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5406                 } else {
5407                         t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5408                                          PM_TX_DBG_DATA_A, data, 2,
5409                                          PM_TX_DBG_STAT_MSB_A);
5410                         cycles[i] = (((u64)data[0] << 32) | data[1]);
5411                 }
5412         }
5413 }
5414
5415 /**
5416  *      t4_pmrx_get_stats - returns the HW stats from PMRX
5417  *      @adap: the adapter
5418  *      @cnt: where to store the count statistics
5419  *      @cycles: where to store the cycle statistics
5420  *
5421  *      Returns performance statistics from PMRX.
5422  */
5423 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5424 {
5425         int i;
5426         u32 data[2];
5427
5428         for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5429                 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
5430                 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
5431                 if (is_t4(adap->params.chip)) {
5432                         cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
5433                 } else {
5434                         t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
5435                                          PM_RX_DBG_DATA_A, data, 2,
5436                                          PM_RX_DBG_STAT_MSB_A);
5437                         cycles[i] = (((u64)data[0] << 32) | data[1]);
5438                 }
5439         }
5440 }
5441
5442 /**
5443  *      t4_get_mps_bg_map - return the buffer groups associated with a port
5444  *      @adap: the adapter
5445  *      @idx: the port index
5446  *
5447  *      Returns a bitmap indicating which MPS buffer groups are associated
5448  *      with the given port.  Bit i is set if buffer group i is used by the
5449  *      port.
5450  */
5451 unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5452 {
5453         u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
5454
5455         if (n == 0)
5456                 return idx == 0 ? 0xf : 0;
5457         /* In T6 (a 2-port chip),
5458          * port 0 is mapped to channel 0 and port 1 is mapped to channel 1.
5459          * For a 2-port T4/T5 adapter,
5460          * port 0 is mapped to channels 0 and 1, and
5461          * port 1 is mapped to channels 2 and 3.
5462          */
5463         if ((n == 1) &&
5464             (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
5465                 return idx < 2 ? (3 << (2 * idx)) : 0;
5466         return 1 << idx;
5467 }
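
/* Illustrative sketch, not part of the driver: walk the buffer-group
 * bitmap returned by t4_get_mps_bg_map() and report each group used by
 * a port.  The function name and the dev_info() message are made up for
 * illustration; the 4-group limit matches the RX_BG_0..3 counters read
 * in t4_get_port_stats() below.
 */
static void __maybe_unused example_show_bg_map(struct adapter *adap, int idx)
{
        unsigned int bgmap = t4_get_mps_bg_map(adap, idx);
        int bg;

        for (bg = 0; bg < 4; bg++)
                if (bgmap & (1 << bg))
                        dev_info(adap->pdev_dev,
                                 "port %d uses buffer group %d\n", idx, bg);
}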
5468
5469 /**
5470  *      t4_get_port_type_description - return Port Type string description
5471  *      @port_type: firmware Port Type enumeration
5472  */
5473 const char *t4_get_port_type_description(enum fw_port_type port_type)
5474 {
5475         static const char *const port_type_description[] = {
5476                 "Fiber_XFI",
5477                 "Fiber_XAUI",
5478                 "BT_SGMII",
5479                 "BT_XFI",
5480                 "BT_XAUI",
5481                 "KX4",
5482                 "CX4",
5483                 "KX",
5484                 "KR",
5485                 "SFP",
5486                 "BP_AP",
5487                 "BP4_AP",
5488                 "QSFP_10G",
5489                 "QSA",
5490                 "QSFP",
5491                 "BP40_BA",
5492                 "KR4_100G",
5493                 "CR4_QSFP",
5494                 "CR_QSFP",
5495                 "CR2_QSFP",
5496                 "SFP28",
5497                 "KR_SFP28",
5498         };
5499
5500         if (port_type < ARRAY_SIZE(port_type_description))
5501                 return port_type_description[port_type];
5502         return "UNKNOWN";
5503 }
5504
5505 /**
5506  *      t4_get_port_stats_offset - collect port stats relative to a previous
5507  *                                 snapshot
5508  *      @adap: The adapter
5509  *      @idx: The port
5510  *      @stats: Current stats to fill
5511  *      @offset: Previous stats snapshot
5512  */
5513 void t4_get_port_stats_offset(struct adapter *adap, int idx,
5514                               struct port_stats *stats,
5515                               struct port_stats *offset)
5516 {
5517         u64 *s, *o;
5518         int i;
5519
5520         t4_get_port_stats(adap, idx, stats);
5521         for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
5522                         i < (sizeof(struct port_stats) / sizeof(u64));
5523                         i++, s++, o++)
5524                 *s -= *o;
5525 }
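
/* Illustrative sketch, not part of the driver: report how many frames a
 * port has transmitted since an earlier snapshot taken with
 * t4_get_port_stats().  The function name and the "prev" snapshot are
 * hypothetical.
 */
static u64 __maybe_unused example_tx_frames_since(struct adapter *adap, int idx,
                                                  struct port_stats *prev)
{
        struct port_stats cur;

        t4_get_port_stats_offset(adap, idx, &cur, prev);
        return cur.tx_frames;
}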
5526
5527 /**
5528  *      t4_get_port_stats - collect port statistics
5529  *      @adap: the adapter
5530  *      @idx: the port index
5531  *      @p: the stats structure to fill
5532  *
5533  *      Collect statistics related to the given port from HW.
5534  */
5535 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5536 {
5537         u32 bgmap = t4_get_mps_bg_map(adap, idx);
5538         u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
5539
5540 #define GET_STAT(name) \
5541         t4_read_reg64(adap, \
5542         (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
5543         T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
5544 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
5545
5546         p->tx_octets           = GET_STAT(TX_PORT_BYTES);
5547         p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
5548         p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
5549         p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
5550         p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
5551         p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
5552         p->tx_frames_64        = GET_STAT(TX_PORT_64B);
5553         p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
5554         p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
5555         p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
5556         p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
5557         p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5558         p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
5559         p->tx_drop             = GET_STAT(TX_PORT_DROP);
5560         p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
5561         p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
5562         p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
5563         p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
5564         p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
5565         p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
5566         p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
5567         p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
5568         p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
5569
5570         if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
5571                 if (stat_ctl & COUNTPAUSESTATTX_F) {
5572                         p->tx_frames -= p->tx_pause;
5573                         p->tx_octets -= p->tx_pause * 64;
5574                 }
5575                 if (stat_ctl & COUNTPAUSEMCTX_F)
5576                         p->tx_mcast_frames -= p->tx_pause;
5577         }
5578         p->rx_octets           = GET_STAT(RX_PORT_BYTES);
5579         p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
5580         p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
5581         p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
5582         p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
5583         p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
5584         p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5585         p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
5586         p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
5587         p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
5588         p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
5589         p->rx_frames_64        = GET_STAT(RX_PORT_64B);
5590         p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
5591         p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
5592         p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
5593         p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
5594         p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5595         p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
5596         p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
5597         p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
5598         p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
5599         p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
5600         p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
5601         p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
5602         p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
5603         p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
5604         p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
5605
5606         if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
5607                 if (stat_ctl & COUNTPAUSESTATRX_F) {
5608                         p->rx_frames -= p->rx_pause;
5609                         p->rx_octets -= p->rx_pause * 64;
5610                 }
5611                 if (stat_ctl & COUNTPAUSEMCRX_F)
5612                         p->rx_mcast_frames -= p->rx_pause;
5613         }
5614
5615         p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5616         p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5617         p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5618         p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5619         p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5620         p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5621         p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5622         p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5623
5624 #undef GET_STAT
5625 #undef GET_STAT_COM
5626 }
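
/* Illustrative sketch, not part of the driver: fold the RX error counters
 * collected by t4_get_port_stats() into a single total, roughly what a
 * netdev/ethtool stats callback might report.  The function name is
 * hypothetical; the fields are the ones filled in above.
 */
static u64 __maybe_unused example_rx_errors(struct adapter *adap, int idx)
{
        struct port_stats ps;

        t4_get_port_stats(adap, idx, &ps);
        return ps.rx_fcs_err + ps.rx_symbol_err + ps.rx_len_err +
               ps.rx_too_long + ps.rx_runt + ps.rx_jabber;
}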
5627
5628 /**
5629  *      t4_get_lb_stats - collect loopback port statistics
5630  *      @adap: the adapter
5631  *      @idx: the loopback port index
5632  *      @p: the stats structure to fill
5633  *
5634  *      Return HW statistics for the given loopback port.
5635  */
5636 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5637 {
5638         u32 bgmap = t4_get_mps_bg_map(adap, idx);
5639
5640 #define GET_STAT(name) \
5641         t4_read_reg64(adap, \
5642         (is_t4(adap->params.chip) ? \
5643         PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
5644         T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
5645 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
5646
5647         p->octets           = GET_STAT(BYTES);
5648         p->frames           = GET_STAT(FRAMES);
5649         p->bcast_frames     = GET_STAT(BCAST);
5650         p->mcast_frames     = GET_STAT(MCAST);
5651         p->ucast_frames     = GET_STAT(UCAST);
5652         p->error_frames     = GET_STAT(ERROR);
5653
5654         p->frames_64        = GET_STAT(64B);
5655         p->frames_65_127    = GET_STAT(65B_127B);
5656         p->frames_128_255   = GET_STAT(128B_255B);
5657         p->frames_256_511   = GET_STAT(256B_511B);
5658         p->frames_512_1023  = GET_STAT(512B_1023B);
5659         p->frames_1024_1518 = GET_STAT(1024B_1518B);
5660         p->frames_1519_max  = GET_STAT(1519B_MAX);
5661         p->drop             = GET_STAT(DROP_FRAMES);
5662
5663         p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5664         p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5665         p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5666         p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5667         p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5668         p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5669         p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5670         p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5671
5672 #undef GET_STAT
5673 #undef GET_STAT_COM
5674 }
5675
5676 /**
 *     t4_mk_filtdelwr - create a delete filter WR
5677  *     @ftid: the filter ID
5678  *     @wr: the filter work request to populate
5679  *     @qid: ingress queue to receive the delete notification
5680  *
5681  *     Creates a filter work request to delete the supplied filter.  If @qid is
5682  *     negative, the delete notification is suppressed.
5683  */
5684 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
5685 {
5686         memset(wr, 0, sizeof(*wr));
5687         wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
5688         wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
5689         wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
5690                                     FW_FILTER_WR_NOREPLY_V(qid < 0));
5691         wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
5692         if (qid >= 0)
5693                 wr->rx_chan_rx_rpl_iq =
5694                         cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
5695 }
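
/* Illustrative sketch, not part of the driver: build two delete-filter
 * work requests, one asking for a completion on ingress queue "rspq_id"
 * and one with the notification suppressed via a negative qid.  How the
 * WRs are subsequently submitted is outside the scope of this sketch.
 */
static void __maybe_unused example_build_del_filters(unsigned int ftid,
                                                     int rspq_id,
                                                     struct fw_filter_wr *notify,
                                                     struct fw_filter_wr *silent)
{
        t4_mk_filtdelwr(ftid, notify, rspq_id); /* reply sent to rspq_id */
        t4_mk_filtdelwr(ftid, silent, -1);      /* reply suppressed */
}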
5696
5697 #define INIT_CMD(var, cmd, rd_wr) do { \
5698         (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
5699                                         FW_CMD_REQUEST_F | \
5700                                         FW_CMD_##rd_wr##_F); \
5701         (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
5702 } while (0)
5703
5704 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
5705                           u32 addr, u32 val)
5706 {
5707         u32 ldst_addrspace;
5708         struct fw_ldst_cmd c;
5709
5710         memset(&c, 0, sizeof(c));
5711         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
5712         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5713                                         FW_CMD_REQUEST_F |
5714                                         FW_CMD_WRITE_F |
5715                                         ldst_addrspace);
5716         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5717         c.u.addrval.addr = cpu_to_be32(addr);
5718         c.u.addrval.val = cpu_to_be32(val);
5719
5720         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5721 }
5722
5723 /**
5724  *      t4_mdio_rd - read a PHY register through MDIO
5725  *      @adap: the adapter
5726  *      @mbox: mailbox to use for the FW command
5727  *      @phy_addr: the PHY address
5728  *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5729  *      @reg: the register to read
5730  *      @valp: where to store the value
5731  *
5732  *      Issues a FW command through the given mailbox to read a PHY register.
5733  */
5734 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5735                unsigned int mmd, unsigned int reg, u16 *valp)
5736 {
5737         int ret;
5738         u32 ldst_addrspace;
5739         struct fw_ldst_cmd c;
5740
5741         memset(&c, 0, sizeof(c));
5742         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
5743         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5744                                         FW_CMD_REQUEST_F | FW_CMD_READ_F |
5745                                         ldst_addrspace);
5746         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5747         c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
5748                                          FW_LDST_CMD_MMD_V(mmd));
5749         c.u.mdio.raddr = cpu_to_be16(reg);
5750
5751         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5752         if (ret == 0)
5753                 *valp = be16_to_cpu(c.u.mdio.rval);
5754         return ret;
5755 }
5756
5757 /**
5758  *      t4_mdio_wr - write a PHY register through MDIO
5759  *      @adap: the adapter
5760  *      @mbox: mailbox to use for the FW command
5761  *      @phy_addr: the PHY address
5762  *      @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5763  *      @reg: the register to write
5764  *      @valp: value to write
5765  *
5766  *      Issues a FW command through the given mailbox to write a PHY register.
5767  */
5768 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5769                unsigned int mmd, unsigned int reg, u16 val)
5770 {
5771         u32 ldst_addrspace;
5772         struct fw_ldst_cmd c;
5773
5774         memset(&c, 0, sizeof(c));
5775         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
5776         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5777                                         FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5778                                         ldst_addrspace);
5779         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5780         c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
5781                                          FW_LDST_CMD_MMD_V(mmd));
5782         c.u.mdio.raddr = cpu_to_be16(reg);
5783         c.u.mdio.rval = cpu_to_be16(val);
5784
5785         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5786 }
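
/* Illustrative sketch, not part of the driver: set bits in a clause-45 PHY
 * register with a read-modify-write through the firmware MDIO interface
 * above.  The function name is hypothetical; phy_addr, mmd and reg are
 * whatever the caller's PHY requires.
 */
static int __maybe_unused example_mdio_set_bits(struct adapter *adap,
                                                unsigned int mbox,
                                                unsigned int phy_addr,
                                                unsigned int mmd,
                                                unsigned int reg, u16 bits)
{
        u16 val;
        int ret;

        ret = t4_mdio_rd(adap, mbox, phy_addr, mmd, reg, &val);
        if (ret)
                return ret;
        return t4_mdio_wr(adap, mbox, phy_addr, mmd, reg, val | bits);
}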
5787
5788 /**
5789  *      t4_sge_decode_idma_state - decode the idma state
5790  *      @adapter: the adapter
5791  *      @state: the state the IDMA is stuck in
5792  */
5793 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
5794 {
5795         static const char * const t4_decode[] = {
5796                 "IDMA_IDLE",
5797                 "IDMA_PUSH_MORE_CPL_FIFO",
5798                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5799                 "Not used",
5800                 "IDMA_PHYSADDR_SEND_PCIEHDR",
5801                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5802                 "IDMA_PHYSADDR_SEND_PAYLOAD",
5803                 "IDMA_SEND_FIFO_TO_IMSG",
5804                 "IDMA_FL_REQ_DATA_FL_PREP",
5805                 "IDMA_FL_REQ_DATA_FL",
5806                 "IDMA_FL_DROP",
5807                 "IDMA_FL_H_REQ_HEADER_FL",
5808                 "IDMA_FL_H_SEND_PCIEHDR",
5809                 "IDMA_FL_H_PUSH_CPL_FIFO",
5810                 "IDMA_FL_H_SEND_CPL",
5811                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5812                 "IDMA_FL_H_SEND_IP_HDR",
5813                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5814                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5815                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5816                 "IDMA_FL_D_SEND_PCIEHDR",
5817                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5818                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5819                 "IDMA_FL_SEND_PCIEHDR",
5820                 "IDMA_FL_PUSH_CPL_FIFO",
5821                 "IDMA_FL_SEND_CPL",
5822                 "IDMA_FL_SEND_PAYLOAD_FIRST",
5823                 "IDMA_FL_SEND_PAYLOAD",
5824                 "IDMA_FL_REQ_NEXT_DATA_FL",
5825                 "IDMA_FL_SEND_NEXT_PCIEHDR",
5826                 "IDMA_FL_SEND_PADDING",
5827                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5828                 "IDMA_FL_SEND_FIFO_TO_IMSG",
5829                 "IDMA_FL_REQ_DATAFL_DONE",
5830                 "IDMA_FL_REQ_HEADERFL_DONE",
5831         };
5832         static const char * const t5_decode[] = {
5833                 "IDMA_IDLE",
5834                 "IDMA_ALMOST_IDLE",
5835                 "IDMA_PUSH_MORE_CPL_FIFO",
5836                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5837                 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
5838                 "IDMA_PHYSADDR_SEND_PCIEHDR",
5839                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5840                 "IDMA_PHYSADDR_SEND_PAYLOAD",
5841                 "IDMA_SEND_FIFO_TO_IMSG",
5842                 "IDMA_FL_REQ_DATA_FL",
5843                 "IDMA_FL_DROP",
5844                 "IDMA_FL_DROP_SEND_INC",
5845                 "IDMA_FL_H_REQ_HEADER_FL",
5846                 "IDMA_FL_H_SEND_PCIEHDR",
5847                 "IDMA_FL_H_PUSH_CPL_FIFO",
5848                 "IDMA_FL_H_SEND_CPL",
5849                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5850                 "IDMA_FL_H_SEND_IP_HDR",
5851                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5852                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5853                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5854                 "IDMA_FL_D_SEND_PCIEHDR",
5855                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5856                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5857                 "IDMA_FL_SEND_PCIEHDR",
5858                 "IDMA_FL_PUSH_CPL_FIFO",
5859                 "IDMA_FL_SEND_CPL",
5860                 "IDMA_FL_SEND_PAYLOAD_FIRST",
5861                 "IDMA_FL_SEND_PAYLOAD",
5862                 "IDMA_FL_REQ_NEXT_DATA_FL",
5863                 "IDMA_FL_SEND_NEXT_PCIEHDR",
5864                 "IDMA_FL_SEND_PADDING",
5865                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5866         };
5867         static const char * const t6_decode[] = {
5868                 "IDMA_IDLE",
5869                 "IDMA_PUSH_MORE_CPL_FIFO",
5870                 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5871                 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
5872                 "IDMA_PHYSADDR_SEND_PCIEHDR",
5873                 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5874                 "IDMA_PHYSADDR_SEND_PAYLOAD",
5875                 "IDMA_FL_REQ_DATA_FL",
5876                 "IDMA_FL_DROP",
5877                 "IDMA_FL_DROP_SEND_INC",
5878                 "IDMA_FL_H_REQ_HEADER_FL",
5879                 "IDMA_FL_H_SEND_PCIEHDR",
5880                 "IDMA_FL_H_PUSH_CPL_FIFO",
5881                 "IDMA_FL_H_SEND_CPL",
5882                 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5883                 "IDMA_FL_H_SEND_IP_HDR",
5884                 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5885                 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5886                 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5887                 "IDMA_FL_D_SEND_PCIEHDR",
5888                 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5889                 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5890                 "IDMA_FL_SEND_PCIEHDR",
5891                 "IDMA_FL_PUSH_CPL_FIFO",
5892                 "IDMA_FL_SEND_CPL",
5893                 "IDMA_FL_SEND_PAYLOAD_FIRST",
5894                 "IDMA_FL_SEND_PAYLOAD",
5895                 "IDMA_FL_REQ_NEXT_DATA_FL",
5896                 "IDMA_FL_SEND_NEXT_PCIEHDR",
5897                 "IDMA_FL_SEND_PADDING",
5898                 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5899         };
5900         static const u32 sge_regs[] = {
5901                 SGE_DEBUG_DATA_LOW_INDEX_2_A,
5902                 SGE_DEBUG_DATA_LOW_INDEX_3_A,
5903                 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
5904         };
5905         const char **sge_idma_decode;
5906         int sge_idma_decode_nstates;
5907         int i;
5908         unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
5909
5910         /* Select the right set of decode strings to dump depending on the
5911          * adapter chip type.
5912          */
5913         switch (chip_version) {
5914         case CHELSIO_T4:
5915                 sge_idma_decode = (const char **)t4_decode;
5916                 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
5917                 break;
5918
5919         case CHELSIO_T5:
5920                 sge_idma_decode = (const char **)t5_decode;
5921                 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
5922                 break;
5923
5924         case CHELSIO_T6:
5925                 sge_idma_decode = (const char **)t6_decode;
5926                 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
5927                 break;
5928
5929         default:
5930                 dev_err(adapter->pdev_dev,
5931                         "Unsupported chip version %d\n", chip_version);
5932                 return;
5933         }
5934
5943         if (state < sge_idma_decode_nstates)
5944                 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
5945         else
5946                 CH_WARN(adapter, "idma state %d unknown\n", state);
5947
5948         for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
5949                 CH_WARN(adapter, "SGE register %#x value %#x\n",
5950                         sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
5951 }
5952
5953 /**
5954  *      t4_sge_ctxt_flush - flush the SGE context cache
5955  *      @adap: the adapter
5956  *      @mbox: mailbox to use for the FW command
5957  *
5958  *      Issues a FW command through the given mailbox to flush the
5959  *      SGE context cache.
5960  */
5961 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
5962 {
5963         int ret;
5964         u32 ldst_addrspace;
5965         struct fw_ldst_cmd c;
5966
5967         memset(&c, 0, sizeof(c));
5968         ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
5969         c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5970                                         FW_CMD_REQUEST_F | FW_CMD_READ_F |
5971                                         ldst_addrspace);
5972         c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5973         c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
5974
5975         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5976         return ret;
5977 }
5978
5979 /**
5980  *      t4_fw_hello - establish communication with FW
5981  *      @adap: the adapter
5982  *      @mbox: mailbox to use for the FW command
5983  *      @evt_mbox: mailbox to receive async FW events
5984  *      @master: specifies the caller's willingness to be the device master
5985  *      @state: returns the current device state (if non-NULL)
5986  *
5987  *      Issues a command to establish communication with FW.  Returns either
5988  *      an error (negative integer) or the mailbox of the Master PF.
5989  */
5990 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
5991                 enum dev_master master, enum dev_state *state)
5992 {
5993         int ret;
5994         struct fw_hello_cmd c;
5995         u32 v;
5996         unsigned int master_mbox;
5997         int retries = FW_CMD_HELLO_RETRIES;
5998
5999 retry:
6000         memset(&c, 0, sizeof(c));
6001         INIT_CMD(c, HELLO, WRITE);
6002         c.err_to_clearinit = cpu_to_be32(
6003                 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
6004                 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
6005                 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
6006                                         mbox : FW_HELLO_CMD_MBMASTER_M) |
6007                 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
6008                 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
6009                 FW_HELLO_CMD_CLEARINIT_F);
6010
6011         /*
6012          * Issue the HELLO command to the firmware.  If it's not successful
6013          * but indicates that we got a "busy" or "timeout" condition, retry
6014          * the HELLO until we exhaust our retry limit.  If we do exceed our
6015          * retry limit, check to see if the firmware left us any error
6016          * information and report that if so.
6017          */
6018         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6019         if (ret < 0) {
6020                 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6021                         goto retry;
6022                 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
6023                         t4_report_fw_error(adap);
6024                 return ret;
6025         }
6026
6027         v = be32_to_cpu(c.err_to_clearinit);
6028         master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
6029         if (state) {
6030                 if (v & FW_HELLO_CMD_ERR_F)
6031                         *state = DEV_STATE_ERR;
6032                 else if (v & FW_HELLO_CMD_INIT_F)
6033                         *state = DEV_STATE_INIT;
6034                 else
6035                         *state = DEV_STATE_UNINIT;
6036         }
6037
6038         /*
6039          * If we're not the Master PF then we need to wait around for the
6040          * Master PF Driver to finish setting up the adapter.
6041          *
6042          * Note that we also do this wait if we're a non-Master-capable PF and
6043          * there is no current Master PF; a Master PF may show up momentarily
6044          * and we wouldn't want to fail pointlessly.  (This can happen when an
6045          * OS loads lots of different drivers rapidly at the same time).  In
6046          * this case, the Master PF returned by the firmware will be
6047          * PCIE_FW_MASTER_M so the test below will work ...
6048          */
6049         if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
6050             master_mbox != mbox) {
6051                 int waiting = FW_CMD_HELLO_TIMEOUT;
6052
6053                 /*
6054                  * Wait for the firmware to either indicate an error or
6055                  * initialized state.  If we see either of these we bail out
6056                  * and report the issue to the caller.  If we exhaust the
6057                  * "hello timeout" and we haven't exhausted our retries, try
6058                  * again.  Otherwise bail with a timeout error.
6059                  */
6060                 for (;;) {
6061                         u32 pcie_fw;
6062
6063                         msleep(50);
6064                         waiting -= 50;
6065
6066                         /*
6067                          * If neither Error nor Initialized is indicated
6068                          * by the firmware, keep waiting till we exhaust our
6069                          * timeout ... and then retry if we haven't exhausted
6070                          * our retries ...
6071                          */
6072                         pcie_fw = t4_read_reg(adap, PCIE_FW_A);
6073                         if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
6074                                 if (waiting <= 0) {
6075                                         if (retries-- > 0)
6076                                                 goto retry;
6077
6078                                         return -ETIMEDOUT;
6079                                 }
6080                                 continue;
6081                         }
6082
6083                         /*
6084                          * We either have an Error or Initialized condition;
6085                          * report errors preferentially.
6086                          */
6087                         if (state) {
6088                                 if (pcie_fw & PCIE_FW_ERR_F)
6089                                         *state = DEV_STATE_ERR;
6090                                 else if (pcie_fw & PCIE_FW_INIT_F)
6091                                         *state = DEV_STATE_INIT;
6092                         }
6093
6094                         /*
6095                          * If we arrived before a Master PF was selected and
6096                          * there's now a valid Master PF, grab its identity
6097                          * for our caller.
6098                          */
6099                         if (master_mbox == PCIE_FW_MASTER_M &&
6100                             (pcie_fw & PCIE_FW_MASTER_VLD_F))
6101                                 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6102                         break;
6103                 }
6104         }
6105
6106         return master_mbox;
6107 }
6108
6109 /**
6110  *      t4_fw_bye - end communication with FW
6111  *      @adap: the adapter
6112  *      @mbox: mailbox to use for the FW command
6113  *
6114  *      Issues a command to terminate communication with FW.
6115  */
6116 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6117 {
6118         struct fw_bye_cmd c;
6119
6120         memset(&c, 0, sizeof(c));
6121         INIT_CMD(c, BYE, WRITE);
6122         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6123 }
6124
6125 /**
6126  *      t4_early_init - ask FW to initialize the device
6127  *      @adap: the adapter
6128  *      @mbox: mailbox to use for the FW command
6129  *
6130  *      Issues a command to FW to partially initialize the device.  This
6131  *      performs initialization that generally doesn't depend on user input.
6132  */
6133 int t4_early_init(struct adapter *adap, unsigned int mbox)
6134 {
6135         struct fw_initialize_cmd c;
6136
6137         memset(&c, 0, sizeof(c));
6138         INIT_CMD(c, INITIALIZE, WRITE);
6139         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6140 }
6141
6142 /**
6143  *      t4_fw_reset - issue a reset to FW
6144  *      @adap: the adapter
6145  *      @mbox: mailbox to use for the FW command
6146  *      @reset: specifies the type of reset to perform
6147  *
6148  *      Issues a reset command of the specified type to FW.
6149  */
6150 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6151 {
6152         struct fw_reset_cmd c;
6153
6154         memset(&c, 0, sizeof(c));
6155         INIT_CMD(c, RESET, WRITE);
6156         c.val = cpu_to_be32(reset);
6157         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6158 }
6159
6160 /**
6161  *      t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6162  *      @adap: the adapter
6163  *      @mbox: mailbox to use for the FW RESET command (if desired)
6164  *      @force: force uP into RESET even if FW RESET command fails
6165  *
6166  *      Issues a RESET command to firmware (if desired) with a HALT indication
6167  *      and then puts the microprocessor into RESET state.  The RESET command
6168  *      will only be issued if a legitimate mailbox is provided (mbox <=
6169  *      PCIE_FW_MASTER_M).
6170  *
6171  *      This is generally used in order for the host to safely manipulate the
6172  *      adapter without fear of conflicting with whatever the firmware might
6173  *      be doing.  The only way out of this state is to RESTART the firmware
6174  *      ...
6175  */
6176 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6177 {
6178         int ret = 0;
6179
6180         /*
6181          * If a legitimate mailbox is provided, issue a RESET command
6182          * with a HALT indication.
6183          */
6184         if (mbox <= PCIE_FW_MASTER_M) {
6185                 struct fw_reset_cmd c;
6186
6187                 memset(&c, 0, sizeof(c));
6188                 INIT_CMD(c, RESET, WRITE);
6189                 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
6190                 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
6191                 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6192         }
6193
6194         /*
6195          * Normally we won't complete the operation if the firmware RESET
6196          * command fails but if our caller insists we'll go ahead and put the
6197          * uP into RESET.  This can be useful if the firmware is hung or even
6198          * missing ...  We'll have to take the risk of putting the uP into
6199          * RESET without the cooperation of firmware in that case.
6200          *
6201          * We also force the firmware's HALT flag to be on in case we bypassed
6202          * the firmware RESET command above or we're dealing with old firmware
6203          * which doesn't have the HALT capability.  This will serve as a flag
6204          * for the incoming firmware to know that it's coming out of a HALT
6205          * rather than a RESET ... if it's new enough to understand that ...
6206          */
6207         if (ret == 0 || force) {
6208                 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
6209                 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
6210                                  PCIE_FW_HALT_F);
6211         }
6212
6213         /*
6214          * And we always return the result of the firmware RESET command
6215          * even when we force the uP into RESET ...
6216          */
6217         return ret;
6218 }
6219
6220 /**
6221  *      t4_fw_restart - restart the firmware by taking the uP out of RESET
6222  *      @adap: the adapter
6223  *      @mbox: mailbox to use for the FW RESET command (if desired)
 *      @reset: if we want to do a RESET to restart things
6224  *
6225  *      Restart firmware previously halted by t4_fw_halt().  On successful
6226  *      return the previous PF Master remains as the new PF Master and there
6227  *      is no need to issue a new HELLO command, etc.
6228  *
6229  *      We do this in two ways:
6230  *
6231  *       1. If we're dealing with newer firmware we'll simply want to take
6232  *          the chip's microprocessor out of RESET.  This will cause the
6233  *          firmware to start up from its start vector.  And then we'll loop
6234  *          until the firmware indicates it's started again (PCIE_FW.HALT
6235  *          reset to 0) or we timeout.
6236  *
6237  *       2. If we're dealing with older firmware then we'll need to RESET
6238  *          the chip since older firmware won't recognize the PCIE_FW.HALT
6239  *          flag and automatically RESET itself on startup.
6240  */
6241 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6242 {
6243         if (reset) {
6244                 /*
6245                  * Since we're directing the RESET instead of the firmware
6246                  * doing it automatically, we need to clear the PCIE_FW.HALT
6247                  * bit.
6248                  */
6249                 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
6250
6251                 /*
6252                  * If we've been given a valid mailbox, first try to get the
6253                  * firmware to do the RESET.  If that works, great and we can
6254                  * return success.  Otherwise, if we haven't been given a
6255                  * valid mailbox or the RESET command failed, fall back to
6256                  * hitting the chip with a hammer.
6257                  */
6258                 if (mbox <= PCIE_FW_MASTER_M) {
6259                         t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6260                         msleep(100);
6261                         if (t4_fw_reset(adap, mbox,
6262                                         PIORST_F | PIORSTMODE_F) == 0)
6263                                 return 0;
6264                 }
6265
6266                 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
6267                 msleep(2000);
6268         } else {
6269                 int ms;
6270
6271                 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6272                 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6273                         if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
6274                                 return 0;
6275                         msleep(100);
6276                         ms += 100;
6277                 }
6278                 return -ETIMEDOUT;
6279         }
6280         return 0;
6281 }
6282
6283 /**
6284  *      t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6285  *      @adap: the adapter
6286  *      @mbox: mailbox to use for the FW RESET command (if desired)
6287  *      @fw_data: the firmware image to write
6288  *      @size: image size
6289  *      @force: force upgrade even if firmware doesn't cooperate
6290  *
6291  *      Perform all of the steps necessary for upgrading an adapter's
6292  *      firmware image.  Normally this requires the cooperation of the
6293  *      existing firmware in order to halt all existing activities
6294  *      but if an invalid mailbox token is passed in we skip that step
6295  *      (though we'll still put the adapter microprocessor into RESET in
6296  *      that case).
6297  *
6298  *      On successful return the new firmware will have been loaded and
6299  *      the adapter will have been fully RESET losing all previous setup
6300  *      state.  On unsuccessful return the adapter may be completely hosed ...
6301  *      positive errno indicates that the adapter is ~probably~ intact, a
6302  *      negative errno indicates that things are looking bad ...
6303  */
6304 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6305                   const u8 *fw_data, unsigned int size, int force)
6306 {
6307         const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6308         int reset, ret;
6309
6310         if (!t4_fw_matches_chip(adap, fw_hdr))
6311                 return -EINVAL;
6312
6313         /* Disable the FW_OK flag so that mbox commands with the FW_OK flag
6314          * set won't be sent while we are flashing FW.
6315          */
6316         adap->flags &= ~FW_OK;
6317
6318         ret = t4_fw_halt(adap, mbox, force);
6319         if (ret < 0 && !force)
6320                 goto out;
6321
6322         ret = t4_load_fw(adap, fw_data, size);
6323         if (ret < 0)
6324                 goto out;
6325
6326         /*
6327          * Older versions of the firmware don't understand the new
6328          * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6329          * restart.  So for newly loaded older firmware we'll have to do the
6330          * RESET for it so it starts up on a clean slate.  We can tell if
6331          * the newly loaded firmware will handle this right by checking
6332          * its header flags to see if it advertises the capability.
6333          */
6334         reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6335         ret = t4_fw_restart(adap, mbox, reset);
6336
6337         /* Grab potentially new Firmware Device Log parameters so we can see
6338          * how healthy the new Firmware is.  It's okay to contact the new
6339          * Firmware for these parameters even though, as far as it's
6340          * concerned, we've never said "HELLO" to it ...
6341          */
6342         (void)t4_init_devlog_params(adap);
6343 out:
6344         adap->flags |= FW_OK;
6345         return ret;
6346 }
6347
6348 /**
6349  *      t4_fl_pkt_align - return the fl packet alignment
6350  *      @adap: the adapter
6351  *
6352  *      T4 has a single field to specify the packing and padding boundary.
6353  *      T5 onwards has separate fields for this, so the alignment for the
6354  *      next packet offset is the maximum of the two.
6355  *
6356  */
6357 int t4_fl_pkt_align(struct adapter *adap)
6358 {
6359         u32 sge_control, sge_control2;
6360         unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
6361
6362         sge_control = t4_read_reg(adap, SGE_CONTROL_A);
6363
6364         /* T4 uses a single control field to specify both the PCIe Padding and
6365          * Packing Boundary.  T5 introduced the ability to specify these
6366          * separately.  The actual Ingress Packet Data alignment boundary
6367          * within Packed Buffer Mode is the maximum of these two
6368          * specifications.  (Note that it makes no real practical sense to
6369          * have the Padding Boundary be larger than the Packing Boundary, but
6370          * you could set the chip up that way and, in fact, legacy T4 code
6371          * would end up doing this because it would initialize the Padding
6372          * Boundary and leave the Packing Boundary initialized to 0 (16 bytes).)
6373          * The minimum Padding Boundary in T6 is 8 bytes,
6374          * whereas it is 32 bytes for T4 and T5.
6375          */
6376         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
6377                 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
6378         else
6379                 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
6380
6381         ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
6382
6383         fl_align = ingpadboundary;
6384         if (!is_t4(adap->params.chip)) {
6385                 /* T5 has a weird interpretation of one of the PCIe Packing
6386                  * Boundary values.  No idea why ...
6387                  */
6388                 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
6389                 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
6390                 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
6391                         ingpackboundary = 16;
6392                 else
6393                         ingpackboundary = 1 << (ingpackboundary +
6394                                                 INGPACKBOUNDARY_SHIFT_X);
6395
6396                 fl_align = max(ingpadboundary, ingpackboundary);
6397         }
6398         return fl_align;
6399 }
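
/* Illustrative sketch, not part of the driver: round a free-list buffer
 * size up to the packet alignment reported by t4_fl_pkt_align(), using the
 * same round-up pattern applied to the SGE_FL_BUFFER_SIZE registers in
 * t4_fixup_host_params() below.  The function name is hypothetical.
 */
static unsigned int __maybe_unused example_round_fl_buf(struct adapter *adap,
                                                        unsigned int size)
{
        unsigned int fl_align = t4_fl_pkt_align(adap);

        return (size + fl_align - 1) & ~(fl_align - 1);
}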
6400
6401 /**
6402  *      t4_fixup_host_params - fix up host-dependent parameters
6403  *      @adap: the adapter
6404  *      @page_size: the host's Base Page Size
6405  *      @cache_line_size: the host's Cache Line Size
6406  *
6407  *      Various registers in T4 contain values which are dependent on the
6408  *      host's Base Page and Cache Line Sizes.  This function will fix all of
6409  *      those registers with the appropriate values as passed in ...
6410  */
6411 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
6412                          unsigned int cache_line_size)
6413 {
6414         unsigned int page_shift = fls(page_size) - 1;
6415         unsigned int sge_hps = page_shift - 10;
6416         unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
6417         unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
6418         unsigned int fl_align_log = fls(fl_align) - 1;
6419
6420         t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
6421                      HOSTPAGESIZEPF0_V(sge_hps) |
6422                      HOSTPAGESIZEPF1_V(sge_hps) |
6423                      HOSTPAGESIZEPF2_V(sge_hps) |
6424                      HOSTPAGESIZEPF3_V(sge_hps) |
6425                      HOSTPAGESIZEPF4_V(sge_hps) |
6426                      HOSTPAGESIZEPF5_V(sge_hps) |
6427                      HOSTPAGESIZEPF6_V(sge_hps) |
6428                      HOSTPAGESIZEPF7_V(sge_hps));
6429
6430         if (is_t4(adap->params.chip)) {
6431                 t4_set_reg_field(adap, SGE_CONTROL_A,
6432                                  INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
6433                                  EGRSTATUSPAGESIZE_F,
6434                                  INGPADBOUNDARY_V(fl_align_log -
6435                                                   INGPADBOUNDARY_SHIFT_X) |
6436                                  EGRSTATUSPAGESIZE_V(stat_len != 64));
6437         } else {
6438                 unsigned int pack_align;
6439                 unsigned int ingpad, ingpack;
6440                 unsigned int pcie_cap;
6441
6442                 /* T5 introduced the separation of the Free List Padding and
6443                  * Packing Boundaries.  Thus, we can select a smaller Padding
6444                  * Boundary to avoid uselessly chewing up PCIe Link and Memory
6445                  * Bandwidth, and use a Packing Boundary which is large enough
6446                  * to avoid false sharing between CPUs, etc.
6447                  *
6448                  * For the PCI Link, the smaller the Padding Boundary the
6449                  * better.  For the Memory Controller, a smaller Padding
6450                  * Boundary is better until we cross under the Memory Line
6451                  * Size (the minimum unit of transfer to/from Memory).  If we
6452                  * have a Padding Boundary which is smaller than the Memory
6453                  * Line Size, that'll involve a Read-Modify-Write cycle on the
6454                  * Memory Controller which is never good.
6455                  */
6456
6457                 /* We want the Packing Boundary to be based on the Cache Line
6458                  * Size in order to help avoid False Sharing performance
6459                  * issues between CPUs, etc.  We also want the Packing
6460                  * Boundary to incorporate the PCI-E Maximum Payload Size.  We
6461                  * get best performance when the Packing Boundary is a
6462                  * multiple of the Maximum Payload Size.
6463                  */
6464                 pack_align = fl_align;
6465                 pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
6466                 if (pcie_cap) {
6467                         unsigned int mps, mps_log;
6468                         u16 devctl;
6469
6470                         /* The PCIe Device Control Maximum Payload Size field
6471                          * [bits 7:5] encodes sizes as powers of 2 starting at
6472                          * 128 bytes.
6473                          */
6474                         pci_read_config_word(adap->pdev,
6475                                              pcie_cap + PCI_EXP_DEVCTL,
6476                                              &devctl);
6477                         mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
6478                         mps = 1 << mps_log;
6479                         if (mps > pack_align)
6480                                 pack_align = mps;
6481                 }
6482
6483                 /* N.B. T5/T6 have a crazy special interpretation of the "0"
6484                  * value for the Packing Boundary.  This corresponds to 16
6485                  * bytes instead of the expected 32 bytes.  So if we want 32
6486                  * bytes, the best we can really do is 64 bytes ...
6487                  */
6488                 if (pack_align <= 16) {
6489                         ingpack = INGPACKBOUNDARY_16B_X;
6490                         fl_align = 16;
6491                 } else if (pack_align == 32) {
6492                         ingpack = INGPACKBOUNDARY_64B_X;
6493                         fl_align = 64;
6494                 } else {
6495                         unsigned int pack_align_log = fls(pack_align) - 1;
6496
6497                         ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
6498                         fl_align = pack_align;
6499                 }
6500
6501                 /* Use the smallest Ingress Padding which isn't smaller than
6502                  * the Memory Controller Read/Write Size.  We'll take that as
6503                  * being 8 bytes since we don't know of any system with a
6504                  * wider Memory Controller Bus Width.
6505                  */
6506                 if (is_t5(adap->params.chip))
6507                         ingpad = INGPADBOUNDARY_32B_X;
6508                 else
6509                         ingpad = T6_INGPADBOUNDARY_8B_X;
6510
6511                 t4_set_reg_field(adap, SGE_CONTROL_A,
6512                                  INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
6513                                  EGRSTATUSPAGESIZE_F,
6514                                  INGPADBOUNDARY_V(ingpad) |
6515                                  EGRSTATUSPAGESIZE_V(stat_len != 64));
6516                 t4_set_reg_field(adap, SGE_CONTROL2_A,
6517                                  INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
6518                                  INGPACKBOUNDARY_V(ingpack));
6519         }
6520         /*
6521          * Adjust various SGE Free List Host Buffer Sizes.
6522          *
6523          * This is something of a crock since we're using fixed indices into
6524          * the array which are also known by the sge.c code and the T4
6525          * Firmware Configuration File.  We need to come up with a much better
6526          * approach to managing this array.  For now, the first four entries
6527          * are:
6528          *
6529          *   0: Host Page Size
6530          *   1: 64KB
6531          *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
6532          *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
6533          *
6534          * For the single-MTU buffers in unpacked mode we need to include
6535          * space for the SGE Control Packet Shift, 14 byte Ethernet header,
6536          * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
6537          * Padding boundary.  All of these are accommodated in the Factory
6538          * Default Firmware Configuration File but we need to adjust it for
6539          * this host's cache line size.
6540          */
6541         t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
6542         t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
6543                      (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
6544                      & ~(fl_align-1));
6545         t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
6546                      (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
6547                      & ~(fl_align-1));
6548
6549         t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
6550
6551         return 0;
6552 }
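
/* Illustrative sketch, not part of the driver: the Maximum Payload Size
 * arithmetic used in t4_fixup_host_params() above, pulled out as a worked
 * example.  A PCI_EXP_DEVCTL_PAYLOAD encoding of 2 yields 1 << (2 + 7) =
 * 512 bytes.  The function name is hypothetical.
 */
static unsigned int __maybe_unused example_decode_mps(u16 devctl)
{
        unsigned int mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;

        return 1U << mps_log;
}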
6553
6554 /**
6555  *      t4_fw_initialize - ask FW to initialize the device
6556  *      @adap: the adapter
6557  *      @mbox: mailbox to use for the FW command
6558  *
6559  *      Issues a command to FW to partially initialize the device.  This
6560  *      performs initialization that generally doesn't depend on user input.
6561  */
6562 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6563 {
6564         struct fw_initialize_cmd c;
6565
6566         memset(&c, 0, sizeof(c));
6567         INIT_CMD(c, INITIALIZE, WRITE);
6568         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6569 }
6570
6571 /**
6572  *      t4_query_params_rw - query FW or device parameters
6573  *      @adap: the adapter
6574  *      @mbox: mailbox to use for the FW command
6575  *      @pf: the PF
6576  *      @vf: the VF
6577  *      @nparams: the number of parameters
6578  *      @params: the parameter names
6579  *      @val: the parameter values
6580  *      @rw: if set, the current @val contents are sent to FW along with the query
6581  *
6582  *      Reads the value of FW or device parameters.  Up to 7 parameters can be
6583  *      queried at once.
6584  */
6585 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6586                        unsigned int vf, unsigned int nparams, const u32 *params,
6587                        u32 *val, int rw)
6588 {
6589         int i, ret;
6590         struct fw_params_cmd c;
6591         __be32 *p = &c.param[0].mnem;
6592
6593         if (nparams > 7)
6594                 return -EINVAL;
6595
6596         memset(&c, 0, sizeof(c));
6597         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
6598                                   FW_CMD_REQUEST_F | FW_CMD_READ_F |
6599                                   FW_PARAMS_CMD_PFN_V(pf) |
6600                                   FW_PARAMS_CMD_VFN_V(vf));
6601         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6602
6603         for (i = 0; i < nparams; i++) {
6604                 *p++ = cpu_to_be32(*params++);
6605                 if (rw)
6606                         *p = cpu_to_be32(*(val + i));
6607                 p++;
6608         }
6609
6610         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6611         if (ret == 0)
6612                 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6613                         *val++ = be32_to_cpu(*p);
6614         return ret;
6615 }
6616
6617 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6618                     unsigned int vf, unsigned int nparams, const u32 *params,
6619                     u32 *val)
6620 {
6621         return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6622 }
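
/* Illustrative sketch, not part of the driver: query a single firmware
 * parameter with t4_query_params().  FW_PARAMS_PARAM_DEV_PORTVEC is used
 * here purely as an example mnemonic; the encoding macros come from
 * t4fw_api.h.
 */
static int __maybe_unused example_read_portvec(struct adapter *adap,
                                               unsigned int mbox,
                                               unsigned int pf, unsigned int vf,
                                               u32 *portvec)
{
        u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);

        return t4_query_params(adap, mbox, pf, vf, 1, &param, portvec);
}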
6623
6624 /**
6625  *      t4_set_params_timeout - sets FW or device parameters
6626  *      @adap: the adapter
6627  *      @mbox: mailbox to use for the FW command
6628  *      @pf: the PF
6629  *      @vf: the VF
6630  *      @nparams: the number of parameters
6631  *      @params: the parameter names
6632  *      @val: the parameter values
6633  *      @timeout: the timeout time
6634  *
6635  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
6636  *      specified at once.
6637  */
6638 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6639                           unsigned int pf, unsigned int vf,
6640                           unsigned int nparams, const u32 *params,
6641                           const u32 *val, int timeout)
6642 {
6643         struct fw_params_cmd c;
6644         __be32 *p = &c.param[0].mnem;
6645
6646         if (nparams > 7)
6647                 return -EINVAL;
6648
6649         memset(&c, 0, sizeof(c));
6650         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
6651                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6652                                   FW_PARAMS_CMD_PFN_V(pf) |
6653                                   FW_PARAMS_CMD_VFN_V(vf));
6654         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6655
6656         while (nparams--) {
6657                 *p++ = cpu_to_be32(*params++);
6658                 *p++ = cpu_to_be32(*val++);
6659         }
6660
6661         return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6662 }
6663
6664 /**
6665  *      t4_set_params - sets FW or device parameters
6666  *      @adap: the adapter
6667  *      @mbox: mailbox to use for the FW command
6668  *      @pf: the PF
6669  *      @vf: the VF
6670  *      @nparams: the number of parameters
6671  *      @params: the parameter names
6672  *      @val: the parameter values
6673  *
6674  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
6675  *      specified at once.
6676  */
6677 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6678                   unsigned int vf, unsigned int nparams, const u32 *params,
6679                   const u32 *val)
6680 {
6681         return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6682                                      FW_CMD_MAX_TIMEOUT);
6683 }
6684
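/* Illustrative usage sketch (not part of the driver source): writes use
 * the same mnemonic encoding shown after t4_query_params() above, with
 * the caller supplying the new values:
 *
 *	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 *
 * All (up to seven) parameters are delivered to the firmware in one
 * command, so related settings travel together.
 */
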
6685 /**
6686  *      t4_cfg_pfvf - configure PF/VF resource limits
6687  *      @adap: the adapter
6688  *      @mbox: mailbox to use for the FW command
6689  *      @pf: the PF being configured
6690  *      @vf: the VF being configured
6691  *      @txq: the max number of egress queues
6692  *      @txq_eth_ctrl: the max number of egress Ethernet or control queues
6693  *      @rxqi: the max number of interrupt-capable ingress queues
6694  *      @rxq: the max number of interruptless ingress queues
6695  *      @tc: the PCI traffic class
6696  *      @vi: the max number of virtual interfaces
6697  *      @cmask: the channel access rights mask for the PF/VF
6698  *      @pmask: the port access rights mask for the PF/VF
6699  *      @nexact: the maximum number of exact MPS filters
6700  *      @rcaps: read capabilities
6701  *      @wxcaps: write/execute capabilities
6702  *
6703  *      Configures resource limits and capabilities for a physical or virtual
6704  *      function.
6705  */
6706 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6707                 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6708                 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6709                 unsigned int vi, unsigned int cmask, unsigned int pmask,
6710                 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6711 {
6712         struct fw_pfvf_cmd c;
6713
6714         memset(&c, 0, sizeof(c));
6715         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
6716                                   FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
6717                                   FW_PFVF_CMD_VFN_V(vf));
6718         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6719         c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
6720                                      FW_PFVF_CMD_NIQ_V(rxq));
6721         c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
6722                                     FW_PFVF_CMD_PMASK_V(pmask) |
6723                                     FW_PFVF_CMD_NEQ_V(txq));
6724         c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
6725                                       FW_PFVF_CMD_NVI_V(vi) |
6726                                       FW_PFVF_CMD_NEXACTF_V(nexact));
6727         c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
6728                                         FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
6729                                         FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
6730         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6731 }
6732
6733 /**
6734  *      t4_alloc_vi - allocate a virtual interface
6735  *      @adap: the adapter
6736  *      @mbox: mailbox to use for the FW command
6737  *      @port: physical port associated with the VI
6738  *      @pf: the PF owning the VI
6739  *      @vf: the VF owning the VI
6740  *      @nmac: number of MAC addresses needed (1 to 5)
6741  *      @mac: the MAC addresses of the VI
6742  *      @rss_size: size of RSS table slice associated with this VI
6743  *
6744  *      Allocates a virtual interface for the given physical port.  If @mac is
6745  *      not %NULL it contains the MAC addresses of the VI as assigned by FW.
6746  *      @mac should be large enough to hold @nmac Ethernet addresses; they are
6747  *      stored consecutively so the space needed is @nmac * 6 bytes.
6748  *      Returns a negative error number or the non-negative VI id.
6749  */
6750 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6751                 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6752                 unsigned int *rss_size)
6753 {
6754         int ret;
6755         struct fw_vi_cmd c;
6756
6757         memset(&c, 0, sizeof(c));
6758         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
6759                                   FW_CMD_WRITE_F | FW_CMD_EXEC_F |
6760                                   FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
6761         c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
6762         c.portid_pkd = FW_VI_CMD_PORTID_V(port);
6763         c.nmac = nmac - 1;
6764
6765         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6766         if (ret)
6767                 return ret;
6768
6769         if (mac) {
6770                 memcpy(mac, c.mac, sizeof(c.mac));
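                /* The cases below intentionally fall through: for
                 * @nmac == N the addresses N-1 .. 1 are copied
                 * cumulatively after the first one copied above.
                 */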
6771                 switch (nmac) {
6772                 case 5:
6773                         memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6774                 case 4:
6775                         memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6776                 case 3:
6777                         memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6778                 case 2:
6779                         memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
6780                 }
6781         }
6782         if (rss_size)
6783                 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
6784         return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
6785 }
6786
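/* Illustrative usage sketch (not part of the driver source): during port
 * initialization a PF could allocate one VI with a single MAC address on
 * physical port "port" and record its RSS slice size ("mac_addr" is a
 * local u8[ETH_ALEN] buffer in this sketch):
 *
 *	unsigned int rss_size;
 *	u8 mac_addr[ETH_ALEN];
 *	int viid = t4_alloc_vi(adap, adap->mbox, port, adap->pf, 0, 1,
 *			       mac_addr, &rss_size);
 *	if (viid < 0)
 *		return viid;
 *
 * A non-negative return value is the VI id used by the other FW_VI_*
 * helpers below.
 */
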
6787 /**
6788  *      t4_free_vi - free a virtual interface
6789  *      @adap: the adapter
6790  *      @mbox: mailbox to use for the FW command
6791  *      @pf: the PF owning the VI
6792  *      @vf: the VF owning the VI
6793  *      @viid: virtual interface identifier
6794  *
6795  *      Free a previously allocated virtual interface.
6796  */
6797 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6798                unsigned int vf, unsigned int viid)
6799 {
6800         struct fw_vi_cmd c;
6801
6802         memset(&c, 0, sizeof(c));
6803         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
6804                                   FW_CMD_REQUEST_F |
6805                                   FW_CMD_EXEC_F |
6806                                   FW_VI_CMD_PFN_V(pf) |
6807                                   FW_VI_CMD_VFN_V(vf));
6808         c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
6809         c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
6810
6811         return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6812 }
6813
6814 /**
6815  *      t4_set_rxmode - set Rx properties of a virtual interface
6816  *      @adap: the adapter
6817  *      @mbox: mailbox to use for the FW command
6818  *      @viid: the VI id
6819  *      @mtu: the new MTU or -1
6820  *      @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6821  *      @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6822  *      @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
6823  *      @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
6824  *      @sleep_ok: if true we may sleep while awaiting command completion
6825  *
6826  *      Sets Rx properties of a virtual interface.
6827  */
6828 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
6829                   int mtu, int promisc, int all_multi, int bcast, int vlanex,
6830                   bool sleep_ok)
6831 {
6832         struct fw_vi_rxmode_cmd c;
6833
6834         /* convert to FW values */
6835         if (mtu < 0)
6836                 mtu = FW_RXMODE_MTU_NO_CHG;
6837         if (promisc < 0)
6838                 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
6839         if (all_multi < 0)
6840                 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
6841         if (bcast < 0)
6842                 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
6843         if (vlanex < 0)
6844                 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
6845
6846         memset(&c, 0, sizeof(c));
6847         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
6848                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6849                                    FW_VI_RXMODE_CMD_VIID_V(viid));
6850         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6851         c.mtu_to_vlanexen =
6852                 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
6853                             FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
6854                             FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
6855                             FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
6856                             FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
6857         return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6858 }
6859
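/* Illustrative usage sketch (not part of the driver source): because each
 * argument accepts -1 for "no change", a caller can flip one Rx property
 * without disturbing the rest, e.g. enabling promiscuous mode only:
 *
 *	ret = t4_set_rxmode(adap, adap->mbox, viid, -1, 1, -1, -1, -1, true);
 *
 * MTU, all-multi, broadcast and VLAN extraction stay as previously
 * programmed.
 */
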
6860 /**
6861  *      t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
6862  *      @adap: the adapter
6863  *      @mbox: mailbox to use for the FW command
6864  *      @viid: the VI id
6865  *      @free: if true any existing filters for this VI id are first removed
6866  *      @naddr: the number of MAC addresses to allocate filters for
6867  *      @addr: the MAC address(es)
6868  *      @idx: where to store the index of each allocated filter
6869  *      @hash: pointer to hash address filter bitmap
6870  *      @sleep_ok: call is allowed to sleep
6871  *
6872  *      Allocates an exact-match filter for each of the supplied addresses and
6873  *      sets it to the corresponding address.  If @idx is not %NULL it should
6874  *      have at least @naddr entries, each of which will be set to the index of
6875  *      the filter allocated for the corresponding MAC address.  If a filter
6876  *      could not be allocated for an address its index is set to 0xffff.
6877  *      If @hash is not %NULL addresses that fail to allocate an exact filter
6878  *      are hashed and update the hash filter bitmap pointed at by @hash.
6879  *
6880  *      Returns a negative error number or the number of filters allocated.
6881  */
6882 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
6883                       unsigned int viid, bool free, unsigned int naddr,
6884                       const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
6885 {
6886         int offset, ret = 0;
6887         struct fw_vi_mac_cmd c;
6888         unsigned int nfilters = 0;
6889         unsigned int max_naddr = adap->params.arch.mps_tcam_size;
6890         unsigned int rem = naddr;
6891
6892         if (naddr > max_naddr)
6893                 return -EINVAL;
6894
6895         for (offset = 0; offset < naddr ; /**/) {
6896                 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
6897                                          rem : ARRAY_SIZE(c.u.exact));
6898                 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
6899                                                      u.exact[fw_naddr]), 16);
6900                 struct fw_vi_mac_exact *p;
6901                 int i;
6902
6903                 memset(&c, 0, sizeof(c));
6904                 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
6905                                            FW_CMD_REQUEST_F |
6906                                            FW_CMD_WRITE_F |
6907                                            FW_CMD_EXEC_V(free) |
6908                                            FW_VI_MAC_CMD_VIID_V(viid));
6909                 c.freemacs_to_len16 =
6910                         cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
6911                                     FW_CMD_LEN16_V(len16));
6912
6913                 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6914                         p->valid_to_idx =
6915                                 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
6916                                             FW_VI_MAC_CMD_IDX_V(
6917                                                     FW_VI_MAC_ADD_MAC));
6918                         memcpy(p->macaddr, addr[offset + i],
6919                                sizeof(p->macaddr));
6920                 }
6921
6922                 /* It's okay if we run out of space in our MAC address arena.
6923                  * Some of the addresses we submit may get stored so we need
6924                  * to run through the reply to see what the results were ...
6925                  */
6926                 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
6927                 if (ret && ret != -FW_ENOMEM)
6928                         break;
6929
6930                 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6931                         u16 index = FW_VI_MAC_CMD_IDX_G(
6932                                         be16_to_cpu(p->valid_to_idx));
6933
6934                         if (idx)
6935                                 idx[offset + i] = (index >= max_naddr ?
6936                                                    0xffff : index);
6937                         if (index < max_naddr)
6938                                 nfilters++;
6939                         else if (hash)
6940                                 *hash |= (1ULL <<
6941                                           hash_mac_addr(addr[offset + i]));
6942                 }
6943
6944                 free = false;
6945                 offset += fw_naddr;
6946                 rem -= fw_naddr;
6947         }
6948
6949         if (ret == 0 || ret == -FW_ENOMEM)
6950                 ret = nfilters;
6951         return ret;
6952 }
6953
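/* Illustrative usage sketch (not part of the driver source): installing
 * exact-match filters for a caller-built array of address pointers
 * ("mac_list" with "n_mac" entries and "filt_idx" sized to match are
 * assumptions of this sketch):
 *
 *	u64 uhash = 0;
 *	int n = t4_alloc_mac_filt(adap, adap->mbox, viid, false, n_mac,
 *				  mac_list, filt_idx, &uhash, true);
 *
 * A non-negative n is the number of exact-match filters allocated;
 * filt_idx[i] is 0xffff for any address that instead set a bit in uhash
 * for the inexact-match hash filter (see t4_set_addr_hash() below).
 */
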
6954 /**
6955  *      t4_free_mac_filt - frees exact-match filters of given MAC addresses
6956  *      @adap: the adapter
6957  *      @mbox: mailbox to use for the FW command
6958  *      @viid: the VI id
6959  *      @naddr: the number of MAC addresses to free filters for
6960  *      @addr: the MAC address(es)
6961  *      @sleep_ok: call is allowed to sleep
6962  *
6963  *      Frees the exact-match filter for each of the supplied addresses
6964  *
6965  *      Returns a negative error number or the number of filters freed.
6966  */
6967 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
6968                      unsigned int viid, unsigned int naddr,
6969                      const u8 **addr, bool sleep_ok)
6970 {
6971         int offset, ret = 0;
6972         struct fw_vi_mac_cmd c;
6973         unsigned int nfilters = 0;
6974         unsigned int max_naddr = is_t4(adap->params.chip) ?
6975                                        NUM_MPS_CLS_SRAM_L_INSTANCES :
6976                                        NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6977         unsigned int rem = naddr;
6978
6979         if (naddr > max_naddr)
6980                 return -EINVAL;
6981
6982         for (offset = 0; offset < (int)naddr ; /**/) {
6983                 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
6984                                          ? rem
6985                                          : ARRAY_SIZE(c.u.exact));
6986                 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
6987                                                      u.exact[fw_naddr]), 16);
6988                 struct fw_vi_mac_exact *p;
6989                 int i;
6990
6991                 memset(&c, 0, sizeof(c));
6992                 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
6993                                      FW_CMD_REQUEST_F |
6994                                      FW_CMD_WRITE_F |
6995                                      FW_CMD_EXEC_V(0) |
6996                                      FW_VI_MAC_CMD_VIID_V(viid));
6997                 c.freemacs_to_len16 =
6998                                 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
6999                                             FW_CMD_LEN16_V(len16));
7000
7001                 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
7002                         p->valid_to_idx = cpu_to_be16(
7003                                 FW_VI_MAC_CMD_VALID_F |
7004                                 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
7005                         memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7006                 }
7007
7008                 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7009                 if (ret)
7010                         break;
7011
7012                 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7013                         u16 index = FW_VI_MAC_CMD_IDX_G(
7014                                                 be16_to_cpu(p->valid_to_idx));
7015
7016                         if (index < max_naddr)
7017                                 nfilters++;
7018                 }
7019
7020                 offset += fw_naddr;
7021                 rem -= fw_naddr;
7022         }
7023
7024         if (ret == 0)
7025                 ret = nfilters;
7026         return ret;
7027 }
7028
7029 /**
7030  *      t4_change_mac - modifies the exact-match filter for a MAC address
7031  *      @adap: the adapter
7032  *      @mbox: mailbox to use for the FW command
7033  *      @viid: the VI id
7034  *      @idx: index of existing filter for old value of MAC address, or -1
7035  *      @addr: the new MAC address value
7036  *      @persist: whether a new MAC allocation should be persistent
7037  *      @add_smt: if true also add the address to the HW SMT
7038  *
7039  *      Modifies an exact-match filter and sets it to the new MAC address.
7040  *      Note that in general it is not possible to modify the value of a given
7041  *      filter so the generic way to modify an address filter is to free the one
7042  *      being used by the old address value and allocate a new filter for the
7043  *      new address value.  @idx can be -1 if the address is a new addition.
7044  *
7045  *      Returns a negative error number or the index of the filter with the new
7046  *      MAC value.
7047  */
7048 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7049                   int idx, const u8 *addr, bool persist, bool add_smt)
7050 {
7051         int ret, mode;
7052         struct fw_vi_mac_cmd c;
7053         struct fw_vi_mac_exact *p = c.u.exact;
7054         unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
7055
7056         if (idx < 0)                             /* new allocation */
7057                 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7058         mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7059
7060         memset(&c, 0, sizeof(c));
7061         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7062                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7063                                    FW_VI_MAC_CMD_VIID_V(viid));
7064         c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
7065         p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7066                                       FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
7067                                       FW_VI_MAC_CMD_IDX_V(idx));
7068         memcpy(p->macaddr, addr, sizeof(p->macaddr));
7069
7070         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7071         if (ret == 0) {
7072                 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7073                 if (ret >= max_mac_addr)
7074                         ret = -ENOMEM;
7075         }
7076         return ret;
7077 }
7078
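/* Illustrative usage sketch (not part of the driver source): programming
 * a VI's primary unicast MAC address for the first time (no existing
 * filter, hence idx == -1), with "dev" being the associated net_device
 * in this sketch:
 *
 *	int idx = t4_change_mac(adap, adap->mbox, viid, -1, dev->dev_addr,
 *				true, true);
 *	if (idx < 0)
 *		return idx;
 *
 * A later address change can pass the saved idx so the same exact-match
 * filter slot is rewritten instead of a new one being allocated.
 */
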
7079 /**
7080  *      t4_set_addr_hash - program the MAC inexact-match hash filter
7081  *      @adap: the adapter
7082  *      @mbox: mailbox to use for the FW command
7083  *      @viid: the VI id
7084  *      @ucast: whether the hash filter should also match unicast addresses
7085  *      @vec: the value to be written to the hash filter
7086  *      @sleep_ok: call is allowed to sleep
7087  *
7088  *      Sets the 64-bit inexact-match hash filter for a virtual interface.
7089  */
7090 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7091                      bool ucast, u64 vec, bool sleep_ok)
7092 {
7093         struct fw_vi_mac_cmd c;
7094
7095         memset(&c, 0, sizeof(c));
7096         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7097                                    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7098                                    FW_VI_ENABLE_CMD_VIID_V(viid));
7099         c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
7100                                           FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
7101                                           FW_CMD_LEN16_V(1));
7102         c.u.hash.hashvec = cpu_to_be64(vec);
7103         return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7104 }
7105
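/* Illustrative usage sketch (not part of the driver source): once the
 * addresses that could not get exact-match filters have been folded into
 * a 64-bit vector (t4_alloc_mac_filt() above does this via
 * hash_mac_addr()), the vector is handed to the firmware:
 *
 *	ret = t4_set_addr_hash(adap, adap->mbox, viid, false, mhash, true);
 *
 * With ucast == false the hash filter matches multicast frames only; a
 * vector of 0 matches no addresses at all.
 */
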
7106 /**
7107  *      t4_enable_vi_params - enable/disable a virtual interface
7108  *      @adap: the adapter
7109  *      @mbox: mailbox to use for the FW command
7110  *      @viid: the VI id
7111  *      @rx_en: 1=enable Rx, 0=disable Rx
7112  *      @tx_en: 1=enable Tx, 0=disable Tx
7113  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
7114  *
7115  *      Enables/disables a virtual interface.  Note that setting DCB Enable
7116  *      only makes sense when enabling a Virtual Interface ...
7117  */
7118 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7119                         unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7120 {
7121         struct fw_vi_enable_cmd c;
7122
7123         memset(&c, 0, sizeof(c));
7124         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7125                                    FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7126                                    FW_VI_ENABLE_CMD_VIID_V(viid));
7127         c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
7128                                      FW_VI_ENABLE_CMD_EEN_V(tx_en) |
7129                                      FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
7130                                      FW_LEN16(c));
7131         return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7132 }
7133
7134 /**
7135  *      t4_enable_vi - enable/disable a virtual interface
7136  *      @adap: the adapter
7137  *      @mbox: mailbox to use for the FW command
7138  *      @viid: the VI id
7139  *      @rx_en: 1=enable Rx, 0=disable Rx
7140  *      @tx_en: 1=enable Tx, 0=disable Tx
7141  *
7142  *      Enables/disables a virtual interface.
7143  */
7144 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7145                  bool rx_en, bool tx_en)
7146 {
7147         return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7148 }
7149
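/* Illustrative usage sketch (not part of the driver source): a typical
 * open path enables both Rx and Tx once the VI's queues exist, and the
 * stop path disables them again:
 *
 *	t4_enable_vi(adap, adap->mbox, pi->viid, true, true);
 *	...
 *	t4_enable_vi(adap, adap->mbox, pi->viid, false, false);
 *
 * Delivery of DCB messages can only be requested through
 * t4_enable_vi_params() above, and only while enabling the interface.
 */
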
7150 /**
7151  *      t4_identify_port - identify a VI's port by blinking its LED
7152  *      @adap: the adapter
7153  *      @mbox: mailbox to use for the FW command
7154  *      @viid: the VI id
7155  *      @nblinks: how many times to blink LED at 2.5 Hz
7156  *
7157  *      Identifies a VI's port by blinking its LED.
7158  */
7159 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7160                      unsigned int nblinks)
7161 {
7162         struct fw_vi_enable_cmd c;
7163
7164         memset(&c, 0, sizeof(c));
7165         c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7166                                    FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7167                                    FW_VI_ENABLE_CMD_VIID_V(viid));
7168         c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
7169         c.blinkdur = cpu_to_be16(nblinks);
7170         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7171 }
7172
7173 /**
7174  *      t4_iq_stop - stop an ingress queue and its FLs
7175  *      @adap: the adapter
7176  *      @mbox: mailbox to use for the FW command
7177  *      @pf: the PF owning the queues
7178  *      @vf: the VF owning the queues
7179  *      @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7180  *      @iqid: ingress queue id
7181  *      @fl0id: FL0 queue id or 0xffff if no attached FL0
7182  *      @fl1id: FL1 queue id or 0xffff if no attached FL1
7183  *
7184  *      Stops an ingress queue and its associated FLs, if any.  This causes
7185  *      any current or future data/messages destined for these queues to be
7186  *      tossed.
7187  */
7188 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7189                unsigned int vf, unsigned int iqtype, unsigned int iqid,
7190                unsigned int fl0id, unsigned int fl1id)
7191 {
7192         struct fw_iq_cmd c;
7193
7194         memset(&c, 0, sizeof(c));
7195         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7196                                   FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7197                                   FW_IQ_CMD_VFN_V(vf));
7198         c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
7199         c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7200         c.iqid = cpu_to_be16(iqid);
7201         c.fl0id = cpu_to_be16(fl0id);
7202         c.fl1id = cpu_to_be16(fl1id);
7203         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7204 }
7205
7206 /**
7207  *      t4_iq_free - free an ingress queue and its FLs
7208  *      @adap: the adapter
7209  *      @mbox: mailbox to use for the FW command
7210  *      @pf: the PF owning the queues
7211  *      @vf: the VF owning the queues
7212  *      @iqtype: the ingress queue type
7213  *      @iqid: ingress queue id
7214  *      @fl0id: FL0 queue id or 0xffff if no attached FL0
7215  *      @fl1id: FL1 queue id or 0xffff if no attached FL1
7216  *
7217  *      Frees an ingress queue and its associated FLs, if any.
7218  */
7219 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7220                unsigned int vf, unsigned int iqtype, unsigned int iqid,
7221                unsigned int fl0id, unsigned int fl1id)
7222 {
7223         struct fw_iq_cmd c;
7224
7225         memset(&c, 0, sizeof(c));
7226         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7227                                   FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7228                                   FW_IQ_CMD_VFN_V(vf));
7229         c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
7230         c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7231         c.iqid = cpu_to_be16(iqid);
7232         c.fl0id = cpu_to_be16(fl0id);
7233         c.fl1id = cpu_to_be16(fl1id);
7234         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7235 }
7236
7237 /**
7238  *      t4_eth_eq_free - free an Ethernet egress queue
7239  *      @adap: the adapter
7240  *      @mbox: mailbox to use for the FW command
7241  *      @pf: the PF owning the queue
7242  *      @vf: the VF owning the queue
7243  *      @eqid: egress queue id
7244  *
7245  *      Frees an Ethernet egress queue.
7246  */
7247 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7248                    unsigned int vf, unsigned int eqid)
7249 {
7250         struct fw_eq_eth_cmd c;
7251
7252         memset(&c, 0, sizeof(c));
7253         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
7254                                   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7255                                   FW_EQ_ETH_CMD_PFN_V(pf) |
7256                                   FW_EQ_ETH_CMD_VFN_V(vf));
7257         c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
7258         c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
7259         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7260 }
7261
7262 /**
7263  *      t4_ctrl_eq_free - free a control egress queue
7264  *      @adap: the adapter
7265  *      @mbox: mailbox to use for the FW command
7266  *      @pf: the PF owning the queue
7267  *      @vf: the VF owning the queue
7268  *      @eqid: egress queue id
7269  *
7270  *      Frees a control egress queue.
7271  */
7272 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7273                     unsigned int vf, unsigned int eqid)
7274 {
7275         struct fw_eq_ctrl_cmd c;
7276
7277         memset(&c, 0, sizeof(c));
7278         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
7279                                   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7280                                   FW_EQ_CTRL_CMD_PFN_V(pf) |
7281                                   FW_EQ_CTRL_CMD_VFN_V(vf));
7282         c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
7283         c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
7284         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7285 }
7286
7287 /**
7288  *      t4_ofld_eq_free - free an offload egress queue
7289  *      @adap: the adapter
7290  *      @mbox: mailbox to use for the FW command
7291  *      @pf: the PF owning the queue
7292  *      @vf: the VF owning the queue
7293  *      @eqid: egress queue id
7294  *
7295  *      Frees an offload egress queue.
7296  */
7297 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7298                     unsigned int vf, unsigned int eqid)
7299 {
7300         struct fw_eq_ofld_cmd c;
7301
7302         memset(&c, 0, sizeof(c));
7303         c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
7304                                   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7305                                   FW_EQ_OFLD_CMD_PFN_V(pf) |
7306                                   FW_EQ_OFLD_CMD_VFN_V(vf));
7307         c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
7308         c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
7309         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7310 }
7311
7312 /**
7313  *      t4_link_down_rc_str - return a string for a Link Down Reason Code
7315  *      @link_down_rc: Link Down Reason Code
7316  *
7317  *      Returns a string representation of the Link Down Reason Code.
7318  */
7319 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
7320 {
7321         static const char * const reason[] = {
7322                 "Link Down",
7323                 "Remote Fault",
7324                 "Auto-negotiation Failure",
7325                 "Reserved",
7326                 "Insufficient Airflow",
7327                 "Unable To Determine Reason",
7328                 "No RX Signal Detected",
7329                 "Reserved",
7330         };
7331
7332         if (link_down_rc >= ARRAY_SIZE(reason))
7333                 return "Bad Reason Code";
7334
7335         return reason[link_down_rc];
7336 }
7337
7338 /**
7339  *      t4_handle_get_port_info - process a FW reply message
7340  *      @pi: the port info
7341  *      @rpl: start of the FW message
7342  *
7343  *      Processes a GET_PORT_INFO FW reply message.
7344  */
7345 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
7346 {
7347         const struct fw_port_cmd *p = (const void *)rpl;
7348         struct adapter *adap = pi->adapter;
7349
7350         /* link/module state change message */
7351         int speed = 0, fc = 0;
7352         struct link_config *lc;
7353         u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7354         int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
7355         u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
7356
7357         if (stat & FW_PORT_CMD_RXPAUSE_F)
7358                 fc |= PAUSE_RX;
7359         if (stat & FW_PORT_CMD_TXPAUSE_F)
7360                 fc |= PAUSE_TX;
7361         if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
7362                 speed = 100;
7363         else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
7364                 speed = 1000;
7365         else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
7366                 speed = 10000;
7367         else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
7368                 speed = 25000;
7369         else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
7370                 speed = 40000;
7371         else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
7372                 speed = 100000;
7373
7374         lc = &pi->link_cfg;
7375
7376         if (mod != pi->mod_type) {
7377                 pi->mod_type = mod;
7378                 t4_os_portmod_changed(adap, pi->port_id);
7379         }
7380         if (link_ok != lc->link_ok || speed != lc->speed ||
7381             fc != lc->fc) {     /* something changed */
7382                 if (!link_ok && lc->link_ok) {
7383                         unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);
7384
7385                         lc->link_down_rc = rc;
7386                         dev_warn(adap->pdev_dev,
7387                                  "Port %d link down, reason: %s\n",
7388                                  pi->port_id, t4_link_down_rc_str(rc));
7389                 }
7390                 lc->link_ok = link_ok;
7391                 lc->speed = speed;
7392                 lc->fc = fc;
7393                 lc->supported = be16_to_cpu(p->u.info.pcap);
7394                 lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
7395
7396                 t4_os_link_changed(adap, pi->port_id, link_ok);
7397         }
7398 }
7399
7400 /**
7401  *      t4_update_port_info - retrieve and update port information if changed
7402  *      @pi: the port_info
7403  *
7404  *      We issue a Get Port Information Command to the Firmware and, if
7405  *      successful, we check to see if anything is different from what we
7406  *      last recorded and update things accordingly.
7407  */
7408 int t4_update_port_info(struct port_info *pi)
7409 {
7410         struct fw_port_cmd port_cmd;
7411         int ret;
7412
7413         memset(&port_cmd, 0, sizeof(port_cmd));
7414         port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
7415                                             FW_CMD_REQUEST_F | FW_CMD_READ_F |
7416                                             FW_PORT_CMD_PORTID_V(pi->port_id));
7417         port_cmd.action_to_len16 = cpu_to_be32(
7418                 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
7419                 FW_LEN16(port_cmd));
7420         ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
7421                          &port_cmd, sizeof(port_cmd), &port_cmd);
7422         if (ret)
7423                 return ret;
7424
7425         t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
7426         return 0;
7427 }
7428
7429 /**
7430  *      t4_handle_fw_rpl - process a FW reply message
7431  *      @adap: the adapter
7432  *      @rpl: start of the FW message
7433  *
7434  *      Processes a FW message, such as link state change messages.
7435  */
7436 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7437 {
7438         u8 opcode = *(const u8 *)rpl;
7439
7440         /* This might be a port command ... this simplifies the following
7441          * conditionals ...  We can get away with pre-dereferencing
7442          * action_to_len16 because it's in the first 16 bytes and all messages
7443          * will be at least that long.
7444          */
7445         const struct fw_port_cmd *p = (const void *)rpl;
7446         unsigned int action =
7447                 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
7448
7449         if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7450                 int i;
7451                 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
7452                 struct port_info *pi = NULL;
7453
7454                 for_each_port(adap, i) {
7455                         pi = adap2pinfo(adap, i);
7456                         if (pi->tx_chan == chan)
7457                                 break;
7458                 }
7459
7460                 t4_handle_get_port_info(pi, rpl);
7461         } else {
7462                 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
7463                 return -EINVAL;
7464         }
7465         return 0;
7466 }
7467
7468 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
7469 {
7470         u16 val;
7471
7472         if (pci_is_pcie(adapter->pdev)) {
7473                 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
7474                 p->speed = val & PCI_EXP_LNKSTA_CLS;
7475                 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7476         }
7477 }
7478
7479 /**
7480  *      init_link_config - initialize a link's SW state
7481  *      @lc: structure holding the link state
7482  *      @caps: link capabilities
7483  *
7484  *      Initializes the SW state maintained for each link, including the link's
7485  *      capabilities and default speed/flow-control/autonegotiation settings.
7486  */
7487 static void init_link_config(struct link_config *lc, unsigned int pcaps,
7488                              unsigned int acaps)
7489 {
7490         lc->supported = pcaps;
7491         lc->lp_advertising = 0;
7492         lc->requested_speed = 0;
7493         lc->speed = 0;
7494         lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
7495         lc->auto_fec = 0;
7496
7497         /* For Forward Error Correction, we default to whatever the Firmware
7498          * tells us the Link is currently advertising.
7499          */
7500         if (acaps & FW_PORT_CAP_FEC_RS)
7501                 lc->auto_fec |= FEC_RS;
7502         if (acaps & FW_PORT_CAP_FEC_BASER_RS)
7503                 lc->auto_fec |= FEC_BASER_RS;
7504         lc->requested_fec = FEC_AUTO;
7505         lc->fec = lc->auto_fec;
7506
7507         if (lc->supported & FW_PORT_CAP_ANEG) {
7508                 lc->advertising = lc->supported & ADVERT_MASK;
7509                 lc->autoneg = AUTONEG_ENABLE;
7510                 lc->requested_fc |= PAUSE_AUTONEG;
7511         } else {
7512                 lc->advertising = 0;
7513                 lc->autoneg = AUTONEG_DISABLE;
7514         }
7515 }
7516
7517 #define CIM_PF_NOACCESS 0xeeeeeeee
7518
7519 int t4_wait_dev_ready(void __iomem *regs)
7520 {
7521         u32 whoami;
7522
7523         whoami = readl(regs + PL_WHOAMI_A);
7524         if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
7525                 return 0;
7526
7527         msleep(500);
7528         whoami = readl(regs + PL_WHOAMI_A);
7529         return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
7530 }
7531
7532 struct flash_desc {
7533         u32 vendor_and_model_id;
7534         u32 size_mb;
7535 };
7536
7537 static int get_flash_params(struct adapter *adap)
7538 {
7539         /* Table of supported non-Numonix flash parts.  Numonix parts are left
7540          * to the preexisting code.  All flash parts have 64KB sectors.
7541          */
7542         static struct flash_desc supported_flash[] = {
7543                 { 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
7544         };
7545
7546         int ret;
7547         u32 info;
7548
7549         ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
7550         if (!ret)
7551                 ret = sf1_read(adap, 3, 0, 1, &info);
7552         t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
7553         if (ret)
7554                 return ret;
7555
7556         for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7557                 if (supported_flash[ret].vendor_and_model_id == info) {
7558                         adap->params.sf_size = supported_flash[ret].size_mb;
7559                         adap->params.sf_nsec =
7560                                 adap->params.sf_size / SF_SEC_SIZE;
7561                         return 0;
7562                 }
7563
7564         if ((info & 0xff) != 0x20)             /* not a Numonix flash */
7565                 return -EINVAL;
7566         info >>= 16;                           /* log2 of size */
7567         if (info >= 0x14 && info < 0x18)
7568                 adap->params.sf_nsec = 1 << (info - 16);
7569         else if (info == 0x18)
7570                 adap->params.sf_nsec = 64;
7571         else
7572                 return -EINVAL;
7573         adap->params.sf_size = 1 << info;
7574         adap->params.sf_fw_start =
7575                 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
7576
7577         if (adap->params.sf_size < FLASH_MIN_SIZE)
7578                 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
7579                          adap->params.sf_size, FLASH_MIN_SIZE);
7580         return 0;
7581 }
7582
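/* Worked example (for illustration): a Numonix part whose ID decodes to
 * info = 0x16 after the shift above is a 2^0x16 = 4MB device, so the code
 * sets sf_size = 0x400000 and sf_nsec = 1 << (0x16 - 16) = 64 sectors of
 * 64KB each, the same values the Spansion table entry above produces.
 */
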
7583 static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
7584 {
7585         u16 val;
7586         u32 pcie_cap;
7587
7588         pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
7589         if (pcie_cap) {
7590                 pci_read_config_word(adapter->pdev,
7591                                      pcie_cap + PCI_EXP_DEVCTL2, &val);
7592                 val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
7593                 val |= range;
7594                 pci_write_config_word(adapter->pdev,
7595                                       pcie_cap + PCI_EXP_DEVCTL2, val);
7596         }
7597 }
7598
7599 /**
7600  *      t4_prep_adapter - prepare SW and HW for operation
7601  *      @adapter: the adapter
7603  *
7604  *      Initialize adapter SW state for the various HW modules, set initial
7605  *      values for some adapter tunables, take PHYs out of reset, and
7606  *      initialize the MDIO interface.
7607  */
7608 int t4_prep_adapter(struct adapter *adapter)
7609 {
7610         int ret, ver;
7611         uint16_t device_id;
7612         u32 pl_rev;
7613
7614         get_pci_mode(adapter, &adapter->params.pci);
7615         pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
7616
7617         ret = get_flash_params(adapter);
7618         if (ret < 0) {
7619                 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
7620                 return ret;
7621         }
7622
7623         /* Retrieve adapter's device ID
7624          */
7625         pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
7626         ver = device_id >> 12;
7627         adapter->params.chip = 0;
7628         switch (ver) {
7629         case CHELSIO_T4:
7630                 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
7631                 adapter->params.arch.sge_fl_db = DBPRIO_F;
7632                 adapter->params.arch.mps_tcam_size =
7633                                  NUM_MPS_CLS_SRAM_L_INSTANCES;
7634                 adapter->params.arch.mps_rplc_size = 128;
7635                 adapter->params.arch.nchan = NCHAN;
7636                 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
7637                 adapter->params.arch.vfcount = 128;
7638                 /* Congestion map is for 4 channels so that
7639                  * MPS can have 4 priority per port.
7640                  */
7641                 adapter->params.arch.cng_ch_bits_log = 2;
7642                 break;
7643         case CHELSIO_T5:
7644                 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
7645                 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
7646                 adapter->params.arch.mps_tcam_size =
7647                                  NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7648                 adapter->params.arch.mps_rplc_size = 128;
7649                 adapter->params.arch.nchan = NCHAN;
7650                 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
7651                 adapter->params.arch.vfcount = 128;
7652                 adapter->params.arch.cng_ch_bits_log = 2;
7653                 break;
7654         case CHELSIO_T6:
7655                 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
7656                 adapter->params.arch.sge_fl_db = 0;
7657                 adapter->params.arch.mps_tcam_size =
7658                                  NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7659                 adapter->params.arch.mps_rplc_size = 256;
7660                 adapter->params.arch.nchan = 2;
7661                 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
7662                 adapter->params.arch.vfcount = 256;
7663                 /* Congestion map will be for 2 channels so that
7664                  * MPS can have 8 priority per port.
7665                  */
7666                 adapter->params.arch.cng_ch_bits_log = 3;
7667                 break;
7668         default:
7669                 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
7670                         device_id);
7671                 return -EINVAL;
7672         }
7673
7674         adapter->params.cim_la_size = CIMLA_SIZE;
7675         init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7676
7677         /*
7678          * Default port for debugging in case we can't reach FW.
7679          */
7680         adapter->params.nports = 1;
7681         adapter->params.portvec = 1;
7682         adapter->params.vpd.cclk = 50000;
7683
7684         /* Set pci completion timeout value to 4 seconds. */
7685         set_pcie_completion_timeout(adapter, 0xd);
7686         return 0;
7687 }
7688
7689 /**
7690  *      t4_shutdown_adapter - shut down adapter, host & wire
7691  *      @adapter: the adapter
7692  *
7693  *      Perform an emergency shutdown of the adapter and stop it from
7694  *      continuing any further communication on the ports or DMA to the
7695  *      host.  This is typically used when the adapter and/or firmware
7696  *      have crashed and we want to prevent any further accidental
7697  *      communication with the rest of the world.  This will also force
7698  *      the port Link Status to go down -- if register writes work --
7699  *      which should help our peers figure out that we're down.
7700  */
7701 int t4_shutdown_adapter(struct adapter *adapter)
7702 {
7703         int port;
7704
7705         t4_intr_disable(adapter);
7706         t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
7707         for_each_port(adapter, port) {
7708                 u32 a_port_cfg = is_t4(adapter->params.chip) ?
7709                                        PORT_REG(port, XGMAC_PORT_CFG_A) :
7710                                        T5_PORT_REG(port, MAC_PORT_CFG_A);
7711
7712                 t4_write_reg(adapter, a_port_cfg,
7713                              t4_read_reg(adapter, a_port_cfg)
7714                              & ~SIGNAL_DET_V(1));
7715         }
7716         t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
7717
7718         return 0;
7719 }
7720
7721 /**
7722  *      t4_bar2_sge_qregs - return BAR2 SGE Queue register information
7723  *      @adapter: the adapter
7724  *      @qid: the Queue ID
7725  *      @qtype: the Ingress or Egress type for @qid
7726  *      @user: true if this request is for a user mode queue
7727  *      @pbar2_qoffset: BAR2 Queue Offset
7728  *      @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
7729  *
7730  *      Returns the BAR2 SGE Queue Registers information associated with the
7731  *      indicated Absolute Queue ID.  These are passed back in return value
7732  *      pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
7733  *      and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
7734  *
7735  *      This may return an error which indicates that BAR2 SGE Queue
7736  *      registers aren't available.  If an error is not returned, then the
7737  *      following values are returned:
7738  *
7739  *        *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
7740  *        *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
7741  *
7742  *      If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
7743  *      require the "Inferred Queue ID" ability may be used.  E.g. the
7744  *      Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
7745  *      then these "Inferred Queue ID" registers may not be used.
7746  */
7747 int t4_bar2_sge_qregs(struct adapter *adapter,
7748                       unsigned int qid,
7749                       enum t4_bar2_qtype qtype,
7750                       int user,
7751                       u64 *pbar2_qoffset,
7752                       unsigned int *pbar2_qid)
7753 {
7754         unsigned int page_shift, page_size, qpp_shift, qpp_mask;
7755         u64 bar2_page_offset, bar2_qoffset;
7756         unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
7757
7758         /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
7759         if (!user && is_t4(adapter->params.chip))
7760                 return -EINVAL;
7761
7762         /* Get our SGE Page Size parameters.
7763          */
7764         page_shift = adapter->params.sge.hps + 10;
7765         page_size = 1 << page_shift;
7766
7767         /* Get the right Queues per Page parameters for our Queue.
7768          */
7769         qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
7770                      ? adapter->params.sge.eq_qpp
7771                      : adapter->params.sge.iq_qpp);
7772         qpp_mask = (1 << qpp_shift) - 1;
7773
7774         /*  Calculate the basics of the BAR2 SGE Queue register area:
7775          *  o The BAR2 page the Queue registers will be in.
7776          *  o The BAR2 Queue ID.
7777          *  o The BAR2 Queue ID Offset into the BAR2 page.
7778          */
7779         bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
7780         bar2_qid = qid & qpp_mask;
7781         bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
7782
7783         /* If the BAR2 Queue ID Offset is less than the Page Size, then the
7784          * hardware will infer the Absolute Queue ID simply from the writes to
7785          * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
7786          * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
7787          * write to the first BAR2 SGE Queue Area within the BAR2 Page with
7788          * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
7789          * from the BAR2 Page and BAR2 Queue ID.
7790          *
7791          * One important consequence of this is that some BAR2 SGE registers
7792          * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
7793          * there.  But other registers synthesize the SGE Queue ID purely
7794          * from the writes to the registers -- the Write Combined Doorbell
7795          * Buffer is a good example.  These BAR2 SGE Registers are only
7796          * available for those BAR2 SGE Register areas where the SGE Absolute
7797          * Queue ID can be inferred from simple writes.
7798          */
7799         bar2_qoffset = bar2_page_offset;
7800         bar2_qinferred = (bar2_qid_offset < page_size);
7801         if (bar2_qinferred) {
7802                 bar2_qoffset += bar2_qid_offset;
7803                 bar2_qid = 0;
7804         }
7805
7806         *pbar2_qoffset = bar2_qoffset;
7807         *pbar2_qid = bar2_qid;
7808         return 0;
7809 }
7810
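/* Illustrative usage sketch (not part of the driver source): a queue
 * setup path would normally translate an absolute Egress Queue ID into a
 * doorbell address once, at ring-creation time ("adap->bar2" is assumed
 * here to be the ioremapped BAR2 region):
 *
 *	u64 bar2_qoffset;
 *	unsigned int bar2_qid;
 *	ret = t4_bar2_sge_qregs(adap, eqid, T4_BAR2_QTYPE_EGRESS, 0,
 *				&bar2_qoffset, &bar2_qid);
 *	if (!ret)
 *		db = adap->bar2 + bar2_qoffset;
 *
 * When bar2_qid comes back as 0 the hardware infers the queue from the
 * address, so doorbell writes need not carry a QID field.
 */
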
7811 /**
7812  *      t4_init_devlog_params - initialize adapter->params.devlog
7813  *      @adap: the adapter
7814  *
7815  *      Initialize various fields of the adapter's Firmware Device Log
7816  *      Parameters structure.
7817  */
7818 int t4_init_devlog_params(struct adapter *adap)
7819 {
7820         struct devlog_params *dparams = &adap->params.devlog;
7821         u32 pf_dparams;
7822         unsigned int devlog_meminfo;
7823         struct fw_devlog_cmd devlog_cmd;
7824         int ret;
7825
7826         /* If we're dealing with newer firmware, the Device Log Parameters
7827          * are stored in a designated register which allows us to access the
7828          * Device Log even if we can't talk to the firmware.
7829          */
7830         pf_dparams =
7831                 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
7832         if (pf_dparams) {
7833                 unsigned int nentries, nentries128;
7834
7835                 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
7836                 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
7837
7838                 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
7839                 nentries = (nentries128 + 1) * 128;
7840                 dparams->size = nentries * sizeof(struct fw_devlog_e);
7841
7842                 return 0;
7843         }
7844
7845         /* Otherwise, ask the firmware for its Device Log Parameters.
7846          */
7847         memset(&devlog_cmd, 0, sizeof(devlog_cmd));
7848         devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
7849                                              FW_CMD_REQUEST_F | FW_CMD_READ_F);
7850         devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
7851         ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7852                          &devlog_cmd);
7853         if (ret)
7854                 return ret;
7855
7856         devlog_meminfo =
7857                 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
7858         dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
7859         dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
7860         dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
7861
7862         return 0;
7863 }
7864
7865 /**
7866  *      t4_init_sge_params - initialize adap->params.sge
7867  *      @adapter: the adapter
7868  *
7869  *      Initialize various fields of the adapter's SGE Parameters structure.
7870  */
7871 int t4_init_sge_params(struct adapter *adapter)
7872 {
7873         struct sge_params *sge_params = &adapter->params.sge;
7874         u32 hps, qpp;
7875         unsigned int s_hps, s_qpp;
7876
7877         /* Extract the SGE Page Size for our PF.
7878          */
7879         hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
7880         s_hps = (HOSTPAGESIZEPF0_S +
7881                  (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
7882         sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
7883
7884         /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
7885          */
7886         s_qpp = (QUEUESPERPAGEPF0_S +
7887                 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
7888         qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
7889         sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
7890         qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
7891         sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
7892
7893         return 0;
7894 }
7895
7896 /**
7897  *      t4_init_tp_params - initialize adap->params.tp
7898  *      @adap: the adapter
7899  *
7900  *      Initialize various fields of the adapter's TP Parameters structure.
7901  */
7902 int t4_init_tp_params(struct adapter *adap)
7903 {
7904         int chan;
7905         u32 v;
7906
7907         v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
7908         adap->params.tp.tre = TIMERRESOLUTION_G(v);
7909         adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
7910
7911         /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7912         for (chan = 0; chan < NCHAN; chan++)
7913                 adap->params.tp.tx_modq[chan] = chan;
7914
7915         /* Cache the adapter's Compressed Filter Mode and global Ingress
7916          * Configuration.
7917          */
7918         if (t4_use_ldst(adap)) {
7919                 t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
7920                                 TP_VLAN_PRI_MAP_A, 1);
7921                 t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
7922                                 TP_INGRESS_CONFIG_A, 1);
7923         } else {
7924                 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
7925                                  &adap->params.tp.vlan_pri_map, 1,
7926                                  TP_VLAN_PRI_MAP_A);
7927                 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
7928                                  &adap->params.tp.ingress_config, 1,
7929                                  TP_INGRESS_CONFIG_A);
7930         }
7931         /* For T6, cache the adapter's compressed error vector
7932          * and passing outer header info for encapsulated packets.
7933          */
7934         if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
7935                 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
7936                 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
7937         }
7938
7939         /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7940          * shift positions of several elements of the Compressed Filter Tuple
7941          * for this adapter which we need frequently ...
7942          */
7943         adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
7944         adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
7945         adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
7946         adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
7947                                                                PROTOCOL_F);
7948
7949         /* If TP_INGRESS_CONFIG.VNIC == 0, then TP_VLAN_PRI_MAP.VNIC_ID
7950          * represents the presence of an Outer VLAN instead of a VNIC ID.
7951          */
7952         if ((adap->params.tp.ingress_config & VNIC_F) == 0)
7953                 adap->params.tp.vnic_shift = -1;
7954
7955         return 0;
7956 }
7957
7958 /**
7959  *      t4_filter_field_shift - calculate filter field shift
7960  *      @adap: the adapter
7961  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
7962  *
7963  *      Return the shift position of a filter field within the Compressed
7964  *      Filter Tuple.  The filter field is specified via its selection bit
7965  *      within TP_VLAN_PRI_MAP (filter mode), e.g. VLAN_F.
7966  */
7967 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
7968 {
7969         unsigned int filter_mode = adap->params.tp.vlan_pri_map;
7970         unsigned int sel;
7971         int field_shift;
7972
7973         if ((filter_mode & filter_sel) == 0)
7974                 return -1;
7975
7976         for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
7977                 switch (filter_mode & sel) {
7978                 case FCOE_F:
7979                         field_shift += FT_FCOE_W;
7980                         break;
7981                 case PORT_F:
7982                         field_shift += FT_PORT_W;
7983                         break;
7984                 case VNIC_ID_F:
7985                         field_shift += FT_VNIC_ID_W;
7986                         break;
7987                 case VLAN_F:
7988                         field_shift += FT_VLAN_W;
7989                         break;
7990                 case TOS_F:
7991                         field_shift += FT_TOS_W;
7992                         break;
7993                 case PROTOCOL_F:
7994                         field_shift += FT_PROTOCOL_W;
7995                         break;
7996                 case ETHERTYPE_F:
7997                         field_shift += FT_ETHERTYPE_W;
7998                         break;
7999                 case MACMATCH_F:
8000                         field_shift += FT_MACMATCH_W;
8001                         break;
8002                 case MPSHITTYPE_F:
8003                         field_shift += FT_MPSHITTYPE_W;
8004                         break;
8005                 case FRAGMENTATION_F:
8006                         field_shift += FT_FRAGMENTATION_W;
8007                         break;
8008                 }
8009         }
8010         return field_shift;
8011 }
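
/* Usage sketch (illustrative only): a caller can use the shift returned
 * above to position a field value within a Compressed Filter Tuple.  Here
 * vlan_tci is a hypothetical 16-bit value supplied by the caller; a negative
 * shift means the field is not part of the current filter mode:
 *
 *	int shift = t4_filter_field_shift(adap, VLAN_F);
 *	u64 tuple = 0;
 *
 *	if (shift >= 0)
 *		tuple |= (u64)vlan_tci << shift;
 */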
8012
8013 int t4_init_rss_mode(struct adapter *adap, int mbox)
8014 {
8015         int i, ret;
8016         struct fw_rss_vi_config_cmd rvc;
8017
8018         memset(&rvc, 0, sizeof(rvc));
8019
8020         for_each_port(adap, i) {
8021                 struct port_info *p = adap2pinfo(adap, i);
8022
8023                 rvc.op_to_viid =
8024                         cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
8025                                     FW_CMD_REQUEST_F | FW_CMD_READ_F |
8026                                     FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
8027                 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
8028                 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
8029                 if (ret)
8030                         return ret;
8031                 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
8032         }
8033         return 0;
8034 }
8035
8036 /**
8037  *      t4_init_portinfo - allocate a virtual interface and initialize port_info
8038  *      @pi: the port_info
8039  *      @mbox: mailbox to use for the FW command
8040  *      @port: physical port associated with the VI
8041  *      @pf: the PF owning the VI
8042  *      @vf: the VF owning the VI
8043  *      @mac: the MAC address of the VI
8044  *
8045  *      Allocates a virtual interface for the given physical port.  If @mac is
8046  *      not %NULL it contains the MAC address of the VI as assigned by FW.
8047  *      @mac should be large enough to hold an Ethernet address.
8048  *      Returns < 0 on error.
8049  */
8050 int t4_init_portinfo(struct port_info *pi, int mbox,
8051                      int port, int pf, int vf, u8 mac[])
8052 {
8053         int ret;
8054         struct fw_port_cmd c;
8055         unsigned int rss_size;
8056
8057         memset(&c, 0, sizeof(c));
8058         c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8059                                      FW_CMD_REQUEST_F | FW_CMD_READ_F |
8060                                      FW_PORT_CMD_PORTID_V(port));
8061         c.action_to_len16 = cpu_to_be32(
8062                 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
8063                 FW_LEN16(c));
8064         ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
8065         if (ret)
8066                 return ret;
8067
8068         ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
8069         if (ret < 0)
8070                 return ret;
8071
8072         pi->viid = ret;
8073         pi->tx_chan = port;
8074         pi->lport = port;
8075         pi->rss_size = rss_size;
8076
8077         ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
8078         pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
8079                 FW_PORT_CMD_MDIOADDR_G(ret) : -1;
8080         pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
8081         pi->mod_type = FW_PORT_MOD_TYPE_NA;
8082
8083         init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap),
8084                          be16_to_cpu(c.u.info.acap));
8085         return 0;
8086 }
8087
8088 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
8089 {
8090         u8 addr[6];
8091         int ret, i, j = 0;
8092
8093         for_each_port(adap, i) {
8094                 struct port_info *pi = adap2pinfo(adap, i);
8095
8096                 while ((adap->params.portvec & (1 << j)) == 0)
8097                         j++;
8098
8099                 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
8100                 if (ret)
8101                         return ret;
8102
8103                 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
8104                 j++;
8105         }
8106         return 0;
8107 }
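
/* Note on the loop above: adap->params.portvec is a bitmap of the physical
 * ports present on the adapter, so the inner while loop skips unpopulated
 * port numbers.  For example (hypothetical value), portvec == 0x5 means
 * physical ports 0 and 2 exist, and the port_info structures are bound to
 * those ports in order.
 */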
8108
8109 /**
8110  *      t4_read_cimq_cfg - read CIM queue configuration
8111  *      @adap: the adapter
8112  *      @base: holds the queue base addresses in bytes
8113  *      @size: holds the queue sizes in bytes
8114  *      @thres: holds the queue full thresholds in bytes
8115  *
8116  *      Returns the current configuration of the CIM queues, starting with
8117  *      the IBQs, then the OBQs.
8118  */
8119 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8120 {
8121         unsigned int i, v;
8122         int cim_num_obq = is_t4(adap->params.chip) ?
8123                                 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
8124
8125         for (i = 0; i < CIM_NUM_IBQ; i++) {
8126                 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
8127                              QUENUMSELECT_V(i));
8128                 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
8129                 /* value is in 256-byte units */
8130                 *base++ = CIMQBASE_G(v) * 256;
8131                 *size++ = CIMQSIZE_G(v) * 256;
8132                 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
8133         }
8134         for (i = 0; i < cim_num_obq; i++) {
8135                 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
8136                              QUENUMSELECT_V(i));
8137                 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
8138                 /* value is in 256-byte units */
8139                 *base++ = CIMQBASE_G(v) * 256;
8140                 *size++ = CIMQSIZE_G(v) * 256;
8141         }
8142 }
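
/* Caller sizing sketch (illustrative only): @base and @size need room for
 * the IBQ entries followed by the OBQ entries, while @thres is only filled
 * in for the IBQs, e.g.:
 *
 *	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 *	u16 thres[CIM_NUM_IBQ];
 *
 *	t4_read_cimq_cfg(adap, base, size, thres);
 */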
8143
8144 /**
8145  *      t4_read_cim_ibq - read the contents of a CIM inbound queue
8146  *      @adap: the adapter
8147  *      @qid: the queue index
8148  *      @data: where to store the queue contents
8149  *      @n: capacity of @data in 32-bit words
8150  *
8151  *      Reads the contents of the selected CIM queue starting at address 0 up
8152  *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
8153  *      error and the number of 32-bit words actually read on success.
8154  */
8155 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8156 {
8157         int i, err, attempts;
8158         unsigned int addr;
8159         const unsigned int nwords = CIM_IBQ_SIZE * 4;
8160
8161         if (qid > 5 || (n & 3))
8162                 return -EINVAL;
8163
8164         addr = qid * nwords;
8165         if (n > nwords)
8166                 n = nwords;
8167
8168         /* It might take 3-10ms before the IBQ debug read access is allowed.
8169          * Wait for 1 Sec with a delay of 1 usec.
8170          */
8171         attempts = 1000000;
8172
8173         for (i = 0; i < n; i++, addr++) {
8174                 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
8175                              IBQDBGEN_F);
8176                 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
8177                                       attempts, 1);
8178                 if (err)
8179                         return err;
8180                 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
8181         }
8182         t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
8183         return i;
8184 }
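
/* Caller sketch (illustrative only): each IBQ spans CIM_IBQ_SIZE * 4 32-bit
 * words (nwords above), so a full read of queue 0 could look like:
 *
 *	u32 buf[CIM_IBQ_SIZE * 4];
 *	int nread = t4_read_cim_ibq(adap, 0, buf, ARRAY_SIZE(buf));
 *
 *	if (nread < 0)
 *		return nread;
 */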
8185
8186 /**
8187  *      t4_read_cim_obq - read the contents of a CIM outbound queue
8188  *      @adap: the adapter
8189  *      @qid: the queue index
8190  *      @data: where to store the queue contents
8191  *      @n: capacity of @data in 32-bit words
8192  *
8193  *      Reads the contents of the selected CIM queue starting at address 0 up
8194  *      to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
8195  *      error and the number of 32-bit words actually read on success.
8196  */
8197 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8198 {
8199         int i, err;
8200         unsigned int addr, v, nwords;
8201         int cim_num_obq = is_t4(adap->params.chip) ?
8202                                 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
8203
8204         if ((qid > (cim_num_obq - 1)) || (n & 3))
8205                 return -EINVAL;
8206
8207         t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
8208                      QUENUMSELECT_V(qid));
8209         v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
8210
8211         addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
8212         nwords = CIMQSIZE_G(v) * 64;  /* same */
8213         if (n > nwords)
8214                 n = nwords;
8215
8216         for (i = 0; i < n; i++, addr++) {
8217                 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
8218                              OBQDBGEN_F);
8219                 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
8220                                       2, 1);
8221                 if (err)
8222                         return err;
8223                 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
8224         }
8225         t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
8226         return i;
8227 }
8228
8229 /**
8230  *      t4_cim_read - read a block from CIM internal address space
8231  *      @adap: the adapter
8232  *      @addr: the start address within the CIM address space
8233  *      @n: number of words to read
8234  *      @valp: where to store the result
8235  *
8236  *      Reads a block of 4-byte words from the CIM internal address space.
8237  */
8238 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8239                 unsigned int *valp)
8240 {
8241         int ret = 0;
8242
8243         if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
8244                 return -EBUSY;
8245
8246         for ( ; !ret && n--; addr += 4) {
8247                 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
8248                 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
8249                                       0, 5, 2);
8250                 if (!ret)
8251                         *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
8252         }
8253         return ret;
8254 }
8255
8256 /**
8257  *      t4_cim_write - write a block into CIM internal address space
8258  *      @adap: the adapter
8259  *      @addr: the start address within the CIM address space
8260  *      @n: number of words to write
8261  *      @valp: set of values to write
8262  *
8263  *      Writes a block of 4-byte words into the CIM internal address space.
8264  */
8265 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8266                  const unsigned int *valp)
8267 {
8268         int ret = 0;
8269
8270         if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
8271                 return -EBUSY;
8272
8273         for ( ; !ret && n--; addr += 4) {
8274                 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
8275                 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
8276                 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
8277                                       0, 5, 2);
8278         }
8279         return ret;
8280 }
8281
8282 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8283                          unsigned int val)
8284 {
8285         return t4_cim_write(adap, addr, 1, &val);
8286 }
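
/* Usage sketch (illustrative only): t4_cim_read()/t4_cim_write() operate on
 * 32-bit words within the CIM internal address space; t4_cim_read_la() below
 * uses them this way to read and update the LA configuration word:
 *
 *	unsigned int cfg;
 *	int ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
 *
 *	if (!ret)
 *		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, cfg | UPDBGLAEN_F);
 */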
8287
8288 /**
8289  *      t4_cim_read_la - read CIM LA capture buffer
8290  *      @adap: the adapter
8291  *      @la_buf: where to store the LA data
8292  *      @wrptr: the HW write pointer within the capture buffer
8293  *
8294  *      Reads the contents of the CIM LA buffer with the most recent entry at
8295  *      the end of the returned data and with the entry at @wrptr first.
8296  *      We try to leave the LA in the running state we find it in.
8297  */
8298 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8299 {
8300         int i, ret;
8301         unsigned int cfg, val, idx;
8302
8303         ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
8304         if (ret)
8305                 return ret;
8306
8307         if (cfg & UPDBGLAEN_F) {        /* LA is running, freeze it */
8308                 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
8309                 if (ret)
8310                         return ret;
8311         }
8312
8313         ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
8314         if (ret)
8315                 goto restart;
8316
8317         idx = UPDBGLAWRPTR_G(val);
8318         if (wrptr)
8319                 *wrptr = idx;
8320
8321         for (i = 0; i < adap->params.cim_la_size; i++) {
8322                 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
8323                                     UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
8324                 if (ret)
8325                         break;
8326                 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
8327                 if (ret)
8328                         break;
8329                 if (val & UPDBGLARDEN_F) {
8330                         ret = -ETIMEDOUT;
8331                         break;
8332                 }
8333                 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
8334                 if (ret)
8335                         break;
8336
8337                 /* Bits 0-3 of UpDbgLaRdPtr can range from 0000 to 1001 to
8338                  * identify the 32-bit portion of the full 312-bit data
8339                  */
8340                 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
8341                         idx = (idx & 0xff0) + 0x10;
8342                 else
8343                         idx++;
8344                 /* address can't exceed 0xfff */
8345                 idx &= UPDBGLARDPTR_M;
8346         }
8347 restart:
8348         if (cfg & UPDBGLAEN_F) {
8349                 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
8350                                       cfg & ~UPDBGLARDEN_F);
8351                 if (!ret)
8352                         ret = r;
8353         }
8354         return ret;
8355 }
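
/* Caller sketch (illustrative only): @la_buf must hold
 * adap->params.cim_la_size 32-bit words, e.g.:
 *
 *	u32 *la_buf = kcalloc(adap->params.cim_la_size, sizeof(u32),
 *			      GFP_KERNEL);
 *	unsigned int wrptr;
 *
 *	if (la_buf && !t4_cim_read_la(adap, la_buf, &wrptr))
 *		... process la_buf ...
 */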
8356
8357 /**
8358  *      t4_tp_read_la - read TP LA capture buffer
8359  *      @adap: the adapter
8360  *      @la_buf: where to store the LA data
8361  *      @wrptr: the HW write pointer within the capture buffer
8362  *
8363  *      Reads the contents of the TP LA buffer with the most recent entry at
8364  *      the end of the returned data and with the entry at @wrptr first.
8365  *      We leave the LA in the running state we find it in.
8366  */
8367 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8368 {
8369         bool last_incomplete;
8370         unsigned int i, cfg, val, idx;
8371
8372         cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
8373         if (cfg & DBGLAENABLE_F)                        /* freeze LA */
8374                 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
8375                              adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
8376
8377         val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
8378         idx = DBGLAWPTR_G(val);
8379         last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
8380         if (last_incomplete)
8381                 idx = (idx + 1) & DBGLARPTR_M;
8382         if (wrptr)
8383                 *wrptr = idx;
8384
8385         val &= 0xffff;
8386         val &= ~DBGLARPTR_V(DBGLARPTR_M);
8387         val |= adap->params.tp.la_mask;
8388
8389         for (i = 0; i < TPLA_SIZE; i++) {
8390                 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
8391                 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
8392                 idx = (idx + 1) & DBGLARPTR_M;
8393         }
8394
8395         /* Wipe out last entry if it isn't valid */
8396         if (last_incomplete)
8397                 la_buf[TPLA_SIZE - 1] = ~0ULL;
8398
8399         if (cfg & DBGLAENABLE_F)                    /* restore running state */
8400                 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
8401                              cfg | adap->params.tp.la_mask);
8402 }
8403
8404 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8405  * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
8406  * state for more than the Warning Threshold then we'll issue a warning about
8407  * a potential hang.  We'll repeat the warning every Warning Repeat seconds
8408  * for as long as the SGE Ingress DMA Channel appears to be hung.
8409  * If the situation clears, we'll note that as well.
8410  */
8411 #define SGE_IDMA_WARN_THRESH 1
8412 #define SGE_IDMA_WARN_REPEAT 300
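
/* Worked example (illustrative only): with SGE_IDMA_WARN_THRESH == 1 and
 * SGE_IDMA_WARN_REPEAT == 300, t4_idma_monitor() below starts warning once a
 * channel's synthesized stall timer exceeds 1 * hz ticks (about one second)
 * and then re-warns roughly every 300 * hz ticks (about five minutes) until
 * the channel makes forward progress again.
 */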
8413
8414 /**
8415  *      t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8416  *      @adapter: the adapter
8417  *      @idma: the adapter IDMA Monitor state
8418  *
8419  *      Initialize the state of an SGE Ingress DMA Monitor.
8420  */
8421 void t4_idma_monitor_init(struct adapter *adapter,
8422                           struct sge_idma_monitor_state *idma)
8423 {
8424         /* Initialize the state variables for detecting an SGE Ingress DMA
8425          * hang.  The SGE has internal counters which count up on each clock
8426          * tick whenever the SGE finds its Ingress DMA State Engines in the
8427          * same state they were on the previous clock tick.  The clock used is
8428          * the Core Clock so we have a limit on the maximum "time" they can
8429          * record; typically a very small number of seconds.  For instance,
8430          * with a 600MHz Core Clock, we can only count up to a bit more than
8431          * 7s.  So we'll synthesize a larger counter in order to not run the
8432          * risk of having the "timers" overflow and give us the flexibility to
8433          * maintain a Hung SGE State Machine of our own which operates across
8434          * a longer time frame.
8435          */
8436         idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8437         idma->idma_stalled[0] = 0;
8438         idma->idma_stalled[1] = 0;
8439 }
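
/* Arithmetic note (illustrative only): with a 600MHz Core Clock,
 * core_ticks_per_usec(adapter) is 600, so idma_1s_thresh is 600,000,000
 * ticks.  A 32-bit same-state counter pegs at 0xffffffff, which at that
 * clock rate corresponds to roughly 7.2 seconds, hence the synthesized,
 * longer-lived stall timer maintained in t4_idma_monitor() below.
 */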
8440
8441 /**
8442  *      t4_idma_monitor - monitor SGE Ingress DMA state
8443  *      @adapter: the adapter
8444  *      @idma: the adapter IDMA Monitor state
8445  *      @hz: number of ticks/second
8446  *      @ticks: number of ticks since the last IDMA Monitor call
8447  */
8448 void t4_idma_monitor(struct adapter *adapter,
8449                      struct sge_idma_monitor_state *idma,
8450                      int hz, int ticks)
8451 {
8452         int i, idma_same_state_cnt[2];
8453
8454          /* Read the SGE Debug Ingress DMA Same State Count registers.  These
8455           * are counters inside the SGE which count up on each clock when the
8456           * SGE finds its Ingress DMA State Engines in the same states they
8457           * were in the previous clock.  The counters will peg out at
8458           * 0xffffffff without wrapping around so once they pass the 1s
8459           * threshold they'll stay above that till the IDMA state changes.
8460           */
8461         t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
8462         idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
8463         idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
8464
8465         for (i = 0; i < 2; i++) {
8466                 u32 debug0, debug11;
8467
8468                 /* If the Ingress DMA Same State Counter ("timer") is less
8469                  * than 1s, then we can reset our synthesized Stall Timer and
8470                  * continue.  If we have previously emitted warnings about a
8471                  * potential stalled Ingress Queue, issue a note indicating
8472                  * that the Ingress Queue has resumed forward progress.
8473                  */
8474                 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8475                         if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
8476                                 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
8477                                          "resumed after %d seconds\n",
8478                                          i, idma->idma_qid[i],
8479                                          idma->idma_stalled[i] / hz);
8480                         idma->idma_stalled[i] = 0;
8481                         continue;
8482                 }
8483
8484                 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8485                  * domain.  The first time we get here it'll be because we
8486                  * passed the 1s Threshold; each additional time it'll be
8487                  * because the RX Timer Callback is being fired on its regular
8488                  * schedule.
8489                  *
8490                  * If the stall is below our Potential Hung Ingress Queue
8491                  * Warning Threshold, continue.
8492                  */
8493                 if (idma->idma_stalled[i] == 0) {
8494                         idma->idma_stalled[i] = hz;
8495                         idma->idma_warn[i] = 0;
8496                 } else {
8497                         idma->idma_stalled[i] += ticks;
8498                         idma->idma_warn[i] -= ticks;
8499                 }
8500
8501                 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
8502                         continue;
8503
8504                 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8505                  */
8506                 if (idma->idma_warn[i] > 0)
8507                         continue;
8508                 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
8509
8510                 /* Read and save the SGE IDMA State and Queue ID information.
8511                  * We do this every time in case it changes across time ...
8512                  * can't be too careful ...
8513                  */
8514                 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
8515                 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
8516                 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8517
8518                 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
8519                 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
8520                 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8521
8522                 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
8523                          "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8524                          i, idma->idma_qid[i], idma->idma_state[i],
8525                          idma->idma_stalled[i] / hz,
8526                          debug0, debug11);
8527                 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8528         }
8529 }
8530
8531 /**
8532  *      t4_set_vf_mac_acl - Set MAC address(es) for the specified VF
8533  *      @adapter: The adapter
8534  *      @vf: one of the VFs instantiated by the specified PF
8535  *      @naddr: the number of MAC addresses
8536  *      @addr: the MAC address(es) to be set to the specified VF
8537  */
8538 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
8539                       unsigned int naddr, u8 *addr)
8540 {
8541         struct fw_acl_mac_cmd cmd;
8542
8543         memset(&cmd, 0, sizeof(cmd));
8544         cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
8545                                     FW_CMD_REQUEST_F |
8546                                     FW_CMD_WRITE_F |
8547                                     FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
8548                                     FW_ACL_MAC_CMD_VFN_V(vf));
8549
8550         /* Note: Do not enable the ACL */
8551         cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
8552         cmd.nmac = naddr;
8553
8554         switch (adapter->pf) {
8555         case 3:
8556                 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
8557                 break;
8558         case 2:
8559                 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
8560                 break;
8561         case 1:
8562                 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
8563                 break;
8564         case 0:
8565                 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
8566                 break;
8567         }
8568
8569         return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
8570 }
8571
8572 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
8573                     int rateunit, int ratemode, int channel, int class,
8574                     int minrate, int maxrate, int weight, int pktsize)
8575 {
8576         struct fw_sched_cmd cmd;
8577
8578         memset(&cmd, 0, sizeof(cmd));
8579         cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
8580                                       FW_CMD_REQUEST_F |
8581                                       FW_CMD_WRITE_F);
8582         cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
8583
8584         cmd.u.params.sc = FW_SCHED_SC_PARAMS;
8585         cmd.u.params.type = type;
8586         cmd.u.params.level = level;
8587         cmd.u.params.mode = mode;
8588         cmd.u.params.ch = channel;
8589         cmd.u.params.cl = class;
8590         cmd.u.params.unit = rateunit;
8591         cmd.u.params.rate = ratemode;
8592         cmd.u.params.min = cpu_to_be32(minrate);
8593         cmd.u.params.max = cpu_to_be32(maxrate);
8594         cmd.u.params.weight = cpu_to_be16(weight);
8595         cmd.u.params.pktsize = cpu_to_be16(pktsize);
8596
8597         return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
8598                                NULL, 1);
8599 }
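
/* Usage sketch (illustrative only): the arguments are passed through to the
 * firmware unmodified apart from byte-order conversion.  Assuming the
 * FW_SCHED_* parameter constants from t4fw_api.h, a hypothetical caller
 * applying an absolute bit-rate limit to a traffic class might look like:
 *
 *	ret = t4_sched_params(adap, FW_SCHED_TYPE_PKTSCHED,
 *			      FW_SCHED_PARAMS_LEVEL_CL_RL,
 *			      FW_SCHED_PARAMS_MODE_CLASS,
 *			      FW_SCHED_PARAMS_UNIT_BITRATE,
 *			      FW_SCHED_PARAMS_RATE_ABS,
 *			      channel, class, 0, maxrate, 0, pktsize);
 */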