/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
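
/* Each polling iteration waits CHIP_MCP_RESP_ITER_US (10 usec), so the retry
 * counts above correspond to roughly 5 sec for a regular mailbox command and
 * 500 msec for an MCP reset.
 */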
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))
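
/* DRV_MB_WR()/DRV_MB_RD() access a named field of struct public_drv_mb inside
 * this PF's driver mailbox in MCP shared memory, using the mailbox address
 * cached in mcp_info->drv_mb_addr.
 */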
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT	17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
	}
	kfree(p_hwfn->mcp_info);

	return 0;
}
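
/* Derive this PF's driver-mailbox and MFW-mailbox addresses from the MCP's
 * public shared memory, and latch the current mailbox, pulse and reset
 * (GENERIC_POR_0) sequences.
 */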
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlock */
	spin_lock_init(&p_info->lock);

	return 0;

err:
	DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * mailbox commands of competing contexts to fail.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	spin_lock_bh(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
		return -EBUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
	}

	return 0;
}

static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
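
/* Request an MCP reset through the mailbox and poll MISCS_REG_GENERIC_POR_0
 * until the MFW acknowledges the reset by changing its value.
 */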
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != 0)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}
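
/* Issue a single mailbox command: write the parameter and the command (tagged
 * with an incremented sequence number) into the driver mailbox, then poll the
 * FW mailbox header until the matching sequence number appears or the retry
 * budget is exhausted.
 */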
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd, u32 param,
			  u32 *o_mcp_resp, u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 sec (500*1000*10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		rc = -EAGAIN;
	}

	return rc;
}
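
/* Wrapper around qed_do_mcp_cmd() that serializes mailbox access and copies
 * the union_data area to/from shared memory when the caller supplies a source
 * or destination buffer.
 */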
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	int rc;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc)
		return rc;

	if (p_mb_params->p_data_src != NULL)
		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
			      p_mb_params->p_data_src,
			      sizeof(*p_mb_params->p_data_src));

	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			    p_mb_params->param, &p_mb_params->mcp_resp,
			    &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != NULL)
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr,
				sizeof(*p_mb_params->p_data_dst));

	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       u32 cmd, u32 param, u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, &union_data.raw_data, *o_txn_size);

	return 0;
}
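
/* Send LOAD_REQ to the MFW. The request parameter carries the PDA
 * compatibility word built from the FW version, the current MCP HSI version
 * and the driver type; the driver version string travels in union_data.
 */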
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	/* Load Request */
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type;
	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	*p_load_code = mb_params.mcp_resp;

	/* If MFW refused (e.g. other port is in diagnostic mode) we
	 * must abort. This can happen in the following cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}
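
/* The MFW reported that one or more VFs were disabled (FLR'ed). Read the
 * per-path disabled-VF bitmap from shared memory and schedule the IOV
 * workqueue if any of this PF's VFs are marked.
 */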
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
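
/* Acknowledge VF FLR handling towards the MFW and clear the per-function
 * drv_ack_vf_disabled bits in shared memory.
 */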
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
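
/* Parse the port's link_status word from shared memory into the cached
 * qed_mcp_link_state, re-apply the min/max bandwidth configuration and notify
 * the rest of the driver through qed_link_update().
 */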
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;
589 switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
590 case LINK_STATUS_SPEED_AND_DUPLEX_100G:
591 p_link->speed = 100000;
593 case LINK_STATUS_SPEED_AND_DUPLEX_50G:
594 p_link->speed = 50000;
596 case LINK_STATUS_SPEED_AND_DUPLEX_40G:
597 p_link->speed = 40000;
599 case LINK_STATUS_SPEED_AND_DUPLEX_25G:
600 p_link->speed = 25000;
602 case LINK_STATUS_SPEED_AND_DUPLEX_20G:
603 p_link->speed = 20000;
605 case LINK_STATUS_SPEED_AND_DUPLEX_10G:
606 p_link->speed = 10000;
608 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
609 p_link->full_duplex = false;
611 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
612 p_link->speed = 1000;
618 if (p_link->link_up && p_link->speed)
619 p_link->line_speed = p_link->speed;
621 p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
}
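
/* Build an eth_phy_cfg from the cached link_input parameters and send it to
 * the MFW - DRV_MSG_CODE_INIT_PHY to bring the link up, or
 * DRV_MSG_CODE_LINK_RESET to take it down.
 */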
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	phy_cfg = &union_data.drv_phy_cfg;
	memset(phy_cfg, 0, sizeof(*phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg->speed = params->speed.forced_speed;
	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg->adv_speed = params->speed.advertised_speeds;
	phy_cfg->loopback_mode = params->loopback_mode;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg->speed,
			   phy_cfg->pause,
			   phy_cfg->adv_speed,
			   phy_cfg->loopback_mode,
			   phy_cfg->feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}
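
/* The MFW asked for protocol statistics: gather them from the driver and
 * return them through the GET_STATS mailbox command.
 */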
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	memcpy(&union_data, &stats, sizeof(stats));
	mb_params.p_data_src = &union_data;
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));

	return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}
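
/* Called when the MFW raises an attention: re-read the MFW mailbox, compare
 * it against the shadow copy, dispatch every changed message and then ack all
 * messages back to the MFW (in big-endian, as it expects).
 */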
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects the answer in BE, so force the write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (test_bit(QED_DEV_CAP_ROCE,
			     &p_hwfn->hw_info.device_capabilities))
			*p_proto = QED_PCI_ETH_ROCE;
		else
			*p_proto = QED_PCI_ETH;
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		rc = -EINVAL;
		break;
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}
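
/* Request MSI-X vectors for a VF. Only the leading hwfn issues the command,
 * and the requested amount is scaled by the number of hwfns to account for
 * CMT devices.
 */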
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	__be32 val;
	u32 i;
	int rc;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;

	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
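
/* qed_mcp_halt() asks the MFW to halt the MCP via the mailbox, while
 * qed_mcp_resume() clears the soft-halt bit in MCP_REG_CPU_MODE directly.
 */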
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 value, cpu_mode;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}