2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
15 * qla2x00_mailbox_command
16 * Issue mailbox command and waits for completion.
19 * ha = adapter block pointer.
20 * mcp = driver internal mbx struct pointer.
23 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
26 * 0 : QLA_SUCCESS = cmd performed success
27 * 1 : QLA_FUNCTION_FAILED (error encountered)
28 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
/*
 * Core mailbox engine: serializes and issues one mailbox command to the ISP
 * firmware, then waits (interrupt-driven or polled) for completion.
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED or QLA_FUNCTION_TIMEOUT
 * (per the result-code comment at the top of this file).
 * NOTE(review): this extract elides several lines (gaps in the original
 * numbering); comments below describe only what is visible here.
 */
34 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
37 unsigned long flags = 0;
38 device_reg_t __iomem *reg;
43 uint16_t __iomem *optr;
46 unsigned long wait_time;
47 struct qla_hw_data *ha = vha->hw;
/* All serialization state (mbx_cmd_comp, dpc_flags) lives on the base vha. */
48 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
50 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Early-out checks: dead PCI channel, failed device, EEH permanent
 * failure, or hung 82xx firmware -- all reported as a timeout. */
52 if (ha->pdev->error_state > pci_channel_io_frozen) {
53 ql_log(ql_log_warn, vha, 0x1001,
54 "error_state is greater than pci_channel_io_frozen, "
56 return QLA_FUNCTION_TIMEOUT;
59 if (vha->device_flags & DFLG_DEV_FAILED) {
60 ql_log(ql_log_warn, vha, 0x1002,
61 "Device in failed state, exiting.\n");
62 return QLA_FUNCTION_TIMEOUT;
66 io_lock_on = base_vha->flags.init_done;
69 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
72 if (ha->flags.pci_channel_io_perm_failure) {
73 ql_log(ql_log_warn, vha, 0x1003,
74 "Perm failure on EEH timeout MBX, exiting.\n");
75 return QLA_FUNCTION_TIMEOUT;
78 if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
79 /* Setting Link-Down error */
80 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
81 ql_log(ql_log_warn, vha, 0x1004,
82 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
83 return QLA_FUNCTION_TIMEOUT;
87 * Wait for active mailbox commands to finish by waiting at most tov
88 * seconds. This is to serialize actual issuing of mailbox cmds during
/* mbx_cmd_comp is the one-at-a-time gate; completed again near the
 * bottom of this function to admit the next mailbox command. */
91 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
92 /* Timeout occurred. Return error. */
93 ql_log(ql_log_warn, vha, 0x1005,
94 "Cmd access timeout, cmd=0x%x, Exiting.\n",
96 return QLA_FUNCTION_TIMEOUT;
99 ha->flags.mbox_busy = 1;
100 /* Save mailbox command for debug */
103 ql_dbg(ql_dbg_mbx, vha, 0x1006,
104 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
/* hardware_lock held across register load and the host-interrupt kick. */
106 spin_lock_irqsave(&ha->hardware_lock, flags);
108 /* Load mailbox registers. */
/* Pick the outgoing mailbox register bank by ISP family:
 * 82xx, FWI2-capable (24xx+), or legacy ISP. */
110 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
111 else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
112 optr = (uint16_t __iomem *)®->isp24.mailbox0;
114 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
117 command = mcp->mb[0];
118 mboxes = mcp->out_mb;
/* Write each mb[] word selected by the out_mb bitmap to the chip. */
120 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
/* ISP2200 quirk: registers 8+ live at a different base. */
121 if (IS_QLA2200(ha) && cnt == 8)
123 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
125 WRT_REG_WORD(optr, *iptr);
/* Verbose debug dump of the outbound mailbox image. */
132 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
133 "Loaded MBX registers (displayed in bytes) =.\n");
134 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
135 (uint8_t *)mcp->mb, 16);
136 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
138 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
139 ((uint8_t *)mcp->mb + 0x10), 16);
140 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
142 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
143 ((uint8_t *)mcp->mb + 0x20), 8);
144 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
145 "I/O Address = %p.\n", optr);
146 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
148 /* Issue set host interrupt command to send cmd out. */
149 ha->flags.mbox_int = 0;
150 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
152 /* Unlock mbx registers and wait for interrupt */
153 ql_dbg(ql_dbg_mbx, vha, 0x100f,
154 "Going to unlock irq & waiting for interrupts. "
155 "jiffies=%lx.\n", jiffies);
156 /* Wait for mbx cmd completion until timeout */
/* Interrupt-driven path: init done and no ISP abort in progress
 * (or chip type that never polls). */
159 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
160 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
/* 82xx: refuse to kick if a mailbox interrupt is already pending. */
162 if (IS_QLA82XX(ha)) {
163 if (RD_REG_DWORD(®->isp82.hint) &
164 HINT_MBX_INT_PENDING) {
165 spin_unlock_irqrestore(&ha->hardware_lock,
167 ha->flags.mbox_busy = 0;
168 ql_dbg(ql_dbg_mbx, vha, 0x1010,
169 "Pending mailbox timeout, exiting.\n");
170 rval = QLA_FUNCTION_TIMEOUT;
173 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
174 } else if (IS_FWI2_CAPABLE(ha))
175 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
177 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
178 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Sleep until the ISR signals mbx_intr_comp, or tov expires. */
180 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
182 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
/* Polling path: same kick sequence, then spin on mbox_int below. */
185 ql_dbg(ql_dbg_mbx, vha, 0x1011,
186 "Cmd=%x Polling Mode.\n", command);
188 if (IS_QLA82XX(ha)) {
189 if (RD_REG_DWORD(®->isp82.hint) &
190 HINT_MBX_INT_PENDING) {
191 spin_unlock_irqrestore(&ha->hardware_lock,
193 ha->flags.mbox_busy = 0;
194 ql_dbg(ql_dbg_mbx, vha, 0x1012,
195 "Pending mailbox timeout, exiting.\n");
196 rval = QLA_FUNCTION_TIMEOUT;
199 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
200 } else if (IS_FWI2_CAPABLE(ha))
201 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
203 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
204 spin_unlock_irqrestore(&ha->hardware_lock, flags);
206 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
/* Busy-poll the response queue until mbox_int is set or tov elapses. */
207 while (!ha->flags.mbox_int) {
208 if (time_after(jiffies, wait_time))
211 /* Check for pending interrupts. */
212 qla2x00_poll(ha->rsp_q_map[0]);
214 if (!ha->flags.mbox_int &&
216 command == MBC_LOAD_RISC_RAM_EXTENDED))
219 ql_dbg(ql_dbg_mbx, vha, 0x1013,
221 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
224 /* Check whether we timed out */
225 if (ha->flags.mbox_int) {
228 ql_dbg(ql_dbg_mbx, vha, 0x1014,
229 "Cmd=%x completed.\n", command);
231 /* Got interrupt. Clear the flag. */
232 ha->flags.mbox_int = 0;
233 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* 82xx firmware may have hung while we waited: report Link-Down. */
235 if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
236 ha->flags.mbox_busy = 0;
237 /* Setting Link-Down error */
238 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
240 rval = QLA_FUNCTION_FAILED;
241 ql_log(ql_log_warn, vha, 0x1015,
242 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
/* Firmware status word: anything but COMMAND_COMPLETE is a failure. */
246 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
247 rval = QLA_FUNCTION_FAILED;
249 /* Load return mailbox registers. */
251 iptr = (uint16_t *)&ha->mailbox_out[0];
253 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
/* Timed-out branch: snapshot mb0/ictrl for the log, then fw dump. */
266 if (IS_FWI2_CAPABLE(ha)) {
267 mb0 = RD_REG_WORD(®->isp24.mailbox0);
268 ictrl = RD_REG_DWORD(®->isp24.ictrl);
270 mb0 = RD_MAILBOX_REG(ha, ®->isp, 0);
271 ictrl = RD_REG_WORD(®->isp.ictrl);
273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
274 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
275 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
279 * Attempt to capture a firmware dump for further analysis
280 * of the current firmware state
282 ha->isp_ops->fw_dump(vha, 0);
284 rval = QLA_FUNCTION_TIMEOUT;
287 ha->flags.mbox_busy = 0;
/* If we were polling, drain any response interrupt we may have raced. */
292 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
293 ql_dbg(ql_dbg_mbx, vha, 0x101a,
294 "Checking for additional resp interrupt.\n");
296 /* polling mode for non isp_abort commands. */
297 qla2x00_poll(ha->rsp_q_map[0]);
/* Timeout recovery: either schedule an ISP abort on the DPC thread,
 * or (when already on the DPC thread) run abort_isp directly. */
300 if (rval == QLA_FUNCTION_TIMEOUT &&
301 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
302 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
303 ha->flags.eeh_busy) {
304 /* not in dpc. schedule it for dpc to take over. */
305 ql_dbg(ql_dbg_mbx, vha, 0x101b,
306 "Timeout, schedule isp_abort_needed.\n");
308 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
309 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
310 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
311 if (IS_QLA82XX(ha)) {
312 ql_dbg(ql_dbg_mbx, vha, 0x112a,
313 "disabling pause transmit on port "
316 QLA82XX_CRB_NIU + 0x98,
317 CRB_NIU_XG_PAUSE_CTL_P0|
318 CRB_NIU_XG_PAUSE_CTL_P1);
320 ql_log(ql_log_info, base_vha, 0x101c,
321 "Mailbox cmd timeout occurred, cmd=0x%x, "
322 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
323 "abort.\n", command, mcp->mb[0],
325 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
326 qla2xxx_wake_dpc(vha);
328 } else if (!abort_active) {
329 /* call abort directly since we are in the DPC thread */
330 ql_dbg(ql_dbg_mbx, vha, 0x101d,
331 "Timeout, calling abort_isp.\n");
333 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
334 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
335 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
336 if (IS_QLA82XX(ha)) {
337 ql_dbg(ql_dbg_mbx, vha, 0x112b,
338 "disabling pause transmit on port "
341 QLA82XX_CRB_NIU + 0x98,
342 CRB_NIU_XG_PAUSE_CTL_P0|
343 CRB_NIU_XG_PAUSE_CTL_P1);
345 ql_log(ql_log_info, base_vha, 0x101e,
346 "Mailbox cmd timeout occurred, cmd=0x%x, "
347 "mb[0]=0x%x. Scheduling ISP abort ",
348 command, mcp->mb[0]);
349 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
350 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
351 /* Allow next mbx cmd to come in. */
/* Release the serialization gate BEFORE abort_isp, since the
 * abort path itself issues mailbox commands. */
352 complete(&ha->mbx_cmd_comp);
353 if (ha->isp_ops->abort_isp(vha)) {
354 /* Failed. retry later. */
355 set_bit(ISP_ABORT_NEEDED,
358 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
359 ql_dbg(ql_dbg_mbx, vha, 0x101f,
360 "Finished abort_isp.\n");
367 /* Allow next mbx cmd to come in. */
368 complete(&ha->mbx_cmd_comp);
372 ql_log(ql_log_warn, base_vha, 0x1020,
373 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
374 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
376 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/*
 * Load a firmware segment into RISC RAM via MBC_LOAD_RISC_RAM[_EXTENDED].
 * req_dma is the DMA address of the image, risc_addr the RISC-side load
 * address, risc_code_size the segment length. Uses the extended command
 * whenever the address needs mb[8] (MSW set) or the ISP is FWI2-capable.
 */
383 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
384 uint32_t risc_code_size)
387 struct qla_hw_data *ha = vha->hw;
389 mbx_cmd_t *mcp = &mc;
391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
392 "Entered %s.\n", __func__);
394 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
395 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
/* Extended form carries the upper 16 bits of the RISC address in mb[8]. */
396 mcp->mb[8] = MSW(risc_addr);
397 mcp->out_mb = MBX_8|MBX_0;
399 mcp->mb[0] = MBC_LOAD_RISC_RAM;
/* 64-bit DMA address split across mb[2,3] (low dword) and mb[6,7]
 * (high dword). */
402 mcp->mb[1] = LSW(risc_addr);
403 mcp->mb[2] = MSW(req_dma);
404 mcp->mb[3] = LSW(req_dma);
405 mcp->mb[6] = MSW(MSD(req_dma));
406 mcp->mb[7] = LSW(MSD(req_dma));
407 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
/* FWI2 takes a 32-bit size (mb[4]=MSW, mb[5]=LSW); legacy only mb[4]. */
408 if (IS_FWI2_CAPABLE(ha)) {
409 mcp->mb[4] = MSW(risc_code_size);
410 mcp->mb[5] = LSW(risc_code_size);
411 mcp->out_mb |= MBX_5|MBX_4;
413 mcp->mb[4] = LSW(risc_code_size);
414 mcp->out_mb |= MBX_4;
418 mcp->tov = MBX_TOV_SECONDS;
420 rval = qla2x00_mailbox_command(vha, mcp);
422 if (rval != QLA_SUCCESS) {
423 ql_dbg(ql_dbg_mbx, vha, 0x1023,
424 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
427 "Done %s.\n", __func__);
433 #define EXTENDED_BB_CREDITS BIT_0
436 * Start adapter firmware.
439 * ha = adapter block pointer.
440 * TARGET_QUEUE_LOCK must be released.
441 * ADAPTER_STATE_LOCK must be released.
444 * qla2x00 local function return status code.
/*
 * Start the downloaded firmware executing at risc_addr
 * (MBC_EXECUTE_FIRMWARE). On FWI2-capable ISPs the address is 32 bits
 * (mb[1]/mb[2]); 81xx/83xx additionally pass the extended-BB-credits NVRAM
 * option in mb[4]. Returns a qla2x00 local status code.
 */
450 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
453 struct qla_hw_data *ha = vha->hw;
455 mbx_cmd_t *mcp = &mc;
457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
458 "Entered %s.\n", __func__);
460 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
463 if (IS_FWI2_CAPABLE(ha)) {
464 mcp->mb[1] = MSW(risc_addr);
465 mcp->mb[2] = LSW(risc_addr);
467 if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
468 struct nvram_81xx *nv = ha->nvram;
/* Pass through only the EXTENDED_BB_CREDITS bit from NVRAM. */
469 mcp->mb[4] = (nv->enhanced_features &
470 EXTENDED_BB_CREDITS);
473 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
/* Legacy ISPs: 16-bit start address in mb[1] only. */
476 mcp->mb[1] = LSW(risc_addr);
477 mcp->out_mb |= MBX_1;
478 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
480 mcp->out_mb |= MBX_2;
484 mcp->tov = MBX_TOV_SECONDS;
486 rval = qla2x00_mailbox_command(vha, mcp);
488 if (rval != QLA_SUCCESS) {
489 ql_dbg(ql_dbg_mbx, vha, 0x1026,
490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* On success, FWI2 firmware reports its exchange count in mb[1]. */
492 if (IS_FWI2_CAPABLE(ha)) {
493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
494 "Done exchanges=%x.\n", mcp->mb[1]);
496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
497 "Done %s.\n", __func__);
505 * qla2x00_get_fw_version
506 * Get firmware version.
509 * ha: adapter state pointer.
510 * major: pointer for major number.
511 * minor: pointer for minor number.
512 * subminor: pointer for subminor number.
515 * qla2x00 local function return status code.
/*
 * Query firmware version/attributes (MBC_GET_FIRMWARE_VERSION) and cache
 * the results in qla_hw_data: fw_{major,minor,subminor}_version,
 * fw_attributes, fw_memory_size, and on 81xx/8031 the MPI/PHY versions.
 */
521 qla2x00_get_fw_version(scsi_qla_host_t *vha)
525 mbx_cmd_t *mcp = &mc;
526 struct qla_hw_data *ha = vha->hw;
528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
529 "Entered %s.\n", __func__);
531 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
533 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
/* 81xx/8031 return MPI/PHY version data in mb[8..13]. */
534 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
535 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
/* FWI2 firmware adds extended attribute words in mb[15..17]. */
536 if (IS_FWI2_CAPABLE(ha))
537 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
539 mcp->tov = MBX_TOV_SECONDS;
540 rval = qla2x00_mailbox_command(vha, mcp);
541 if (rval != QLA_SUCCESS)
544 /* Return mailbox data. */
545 ha->fw_major_version = mcp->mb[1];
546 ha->fw_minor_version = mcp->mb[2];
547 ha->fw_subminor_version = mcp->mb[3];
548 ha->fw_attributes = mcp->mb[6];
549 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
550 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
552 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
/* Unpack MPI version (mb[10..11]), capabilities (mb[12..13]) and
 * PHY version (mb[8..9]); each version byte is packed two per word. */
553 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
554 ha->mpi_version[0] = mcp->mb[10] & 0xff;
555 ha->mpi_version[1] = mcp->mb[11] >> 8;
556 ha->mpi_version[2] = mcp->mb[11] & 0xff;
557 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
558 ha->phy_version[0] = mcp->mb[8] & 0xff;
559 ha->phy_version[1] = mcp->mb[9] >> 8;
560 ha->phy_version[2] = mcp->mb[9] & 0xff;
562 if (IS_FWI2_CAPABLE(ha)) {
563 ha->fw_attributes_h = mcp->mb[15];
564 ha->fw_attributes_ext[0] = mcp->mb[16];
565 ha->fw_attributes_ext[1] = mcp->mb[17];
566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
567 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
568 __func__, mcp->mb[15], mcp->mb[6]);
569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
570 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
571 __func__, mcp->mb[17], mcp->mb[16]);
575 if (rval != QLA_SUCCESS) {
577 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
581 "Done %s.\n", __func__);
587 * qla2x00_get_fw_options
588 * Set firmware options.
591 * ha = adapter block pointer.
592 * fwopt = pointer for firmware options.
595 * qla2x00 local function return status code.
/*
 * Read current firmware option words (MBC_GET_FIRMWARE_OPTION) into
 * fwopts[0..3]. fwopts must hold at least 4 uint16_t entries.
 */
601 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
605 mbx_cmd_t *mcp = &mc;
607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
608 "Entered %s.\n", __func__);
610 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
612 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
613 mcp->tov = MBX_TOV_SECONDS;
615 rval = qla2x00_mailbox_command(vha, mcp);
617 if (rval != QLA_SUCCESS) {
619 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
/* On success, copy back mb[0..3] verbatim to the caller. */
621 fwopts[0] = mcp->mb[0];
622 fwopts[1] = mcp->mb[1];
623 fwopts[2] = mcp->mb[2];
624 fwopts[3] = mcp->mb[3];
626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
627 "Done %s.\n", __func__);
635 * qla2x00_set_fw_options
636 * Set firmware options.
639 * ha = adapter block pointer.
640 * fwopt = pointer for firmware options.
643 * qla2x00 local function return status code.
/*
 * Program firmware option words (MBC_SET_FIRMWARE_OPTION) from
 * fwopts[1..3] (FWI2 additionally fwopts[10..11]); fwopts[0] is
 * overwritten with the completion status word on return.
 */
649 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
653 mbx_cmd_t *mcp = &mc;
655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
656 "Entered %s.\n", __func__);
658 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
659 mcp->mb[1] = fwopts[1];
660 mcp->mb[2] = fwopts[2];
661 mcp->mb[3] = fwopts[3];
662 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
664 if (IS_FWI2_CAPABLE(vha->hw)) {
667 mcp->mb[10] = fwopts[10];
668 mcp->mb[11] = fwopts[11];
669 mcp->mb[12] = 0; /* Undocumented, but used */
670 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
672 mcp->tov = MBX_TOV_SECONDS;
674 rval = qla2x00_mailbox_command(vha, mcp);
/* Always reflect the firmware status back through fwopts[0]. */
676 fwopts[0] = mcp->mb[0];
678 if (rval != QLA_SUCCESS) {
680 ql_dbg(ql_dbg_mbx, vha, 0x1030,
681 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
685 "Done %s.\n", __func__);
692 * qla2x00_mbx_reg_test
693 * Mailbox register wrap test.
696 * ha = adapter block pointer.
697 * TARGET_QUEUE_LOCK must be released.
698 * ADAPTER_STATE_LOCK must be released.
701 * qla2x00 local function return status code.
/*
 * Mailbox register wrap test (MBC_MAILBOX_REGISTER_TEST): writes known
 * patterns through mb[1..7] and verifies firmware echoes them back.
 * Any mismatch downgrades a successful command to QLA_FUNCTION_FAILED.
 */
707 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
711 mbx_cmd_t *mcp = &mc;
713 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
714 "Entered %s.\n", __func__);
716 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
724 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
725 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
726 mcp->tov = MBX_TOV_SECONDS;
728 rval = qla2x00_mailbox_command(vha, mcp);
730 if (rval == QLA_SUCCESS) {
/* Expected echo patterns (set before the call; elided in this
 * extract): 0xAAAA/0x5555/0xAA55/0x55AA/0xA5A5/0x5A5A/0x2525. */
731 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
732 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
733 rval = QLA_FUNCTION_FAILED;
734 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
735 mcp->mb[7] != 0x2525)
736 rval = QLA_FUNCTION_FAILED;
739 if (rval != QLA_SUCCESS) {
741 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
745 "Done %s.\n", __func__);
752 * qla2x00_verify_checksum
753 * Verify firmware checksum.
756 * ha = adapter block pointer.
757 * TARGET_QUEUE_LOCK must be released.
758 * ADAPTER_STATE_LOCK must be released.
761 * qla2x00 local function return status code.
/*
 * Ask firmware to verify the checksum of the image loaded at risc_addr
 * (MBC_VERIFY_CHECKSUM). FWI2 uses a 32-bit address in mb[1]/mb[2];
 * legacy ISPs a 16-bit address in mb[1]. On failure the log includes the
 * checksum the firmware reported.
 */
767 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
771 mbx_cmd_t *mcp = &mc;
773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
774 "Entered %s.\n", __func__);
776 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
779 if (IS_FWI2_CAPABLE(vha->hw)) {
780 mcp->mb[1] = MSW(risc_addr);
781 mcp->mb[2] = LSW(risc_addr);
782 mcp->out_mb |= MBX_2|MBX_1;
783 mcp->in_mb |= MBX_2|MBX_1;
785 mcp->mb[1] = LSW(risc_addr);
786 mcp->out_mb |= MBX_1;
790 mcp->tov = MBX_TOV_SECONDS;
792 rval = qla2x00_mailbox_command(vha, mcp);
794 if (rval != QLA_SUCCESS) {
/* FWI2 reports a 32-bit checksum split across mb[2]:mb[1]. */
795 ql_dbg(ql_dbg_mbx, vha, 0x1036,
796 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
797 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
800 "Done %s.\n", __func__);
808 * Issue IOCB using mailbox command
811 * ha = adapter state pointer.
812 * buffer = buffer pointer.
813 * phys_addr = physical address of buffer.
814 * size = size of buffer.
815 * TARGET_QUEUE_LOCK must be released.
816 * ADAPTER_STATE_LOCK must be released.
819 * qla2x00 local function return status code.
/*
 * Execute a single IOCB via the mailbox interface (MBC_IOCB_COMMAND_A64).
 * buffer/phys_addr describe the same IOCB (kernel virtual / DMA address);
 * tov is the mailbox timeout in seconds. On success the completion status
 * is read back from the IOCB in 'buffer' with reserved entry_status bits
 * masked off.
 */
825 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
826 dma_addr_t phys_addr, size_t size, uint32_t tov)
830 mbx_cmd_t *mcp = &mc;
832 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
833 "Entered %s.\n", __func__);
835 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
/* 64-bit IOCB DMA address: mb[2,3] low dword, mb[6,7] high dword. */
837 mcp->mb[2] = MSW(phys_addr);
838 mcp->mb[3] = LSW(phys_addr);
839 mcp->mb[6] = MSW(MSD(phys_addr));
840 mcp->mb[7] = LSW(MSD(phys_addr));
841 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
842 mcp->in_mb = MBX_2|MBX_0;
845 rval = qla2x00_mailbox_command(vha, mcp);
847 if (rval != QLA_SUCCESS) {
849 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
851 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
853 /* Mask reserved bits. */
854 sts_entry->entry_status &=
855 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
857 "Done %s.\n", __func__);
/* Convenience wrapper: issue an IOCB with the default mailbox timeout. */
864 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
867 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
872 * qla2x00_abort_command
873 * Abort command aborts a specified IOCB.
876 * ha = adapter block pointer.
877 * sp = SB structure pointer.
880 * qla2x00 local function return status code.
/*
 * Abort a single outstanding IOCB (MBC_ABORT_COMMAND). Locates the srb's
 * handle in the request queue's outstanding_cmds[] under hardware_lock,
 * then asks firmware to abort that handle on the fcport's loop id / LUN.
 * Returns QLA_FUNCTION_FAILED when the command is no longer outstanding.
 */
886 qla2x00_abort_command(srb_t *sp)
888 unsigned long flags = 0;
892 mbx_cmd_t *mcp = &mc;
893 fc_port_t *fcport = sp->fcport;
894 scsi_qla_host_t *vha = fcport->vha;
895 struct qla_hw_data *ha = vha->hw;
896 struct req_que *req = vha->req;
897 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
899 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
900 "Entered %s.\n", __func__);
/* Handle 0 is reserved; scan 1..num_outstanding_cmds for this sp. */
902 spin_lock_irqsave(&ha->hardware_lock, flags);
903 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
904 if (req->outstanding_cmds[handle] == sp)
907 spin_unlock_irqrestore(&ha->hardware_lock, flags);
909 if (handle == req->num_outstanding_cmds) {
910 /* command not found */
911 return QLA_FUNCTION_FAILED;
914 mcp->mb[0] = MBC_ABORT_COMMAND;
/* Loop id placement depends on extended-ID support: whole word vs
 * high byte of mb[1]. */
915 if (HAS_EXTENDED_IDS(ha))
916 mcp->mb[1] = fcport->loop_id;
918 mcp->mb[1] = fcport->loop_id << 8;
/* 32-bit handle split across mb[2] (low) and mb[3] (high). */
919 mcp->mb[2] = (uint16_t)handle;
920 mcp->mb[3] = (uint16_t)(handle >> 16);
921 mcp->mb[6] = (uint16_t)cmd->device->lun;
922 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
924 mcp->tov = MBX_TOV_SECONDS;
926 rval = qla2x00_mailbox_command(vha, mcp);
928 if (rval != QLA_SUCCESS) {
929 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
932 "Done %s.\n", __func__);
/*
 * Issue a target reset (MBC_ABORT_TARGET) for fcport, then post a marker
 * IOCB so the firmware re-synchronizes outstanding I/O state.
 * 'l' and 'tag' are part of the TM interface; their use is elided in this
 * extract.
 */
939 qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
943 mbx_cmd_t *mcp = &mc;
944 scsi_qla_host_t *vha;
951 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
952 "Entered %s.\n", __func__);
954 req = vha->hw->req_q_map[0];
956 mcp->mb[0] = MBC_ABORT_TARGET;
957 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
/* Extended IDs: full loop id in mb[1]; otherwise high byte of mb[1]. */
958 if (HAS_EXTENDED_IDS(vha->hw)) {
959 mcp->mb[1] = fcport->loop_id;
961 mcp->out_mb |= MBX_10;
963 mcp->mb[1] = fcport->loop_id << 8;
965 mcp->mb[2] = vha->hw->loop_reset_delay;
/* mb[9] carries the virtual-port index. */
966 mcp->mb[9] = vha->vp_idx;
969 mcp->tov = MBX_TOV_SECONDS;
971 rval = qla2x00_mailbox_command(vha, mcp);
972 if (rval != QLA_SUCCESS) {
973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
974 "Failed=%x.\n", rval);
977 /* Issue marker IOCB. */
978 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
/* Marker failure is logged but does not override the reset status. */
980 if (rval2 != QLA_SUCCESS) {
981 ql_dbg(ql_dbg_mbx, vha, 0x1040,
982 "Failed to issue marker IOCB (%x).\n", rval2);
984 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
985 "Done %s.\n", __func__);
/*
 * Issue a LUN reset (MBC_LUN_RESET) for LUN 'l' on fcport, then post a
 * marker IOCB scoped to that LUN. Mirrors qla2x00_abort_target() but with
 * the LUN-level command and mb[3] in the out-mailbox set.
 */
992 qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
996 mbx_cmd_t *mcp = &mc;
997 scsi_qla_host_t *vha;
1003 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1004 "Entered %s.\n", __func__);
1006 req = vha->hw->req_q_map[0];
1008 mcp->mb[0] = MBC_LUN_RESET;
1009 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
/* Loop id placement depends on extended-ID support. */
1010 if (HAS_EXTENDED_IDS(vha->hw))
1011 mcp->mb[1] = fcport->loop_id;
1013 mcp->mb[1] = fcport->loop_id << 8;
/* mb[9] carries the virtual-port index. */
1016 mcp->mb[9] = vha->vp_idx;
1019 mcp->tov = MBX_TOV_SECONDS;
1021 rval = qla2x00_mailbox_command(vha, mcp);
1022 if (rval != QLA_SUCCESS) {
1023 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1026 /* Issue marker IOCB. */
1027 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
/* Marker failure is logged but does not override the reset status. */
1029 if (rval2 != QLA_SUCCESS) {
1030 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1031 "Failed to issue marker IOCB (%x).\n", rval2);
1033 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1034 "Done %s.\n", __func__);
1041 * qla2x00_get_adapter_id
1042 * Get adapter ID and topology.
1045 * ha = adapter block pointer.
1046 * id = pointer for loop ID.
1047 * al_pa = pointer for AL_PA.
1048 * area = pointer for area.
1049 * domain = pointer for domain.
1050 * top = pointer for topology.
1051 * TARGET_QUEUE_LOCK must be released.
1052 * ADAPTER_STATE_LOCK must be released.
1055 * qla2x00 local function return status code.
/*
 * Fetch the adapter's loop id, AL_PA/area/domain address bytes, topology
 * and switch capabilities (MBC_GET_ADAPTER_LOOP_ID). On CNA-capable
 * adapters also captures FCoE VLAN id, FCF index and VN-port MAC from
 * mb[9..13]. MBS_COMMAND_ERROR / MBS_INVALID_COMMAND from firmware are
 * translated to distinct driver status codes for the caller.
 */
1061 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1062 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1066 mbx_cmd_t *mcp = &mc;
1068 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1069 "Entered %s.\n", __func__);
1071 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1072 mcp->mb[9] = vha->vp_idx;
1073 mcp->out_mb = MBX_9|MBX_0;
1074 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1075 if (IS_CNA_CAPABLE(vha->hw))
1076 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1077 mcp->tov = MBX_TOV_SECONDS;
1079 rval = qla2x00_mailbox_command(vha, mcp);
/* Refine the generic failure code using firmware's mb[0] status. */
1080 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1081 rval = QLA_COMMAND_ERROR;
1082 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1083 rval = QLA_INVALID_COMMAND;
/* Port id bytes: mb[2] = area:al_pa, mb[3] low byte = domain. */
1087 *al_pa = LSB(mcp->mb[2]);
1088 *area = MSB(mcp->mb[2]);
1089 *domain = LSB(mcp->mb[3]);
1091 *sw_cap = mcp->mb[7];
1093 if (rval != QLA_SUCCESS) {
1095 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1098 "Done %s.\n", __func__);
/* CNA extras: VLAN id (12 bits of mb[9]), FCF index, and the VN-port
 * MAC packed big-endian-per-word across mb[11..13]. */
1100 if (IS_CNA_CAPABLE(vha->hw)) {
1101 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1102 vha->fcoe_fcf_idx = mcp->mb[10];
1103 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1104 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1105 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1106 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1107 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1108 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1116 * qla2x00_get_retry_cnt
1117 * Get current firmware login retry count and delay.
1120 * ha = adapter block pointer.
1121 * retry_cnt = pointer to login retry count.
1122 * tov = pointer to login timeout value.
1125 * qla2x00 local function return status code.
/*
 * Read the firmware's login retry count and timeout (MBC_GET_RETRY_COUNT)
 * and raise the caller's retry_cnt/tov to the firmware values when those
 * are larger. r_a_tov is returned in the same units as mb[3]/2
 * (mb[3] is in 100ms increments per the comment below).
 */
1131 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1137 mbx_cmd_t *mcp = &mc;
1139 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1140 "Entered %s.\n", __func__);
1142 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1143 mcp->out_mb = MBX_0;
1144 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1145 mcp->tov = MBX_TOV_SECONDS;
1147 rval = qla2x00_mailbox_command(vha, mcp);
1149 if (rval != QLA_SUCCESS) {
1151 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1152 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1154 /* Convert returned data and check our values. */
1155 *r_a_tov = mcp->mb[3] / 2;
1156 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
/* Only overwrite the caller's values if firmware's product
 * (retries * timeout) is larger. */
1157 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1158 /* Update to the larger values */
1159 *retry_cnt = (uint8_t)mcp->mb[1];
1163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1164 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1171 * qla2x00_init_firmware
1172 * Initialize adapter firmware.
1175 * ha = adapter block pointer.
1176 * dptr = Initialization control block pointer.
1177 * size = size of initialization control block.
1178 * TARGET_QUEUE_LOCK must be released.
1179 * ADAPTER_STATE_LOCK must be released.
1182 * qla2x00 local function return status code.
/*
 * Initialize adapter firmware with the init control block at
 * ha->init_cb_dma (MBC_[MID_]INITIALIZE_FIRMWARE; MID form when NPIV is
 * supported). 81xx/83xx with a versioned extended ICB also pass
 * ha->ex_init_cb via mb[10..14]. 'size' is the ICB length (mcp->buf_size).
 */
1188 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1192 mbx_cmd_t *mcp = &mc;
1193 struct qla_hw_data *ha = vha->hw;
1195 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1196 "Entered %s.\n", __func__);
/* 82xx doorbell write when the ql2xdbwr module knob is set. */
1198 if (IS_QLA82XX(ha) && ql2xdbwr)
1199 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
1200 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1202 if (ha->flags.npiv_supported)
1203 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1205 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
/* ICB DMA address: mb[2,3] low dword, mb[6,7] high dword. */
1208 mcp->mb[2] = MSW(ha->init_cb_dma);
1209 mcp->mb[3] = LSW(ha->init_cb_dma);
1210 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1211 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1212 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1213 if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
/* Extended ICB address/size in mb[10..14]. */
1215 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1216 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1217 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1218 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1219 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1220 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1222 /* 1 and 2 should normally be captured. */
1223 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1225 /* mb3 is additional info about the installed SFP. */
1226 mcp->in_mb |= MBX_3;
1227 mcp->buf_size = size;
1228 mcp->flags = MBX_DMA_OUT;
1229 mcp->tov = MBX_TOV_SECONDS;
1230 rval = qla2x00_mailbox_command(vha, mcp);
1232 if (rval != QLA_SUCCESS) {
1234 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1235 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1236 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1239 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1240 "Done %s.\n", __func__);
1247 * qla2x00_get_node_name_list
1248 * Issue get node name list mailbox command, kmalloc()
1249 * and return the resulting list. Caller must kfree() it!
1252 * ha = adapter state pointer.
1253 * out_data = resulting list
1254 * out_len = length of the resulting list
1257 * qla2x00 local function return status code.
/*
 * Retrieve the firmware's node-name list (MBC_PORT_NODE_NAME_LIST) into a
 * kmalloc'd buffer returned via *out_data (length in *out_len).
 * The caller must kfree() *out_data (per the header comment above).
 * A DMA-coherent bounce buffer is used for the mailbox transfer and freed
 * before returning; if firmware reports the buffer was too small
 * (MBS_COMMAND_ERROR / mb[1]==0xA), 'left' is grown and the command is
 * retried (retry loop partially elided in this extract).
 */
1263 qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1265 struct qla_hw_data *ha = vha->hw;
1266 struct qla_port_24xx_data *list = NULL;
1269 dma_addr_t pmap_dma;
1275 dma_size = left * sizeof(*list);
1276 pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
1277 &pmap_dma, GFP_KERNEL);
1279 ql_log(ql_log_warn, vha, 0x113f,
1280 "%s(%ld): DMA Alloc failed of %ld\n",
1281 __func__, vha->host_no, dma_size);
1282 rval = QLA_MEMORY_ALLOC_FAILED;
/* Build the mailbox command: list DMA address in mb[2,3,6,7],
 * buffer size in mb[8]. */
1286 mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
1287 mc.mb[1] = BIT_1 | BIT_3;
1288 mc.mb[2] = MSW(pmap_dma);
1289 mc.mb[3] = LSW(pmap_dma);
1290 mc.mb[6] = MSW(MSD(pmap_dma));
1291 mc.mb[7] = LSW(MSD(pmap_dma));
1292 mc.mb[8] = dma_size;
1293 mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
1294 mc.in_mb = MBX_0|MBX_1;
1296 mc.flags = MBX_DMA_IN;
1298 rval = qla2x00_mailbox_command(vha, &mc);
1299 if (rval != QLA_SUCCESS) {
/* Firmware says buffer too small: mb[2] holds the extra bytes
 * needed; enlarge 'left' and retry. */
1300 if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
1301 (mc.mb[1] == 0xA)) {
1302 left += le16_to_cpu(mc.mb[2]) /
1303 sizeof(struct qla_port_24xx_data);
/* Copy the DMA buffer into a regular kernel allocation for the caller. */
1311 list = kzalloc(dma_size, GFP_KERNEL);
1313 ql_log(ql_log_warn, vha, 0x1140,
1314 "%s(%ld): failed to allocate node names list "
1315 "structure.\n", __func__, vha->host_no);
1316 rval = QLA_MEMORY_ALLOC_FAILED;
1320 memcpy(list, pmap, dma_size);
1322 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1326 *out_len = dma_size;
/* Error path: release the bounce buffer. */
1332 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1337 * qla2x00_get_port_database
1338 * Issue normal/enhanced get port database mailbox command
1339 * and copy device name as necessary.
1342 * ha = adapter state pointer.
1343 * dev = structure pointer.
1344 * opt = enhanced cmd option byte.
1347 * qla2x00 local function return status code.
/*
 * qla2x00_get_port_database() - issue the Get Port Database mailbox
 * command and copy the firmware's view of the remote port (WWNs,
 * port_id, login state, port type, class of service) into @fcport.
 *
 * NOTE(review): several original source lines appear to be missing from
 * this excerpt (NULL checks, else branches, closing braces, final
 * return); comments below describe only what the visible code shows.
 */
1353 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1357 mbx_cmd_t *mcp = &mc;
1358 port_database_t *pd;
1359 struct port_database_24xx *pd24;
1361 struct qla_hw_data *ha = vha->hw;
1363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1364 "Entered %s.\n", __func__);
/* DMA-able buffer for the returned port database. */
1367 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1369 ql_log(ql_log_warn, vha, 0x1050,
1370 "Failed to allocate port database structure.\n");
1371 return QLA_MEMORY_ALLOC_FAILED;
/* Zero whichever of the legacy/24xx layouts is larger. */
1373 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1375 mcp->mb[0] = MBC_GET_PORT_DATABASE;
/* Pre-24xx HBAs use a distinct "enhanced" opcode when options are set. */
1376 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1377 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
/* DMA address of the buffer, split across mailbox registers. */
1378 mcp->mb[2] = MSW(pd_dma);
1379 mcp->mb[3] = LSW(pd_dma);
1380 mcp->mb[6] = MSW(MSD(pd_dma));
1381 mcp->mb[7] = LSW(MSD(pd_dma));
1382 mcp->mb[9] = vha->vp_idx;
1383 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1385 if (IS_FWI2_CAPABLE(ha)) {
1386 mcp->mb[1] = fcport->loop_id;
1388 mcp->out_mb |= MBX_10|MBX_1;
1389 mcp->in_mb |= MBX_1;
1390 } else if (HAS_EXTENDED_IDS(ha)) {
1391 mcp->mb[1] = fcport->loop_id;
1393 mcp->out_mb |= MBX_10|MBX_1;
/* Legacy HBAs pack loop ID (high byte) and options (low byte) into mb[1]. */
1395 mcp->mb[1] = fcport->loop_id << 8 | opt;
1396 mcp->out_mb |= MBX_1;
1398 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1399 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1400 mcp->flags = MBX_DMA_IN;
/* Timeout = 2.5x the firmware login timeout. */
1401 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1402 rval = qla2x00_mailbox_command(vha, mcp);
1403 if (rval != QLA_SUCCESS)
/* 24xx-and-later firmware returns struct port_database_24xx. */
1406 if (IS_FWI2_CAPABLE(ha)) {
1408 pd24 = (struct port_database_24xx *) pd;
1410 /* Check for logged in state. */
1411 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1412 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1413 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1414 "Unable to verify login-state (%x/%x) for "
1415 "loop_id %x.\n", pd24->current_login_state,
1416 pd24->last_login_state, fcport->loop_id);
1417 rval = QLA_FUNCTION_FAILED;
/* Port name mismatch (and non-zero) means a different device now
 * occupies this loop ID. */
1421 if (fcport->loop_id == FC_NO_LOOP_ID ||
1422 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1423 memcmp(fcport->port_name, pd24->port_name, 8))) {
1424 /* We lost the device mid way. */
1425 rval = QLA_NOT_LOGGED_IN;
1429 /* Names are little-endian. */
1430 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1431 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1433 /* Get port_id of device. */
1434 fcport->d_id.b.domain = pd24->port_id[0];
1435 fcport->d_id.b.area = pd24->port_id[1];
1436 fcport->d_id.b.al_pa = pd24->port_id[2];
1437 fcport->d_id.b.rsvd_1 = 0;
1439 /* If not target must be initiator or unknown type. */
1440 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1441 fcport->port_type = FCT_INITIATOR;
1443 fcport->port_type = FCT_TARGET;
1445 /* Passback COS information. */
1446 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1447 FC_COS_CLASS2 : FC_COS_CLASS3;
1449 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1450 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
/* Legacy (pre-24xx) port_database_t layout handled below. */
1454 /* Check for logged in state. */
1455 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1456 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1457 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1458 "Unable to verify login-state (%x/%x) - "
1459 "portid=%02x%02x%02x.\n", pd->master_state,
1460 pd->slave_state, fcport->d_id.b.domain,
1461 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1462 rval = QLA_FUNCTION_FAILED;
1466 if (fcport->loop_id == FC_NO_LOOP_ID ||
1467 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1468 memcmp(fcport->port_name, pd->port_name, 8))) {
1469 /* We lost the device mid way. */
1470 rval = QLA_NOT_LOGGED_IN;
1474 /* Names are little-endian. */
1475 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1476 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1478 /* Get port_id of device. */
1479 fcport->d_id.b.domain = pd->port_id[0];
/* NOTE(review): legacy layout reads area from byte [3] and al_pa from
 * byte [2] (byte [1] is reserved in port_database_t) -- intentional. */
1480 fcport->d_id.b.area = pd->port_id[3];
1481 fcport->d_id.b.al_pa = pd->port_id[2];
1482 fcport->d_id.b.rsvd_1 = 0;
1484 /* If not target must be initiator or unknown type. */
1485 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1486 fcport->port_type = FCT_INITIATOR;
1488 fcport->port_type = FCT_TARGET;
1490 /* Passback COS information. */
1491 fcport->supported_classes = (pd->options & BIT_4) ?
1492 FC_COS_CLASS2: FC_COS_CLASS3;
/* Release the DMA buffer regardless of outcome. */
1496 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1498 if (rval != QLA_SUCCESS) {
1499 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1500 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1501 mcp->mb[0], mcp->mb[1]);
1503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1504 "Done %s.\n", __func__);
1511 * qla2x00_get_firmware_state
1512 * Get adapter firmware state.
1515 * ha = adapter block pointer.
1516 * dptr = pointer for firmware state.
1517 * TARGET_QUEUE_LOCK must be released.
1518 * ADAPTER_STATE_LOCK must be released.
1521 * qla2x00 local function return status code.
/*
 * qla2x00_get_firmware_state() - issue MBC_GET_FIRMWARE_STATE and
 * return the firmware state words in @states (1 word on legacy HBAs,
 * 5 words on FWI2-capable HBAs).
 * NOTE(review): some original lines (declarations/braces) are missing
 * from this excerpt.
 */
1527 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1531 mbx_cmd_t *mcp = &mc;
1533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1534 "Entered %s.\n", __func__);
1536 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1537 mcp->out_mb = MBX_0;
/* FWI2 firmware reports additional state in mb[2..5]. */
1538 if (IS_FWI2_CAPABLE(vha->hw))
1539 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1541 mcp->in_mb = MBX_1|MBX_0;
1542 mcp->tov = MBX_TOV_SECONDS;
1544 rval = qla2x00_mailbox_command(vha, mcp);
1546 /* Return firmware states. */
1547 states[0] = mcp->mb[1];
1548 if (IS_FWI2_CAPABLE(vha->hw)) {
1549 states[1] = mcp->mb[2];
1550 states[2] = mcp->mb[3];
1551 states[3] = mcp->mb[4];
1552 states[4] = mcp->mb[5];
1555 if (rval != QLA_SUCCESS) {
1557 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1561 "Done %s.\n", __func__);
1568 * qla2x00_get_port_name
1569 * Issue get port name mailbox command.
1570 * Returned name is in big endian format.
1573 * ha = adapter block pointer.
1574 * loop_id = loop ID of device.
1575 * name = pointer for name.
1576 * TARGET_QUEUE_LOCK must be released.
1577 * ADAPTER_STATE_LOCK must be released.
1580 * qla2x00 local function return status code.
/*
 * qla2x00_get_port_name() - issue MBC_GET_PORT_NAME for @loop_id and
 * assemble the 8-byte WWPN into @name in big-endian byte order from
 * mailbox registers 2/3/6/7.
 * NOTE(review): some original lines (declarations/braces) are missing
 * from this excerpt.
 */
1586 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1591 mbx_cmd_t *mcp = &mc;
1593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
1594 "Entered %s.\n", __func__);
1596 mcp->mb[0] = MBC_GET_PORT_NAME;
1597 mcp->mb[9] = vha->vp_idx;
1598 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1599 if (HAS_EXTENDED_IDS(vha->hw)) {
1600 mcp->mb[1] = loop_id;
1602 mcp->out_mb |= MBX_10;
/* Legacy HBAs pack loop ID (high byte) and options into mb[1]. */
1604 mcp->mb[1] = loop_id << 8 | opt;
1607 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1608 mcp->tov = MBX_TOV_SECONDS;
1610 rval = qla2x00_mailbox_command(vha, mcp);
1612 if (rval != QLA_SUCCESS) {
1614 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1617 /* This function returns name in big endian. */
1618 name[0] = MSB(mcp->mb[2]);
1619 name[1] = LSB(mcp->mb[2]);
1620 name[2] = MSB(mcp->mb[3]);
1621 name[3] = LSB(mcp->mb[3]);
1622 name[4] = MSB(mcp->mb[6]);
1623 name[5] = LSB(mcp->mb[6]);
1624 name[6] = MSB(mcp->mb[7]);
1625 name[7] = LSB(mcp->mb[7]);
1628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
1629 "Done %s.\n", __func__);
1636 * qla24xx_link_initialization
1637 * Issue link initialization mailbox command.
1640 * ha = adapter block pointer.
1641 * TARGET_QUEUE_LOCK must be released.
1642 * ADAPTER_STATE_LOCK must be released.
1645 * qla2x00 local function return status code.
/*
 * qla24xx_link_initialize() - issue the Link Initialization mailbox
 * command. Only valid on FWI2-capable, non-CNA adapters.
 * NOTE(review): some original lines are missing from this excerpt.
 */
1651 qla24xx_link_initialize(scsi_qla_host_t *vha)
1655 mbx_cmd_t *mcp = &mc;
1657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
1658 "Entered %s.\n", __func__);
/* Command is not supported on pre-FWI2 or converged (CNA) hardware. */
1660 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
1661 return QLA_FUNCTION_FAILED;
1663 mcp->mb[0] = MBC_LINK_INITIALIZATION;
1664 mcp->mb[1] = BIT_6|BIT_4;
1667 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1669 mcp->tov = MBX_TOV_SECONDS;
1671 rval = qla2x00_mailbox_command(vha, mcp);
1673 if (rval != QLA_SUCCESS) {
1674 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
1676 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
1677 "Done %s.\n", __func__);
1685 * Issue LIP reset mailbox command.
1688 * ha = adapter block pointer.
1689 * TARGET_QUEUE_LOCK must be released.
1690 * ADAPTER_STATE_LOCK must be released.
1693 * qla2x00 local function return status code.
/*
 * qla2x00_lip_reset() - reset the loop via LIP. The mailbox opcode and
 * register layout differ by hardware generation: CNA adapters do a full
 * login across all FCFs, FWI2 adapters use LIP full-login, and legacy
 * adapters use MBC_LIP_RESET.
 * NOTE(review): some original lines (else branches/braces) are missing
 * from this excerpt.
 */
1699 qla2x00_lip_reset(scsi_qla_host_t *vha)
1703 mbx_cmd_t *mcp = &mc;
1705 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
1706 "Entered %s.\n", __func__);
1708 if (IS_CNA_CAPABLE(vha->hw)) {
1709 /* Logout across all FCFs. */
1710 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1713 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1714 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1715 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
/* mb[3] carries the loop-reset delay for FWI2 adapters. */
1718 mcp->mb[3] = vha->hw->loop_reset_delay;
1719 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1721 mcp->mb[0] = MBC_LIP_RESET;
1722 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1723 if (HAS_EXTENDED_IDS(vha->hw)) {
1724 mcp->mb[1] = 0x00ff;
1726 mcp->out_mb |= MBX_10;
1728 mcp->mb[1] = 0xff00;
/* Legacy adapters pass the delay in mb[2] instead of mb[3]. */
1730 mcp->mb[2] = vha->hw->loop_reset_delay;
1734 mcp->tov = MBX_TOV_SECONDS;
1736 rval = qla2x00_mailbox_command(vha, mcp);
1738 if (rval != QLA_SUCCESS) {
1740 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
1744 "Done %s.\n", __func__);
1755 * ha = adapter block pointer.
1756 * sns = pointer for command.
1757 * cmd_size = command size.
1758 * buf_size = response/command size.
1759 * TARGET_QUEUE_LOCK must be released.
1760 * ADAPTER_STATE_LOCK must be released.
1763 * qla2x00 local function return status code.
/*
 * qla2x00_send_sns() - send a Simple Name Server (SNS) command whose
 * request/response live in the DMA buffer at @sns_phys_address.
 * @cmd_size:  command size in 16-bit words (firmware convention).
 * @buf_size:  request/response buffer size in bytes.
 * NOTE(review): some original lines are missing from this excerpt.
 */
1769 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1770 uint16_t cmd_size, size_t buf_size)
1774 mbx_cmd_t *mcp = &mc;
1776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
1777 "Entered %s.\n", __func__);
1779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
1780 "Retry cnt=%d ratov=%d total tov=%d.\n",
1781 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1783 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1784 mcp->mb[1] = cmd_size;
1785 mcp->mb[2] = MSW(sns_phys_address);
1786 mcp->mb[3] = LSW(sns_phys_address);
1787 mcp->mb[6] = MSW(MSD(sns_phys_address));
1788 mcp->mb[7] = LSW(MSD(sns_phys_address));
1789 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1790 mcp->in_mb = MBX_0|MBX_1;
1791 mcp->buf_size = buf_size;
/* Buffer is both written (command) and read (response) over DMA. */
1792 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
/* Timeout = 2.5x the firmware login timeout. */
1793 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1794 rval = qla2x00_mailbox_command(vha, mcp);
1796 if (rval != QLA_SUCCESS) {
1798 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1799 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1800 rval, mcp->mb[0], mcp->mb[1]);
1803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
1804 "Done %s.\n", __func__);
/*
 * qla24xx_login_fabric() - perform fabric login on a 24xx-class adapter
 * using a PLOGI login IOCB rather than a mailbox command. Completion
 * status and IO parameters are translated into legacy mailbox-style
 * status values in @mb[] so callers of the 2x00 interface keep working.
 * NOTE(review): many original lines (else branches, declarations,
 * closing braces, break statements) are missing from this excerpt;
 * comments describe only the visible code.
 */
1811 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1812 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1816 struct logio_entry_24xx *lg;
1819 struct qla_hw_data *ha = vha->hw;
1820 struct req_que *req;
1821 struct rsp_que *rsp;
1823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
1824 "Entered %s.\n", __func__);
/* Pick the request queue; with CPU affinity only queue 0 is used here. */
1826 if (ha->flags.cpu_affinity_enabled)
1827 req = ha->req_q_map[0];
/* DMA-able login IOCB. */
1832 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1834 ql_log(ql_log_warn, vha, 0x1062,
1835 "Failed to allocate login IOCB.\n");
1836 return QLA_MEMORY_ALLOC_FAILED;
1838 memset(lg, 0, sizeof(struct logio_entry_24xx));
1840 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1841 lg->entry_count = 1;
1842 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1843 lg->nport_handle = cpu_to_le16(loop_id);
1844 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
/* Option bits select conditional PLOGI / skip-PRLI behavior. */
1846 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
1848 lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
1849 lg->port_id[0] = al_pa;
1850 lg->port_id[1] = area;
1851 lg->port_id[2] = domain;
1852 lg->vp_index = vha->vp_idx;
/* Timeout derived from R_A_TOV (tenths of seconds) plus slack. */
1853 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
1854 (ha->r_a_tov / 10 * 2) + 2);
1855 if (rval != QLA_SUCCESS) {
1856 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1857 "Failed to issue login IOCB (%x).\n", rval);
1858 } else if (lg->entry_status != 0) {
1859 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1860 "Failed to complete IOCB -- error status (%x).\n",
1862 rval = QLA_FUNCTION_FAILED;
1863 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1864 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1865 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1867 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1868 "Failed to complete IOCB -- completion status (%x) "
1869 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Map link-service status codes to legacy mailbox statuses. */
1873 case LSC_SCODE_PORTID_USED:
1874 mb[0] = MBS_PORT_ID_USED;
1875 mb[1] = LSW(iop[1]);
1877 case LSC_SCODE_NPORT_USED:
1878 mb[0] = MBS_LOOP_ID_USED;
1880 case LSC_SCODE_NOLINK:
1881 case LSC_SCODE_NOIOCB:
1882 case LSC_SCODE_NOXCB:
1883 case LSC_SCODE_CMD_FAILED:
1884 case LSC_SCODE_NOFABRIC:
1885 case LSC_SCODE_FW_NOT_READY:
1886 case LSC_SCODE_NOT_LOGGED_IN:
1887 case LSC_SCODE_NOPCB:
1888 case LSC_SCODE_ELS_REJECT:
1889 case LSC_SCODE_CMD_PARAM_ERR:
1890 case LSC_SCODE_NONPORT:
1891 case LSC_SCODE_LOGGED_IN:
1892 case LSC_SCODE_NOFLOGI_ACC:
/* All remaining failure codes collapse to a generic command error. */
1894 mb[0] = MBS_COMMAND_ERROR;
1898 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
1899 "Done %s.\n", __func__);
/* Success path: report completion plus class-of-service bits. */
1901 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1903 mb[0] = MBS_COMMAND_COMPLETE;
1905 if (iop[0] & BIT_4) {
1911 /* Passback COS information. */
1913 if (lg->io_parameter[7] || lg->io_parameter[8])
1914 mb[10] |= BIT_0; /* Class 2. */
1915 if (lg->io_parameter[9] || lg->io_parameter[10])
1916 mb[10] |= BIT_1; /* Class 3. */
1917 if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
1918 mb[10] |= BIT_7; /* Confirmed Completion
1923 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1929 * qla2x00_login_fabric
1930 * Issue login fabric port mailbox command.
1933 * ha = adapter block pointer.
1934 * loop_id = device loop ID.
1935 * domain = device domain.
1936 * area = device area.
1937 * al_pa = device AL_PA.
1938 * status = pointer for return status.
1939 * opt = command options.
1940 * TARGET_QUEUE_LOCK must be released.
1941 * ADAPTER_STATE_LOCK must be released.
1944 * qla2x00 local function return status code.
/*
 * qla2x00_login_fabric() - legacy (pre-24xx) fabric login via the
 * MBC_LOGIN_FABRIC_PORT mailbox command. Mailbox completion values are
 * passed back through @mb so the caller can interpret partial failures.
 * NOTE(review): some original lines (mb[] copy-back, braces) are
 * missing from this excerpt.
 */
1950 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1951 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1955 mbx_cmd_t *mcp = &mc;
1956 struct qla_hw_data *ha = vha->hw;
1958 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
1959 "Entered %s.\n", __func__);
1961 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1962 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1963 if (HAS_EXTENDED_IDS(ha)) {
1964 mcp->mb[1] = loop_id;
1966 mcp->out_mb |= MBX_10;
/* Legacy register layout: loop ID in high byte, options in low byte. */
1968 mcp->mb[1] = (loop_id << 8) | opt;
1970 mcp->mb[2] = domain;
1971 mcp->mb[3] = area << 8 | al_pa;
1973 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
/* Timeout = 2.5x the firmware login timeout. */
1974 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1976 rval = qla2x00_mailbox_command(vha, mcp);
1978 /* Return mailbox statuses. */
1985 /* COS retrieved from Get-Port-Database mailbox command. */
1989 if (rval != QLA_SUCCESS) {
1990 /* RLU tmp code: need to change main mailbox_command function to
1991 * return ok even when the mailbox completion value is not
1992 * SUCCESS. The caller needs to be responsible to interpret
1993 * the return values of this mailbox command if we're not
1994 * to change too much of the existing code.
/* These mailbox completion codes are reported to the caller via mb[]
 * rather than treated as hard failures. */
1996 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
1997 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
1998 mcp->mb[0] == 0x4006)
2002 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2003 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2004 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2008 "Done %s.\n", __func__);
2015 * qla2x00_login_local_device
2016 * Issue login loop port mailbox command.
2019 * ha = adapter block pointer.
2020 * loop_id = device loop ID.
2021 * opt = command options.
2024 * Return status code.
/*
 * qla2x00_login_local_device() - log in to a local-loop port. On
 * FWI2-capable hardware this delegates to qla24xx_login_fabric();
 * otherwise it issues MBC_LOGIN_LOOP_PORT directly. Mailbox completion
 * values are copied to @mb_ret when non-NULL.
 * NOTE(review): some original lines (braces) are missing from this
 * excerpt.
 */
2031 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2032 uint16_t *mb_ret, uint8_t opt)
2036 mbx_cmd_t *mcp = &mc;
2037 struct qla_hw_data *ha = vha->hw;
2039 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2040 "Entered %s.\n", __func__);
/* 24xx-class hardware uses the IOCB-based login path. */
2042 if (IS_FWI2_CAPABLE(ha))
2043 return qla24xx_login_fabric(vha, fcport->loop_id,
2044 fcport->d_id.b.domain, fcport->d_id.b.area,
2045 fcport->d_id.b.al_pa, mb_ret, opt);
2047 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2048 if (HAS_EXTENDED_IDS(ha))
2049 mcp->mb[1] = fcport->loop_id;
2051 mcp->mb[1] = fcport->loop_id << 8;
2053 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2054 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
/* Timeout = 2.5x the firmware login timeout. */
2055 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2057 rval = qla2x00_mailbox_command(vha, mcp);
2059 /* Return mailbox statuses. */
2060 if (mb_ret != NULL) {
2061 mb_ret[0] = mcp->mb[0];
2062 mb_ret[1] = mcp->mb[1];
2063 mb_ret[6] = mcp->mb[6];
2064 mb_ret[7] = mcp->mb[7];
2067 if (rval != QLA_SUCCESS) {
2068 /* AV tmp code: need to change main mailbox_command function to
2069 * return ok even when the mailbox completion value is not
2070 * SUCCESS. The caller needs to be responsible to interpret
2071 * the return values of this mailbox command if we're not
2072 * to change too much of the existing code.
2074 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2077 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2078 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2079 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2083 "Done %s.\n", __func__);
/*
 * qla24xx_fabric_logout() - perform fabric logout on a 24xx-class
 * adapter using a LOGO login/logout IOCB (implicit + free-loop-id).
 * NOTE(review): some original lines (NULL checks, else branches,
 * braces, final return) are missing from this excerpt.
 */
2090 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2091 uint8_t area, uint8_t al_pa)
2094 struct logio_entry_24xx *lg;
2096 struct qla_hw_data *ha = vha->hw;
2097 struct req_que *req;
2098 struct rsp_que *rsp;
2100 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2101 "Entered %s.\n", __func__);
/* DMA-able logout IOCB. */
2103 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2105 ql_log(ql_log_warn, vha, 0x106e,
2106 "Failed to allocate logout IOCB.\n");
2107 return QLA_MEMORY_ALLOC_FAILED;
2109 memset(lg, 0, sizeof(struct logio_entry_24xx));
/* With multiple queues configured, issue on request queue 0. */
2111 if (ql2xmaxqueues > 1)
2112 req = ha->req_q_map[0];
2116 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2117 lg->entry_count = 1;
2118 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2119 lg->nport_handle = cpu_to_le16(loop_id);
2121 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2123 lg->port_id[0] = al_pa;
2124 lg->port_id[1] = area;
2125 lg->port_id[2] = domain;
2126 lg->vp_index = vha->vp_idx;
/* Timeout derived from R_A_TOV (tenths of seconds) plus slack. */
2127 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2128 (ha->r_a_tov / 10 * 2) + 2);
2129 if (rval != QLA_SUCCESS) {
2130 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2131 "Failed to issue logout IOCB (%x).\n", rval);
2132 } else if (lg->entry_status != 0) {
2133 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2134 "Failed to complete IOCB -- error status (%x).\n",
2136 rval = QLA_FUNCTION_FAILED;
2137 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2138 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2139 "Failed to complete IOCB -- completion status (%x) "
2140 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2141 le32_to_cpu(lg->io_parameter[0]),
2142 le32_to_cpu(lg->io_parameter[1]));
2145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2146 "Done %s.\n", __func__);
2149 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2155 * qla2x00_fabric_logout
2156 * Issue logout fabric port mailbox command.
2159 * ha = adapter block pointer.
2160 * loop_id = device loop ID.
2161 * TARGET_QUEUE_LOCK must be released.
2162 * ADAPTER_STATE_LOCK must be released.
2165 * qla2x00 local function return status code.
/*
 * qla2x00_fabric_logout() - legacy fabric logout via the
 * MBC_LOGOUT_FABRIC_PORT mailbox command.
 * NOTE(review): the domain/area/al_pa parameters are not referenced in
 * the visible code; some original lines are missing from this excerpt.
 */
2171 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2172 uint8_t area, uint8_t al_pa)
2176 mbx_cmd_t *mcp = &mc;
2178 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2179 "Entered %s.\n", __func__);
2181 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2182 mcp->out_mb = MBX_1|MBX_0;
2183 if (HAS_EXTENDED_IDS(vha->hw)) {
2184 mcp->mb[1] = loop_id;
2186 mcp->out_mb |= MBX_10;
/* Legacy register layout: loop ID in the high byte of mb[1]. */
2188 mcp->mb[1] = loop_id << 8;
2191 mcp->in_mb = MBX_1|MBX_0;
2192 mcp->tov = MBX_TOV_SECONDS;
2194 rval = qla2x00_mailbox_command(vha, mcp);
2196 if (rval != QLA_SUCCESS) {
2198 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2199 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2202 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2203 "Done %s.\n", __func__);
2210 * qla2x00_full_login_lip
2211 * Issue full login LIP mailbox command.
2214 * ha = adapter block pointer.
2215 * TARGET_QUEUE_LOCK must be released.
2216 * ADAPTER_STATE_LOCK must be released.
2219 * qla2x00 local function return status code.
/*
 * qla2x00_full_login_lip() - issue MBC_LIP_FULL_LOGIN to force a LIP
 * followed by logins to all loop ports.
 * NOTE(review): some original lines are missing from this excerpt.
 */
2225 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2229 mbx_cmd_t *mcp = &mc;
2231 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2232 "Entered %s.\n", __func__);
2234 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
/* FWI2 adapters set BIT_3 in mb[1]; legacy adapters pass 0. */
2235 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2238 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2240 mcp->tov = MBX_TOV_SECONDS;
2242 rval = qla2x00_mailbox_command(vha, mcp);
2244 if (rval != QLA_SUCCESS) {
2246 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2249 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2250 "Done %s.\n", __func__);
2257 * qla2x00_get_id_list
2260 * ha = adapter block pointer.
2263 * qla2x00 local function return status code.
/*
 * qla2x00_get_id_list() - issue MBC_GET_ID_LIST to retrieve the
 * firmware's port ID list into the DMA buffer at @id_list_dma; the
 * entry count is returned through *entries (mb[1]).
 * NOTE(review): the DMA-address register assignment differs between
 * FWI2 and legacy layouts as visible below; some original lines are
 * missing from this excerpt.
 */
2269 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2274 mbx_cmd_t *mcp = &mc;
2276 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2277 "Entered %s.\n", __func__);
2279 if (id_list == NULL)
2280 return QLA_FUNCTION_FAILED;
2282 mcp->mb[0] = MBC_GET_ID_LIST;
2283 mcp->out_mb = MBX_0;
2284 if (IS_FWI2_CAPABLE(vha->hw)) {
2285 mcp->mb[2] = MSW(id_list_dma);
2286 mcp->mb[3] = LSW(id_list_dma);
2287 mcp->mb[6] = MSW(MSD(id_list_dma));
2288 mcp->mb[7] = LSW(MSD(id_list_dma));
2290 mcp->mb[9] = vha->vp_idx;
2291 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
/* Legacy layout uses mb[1..3,6] for the DMA address instead. */
2293 mcp->mb[1] = MSW(id_list_dma);
2294 mcp->mb[2] = LSW(id_list_dma);
2295 mcp->mb[3] = MSW(MSD(id_list_dma));
2296 mcp->mb[6] = LSW(MSD(id_list_dma));
2297 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2299 mcp->in_mb = MBX_1|MBX_0;
2300 mcp->tov = MBX_TOV_SECONDS;
2302 rval = qla2x00_mailbox_command(vha, mcp);
2304 if (rval != QLA_SUCCESS) {
2306 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
/* mb[1] holds the number of returned ID entries. */
2308 *entries = mcp->mb[1];
2309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2310 "Done %s.\n", __func__);
2317 * qla2x00_get_resource_cnts
2318 * Get current firmware resource counts.
2321 * ha = adapter block pointer.
2324 * qla2x00 local function return status code.
/*
 * qla2x00_get_resource_cnts() - issue MBC_GET_RESOURCE_COUNTS and
 * scatter the returned counters into the caller-supplied out-pointers
 * (each of which may be NULL, per the visible guarded assignments).
 * NOTE(review): some original lines (NULL guards for the first few
 * out-pointers) are missing from this excerpt.
 */
2330 qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2331 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
2332 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
2336 mbx_cmd_t *mcp = &mc;
2338 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2339 "Entered %s.\n", __func__);
2341 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2342 mcp->out_mb = MBX_0;
2343 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* 81xx/83xx additionally report the max FCF count in mb[12]. */
2344 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
2345 mcp->in_mb |= MBX_12;
2346 mcp->tov = MBX_TOV_SECONDS;
2348 rval = qla2x00_mailbox_command(vha, mcp);
2350 if (rval != QLA_SUCCESS) {
2352 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2353 "Failed mb[0]=%x.\n", mcp->mb[0]);
2355 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2356 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2357 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2358 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2359 mcp->mb[11], mcp->mb[12]);
/* Copy out counters the caller asked for. */
2362 *cur_xchg_cnt = mcp->mb[3];
2364 *orig_xchg_cnt = mcp->mb[6];
2366 *cur_iocb_cnt = mcp->mb[7];
2368 *orig_iocb_cnt = mcp->mb[10];
2369 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2370 *max_npiv_vports = mcp->mb[11];
2371 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
2372 *max_fcfs = mcp->mb[12];
2379 * qla2x00_get_fcal_position_map
2380 * Get FCAL (LILP) position map using mailbox command
2383 * ha = adapter state pointer.
2384 * pos_map = buffer pointer (can be NULL).
2387 * qla2x00 local function return status code.
/*
 * qla2x00_get_fcal_position_map() - fetch the FC-AL (LILP) position map
 * into a DMA buffer and optionally copy it to @pos_map (may be NULL).
 * NOTE(review): some original lines (NULL checks, braces) are missing
 * from this excerpt.
 */
2393 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2397 mbx_cmd_t *mcp = &mc;
2399 dma_addr_t pmap_dma;
2400 struct qla_hw_data *ha = vha->hw;
2402 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2403 "Entered %s.\n", __func__);
/* DMA-able buffer for the position map. */
2405 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2407 ql_log(ql_log_warn, vha, 0x1080,
2408 "Memory alloc failed.\n");
2409 return QLA_MEMORY_ALLOC_FAILED;
2411 memset(pmap, 0, FCAL_MAP_SIZE);
2413 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2414 mcp->mb[2] = MSW(pmap_dma);
2415 mcp->mb[3] = LSW(pmap_dma);
2416 mcp->mb[6] = MSW(MSD(pmap_dma));
2417 mcp->mb[7] = LSW(MSD(pmap_dma));
2418 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2419 mcp->in_mb = MBX_1|MBX_0;
2420 mcp->buf_size = FCAL_MAP_SIZE;
2421 mcp->flags = MBX_DMA_IN;
/* Timeout = 2.5x the firmware login timeout. */
2422 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2423 rval = qla2x00_mailbox_command(vha, mcp);
2425 if (rval == QLA_SUCCESS) {
2426 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2427 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2428 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2429 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
/* Copy the map out only when the caller supplied a buffer. */
2433 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2435 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2437 if (rval != QLA_SUCCESS) {
2438 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2440 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2441 "Done %s.\n", __func__);
2448 * qla2x00_get_link_status
2451 * ha = adapter block pointer.
2452 * loop_id = device loop ID.
2453 * ret_buf = pointer to link status return buffer.
2457 * BIT_0 = mem alloc error.
2458 * BIT_1 = mailbox error.
/*
 * qla2x00_get_link_status() - issue MBC_GET_LINK_STATUS and convert the
 * returned little-endian counter block into CPU byte order in @stats.
 * The caller supplies the DMA-mapped stats buffer (@stats_dma).
 * NOTE(review): some original lines (else branches, braces, final
 * return) are missing from this excerpt.
 */
2461 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2462 struct link_statistics *stats, dma_addr_t stats_dma)
2466 mbx_cmd_t *mcp = &mc;
2467 uint32_t *siter, *diter, dwords;
2468 struct qla_hw_data *ha = vha->hw;
2470 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2471 "Entered %s.\n", __func__);
2473 mcp->mb[0] = MBC_GET_LINK_STATUS;
2474 mcp->mb[2] = MSW(stats_dma);
2475 mcp->mb[3] = LSW(stats_dma);
2476 mcp->mb[6] = MSW(MSD(stats_dma));
2477 mcp->mb[7] = LSW(MSD(stats_dma));
2478 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Register layout varies by hardware generation. */
2480 if (IS_FWI2_CAPABLE(ha)) {
2481 mcp->mb[1] = loop_id;
2484 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2485 mcp->in_mb |= MBX_1;
2486 } else if (HAS_EXTENDED_IDS(ha)) {
2487 mcp->mb[1] = loop_id;
2489 mcp->out_mb |= MBX_10|MBX_1;
2491 mcp->mb[1] = loop_id << 8;
2492 mcp->out_mb |= MBX_1;
2494 mcp->tov = MBX_TOV_SECONDS;
2495 mcp->flags = IOCTL_CMD;
2496 rval = qla2x00_mailbox_command(vha, mcp);
2498 if (rval == QLA_SUCCESS) {
2499 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2500 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2501 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2502 rval = QLA_FUNCTION_FAILED;
2504 /* Copy over data -- firmware data is LE. */
2505 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2506 "Done %s.\n", __func__);
/* Only the words up to 'unused1' are valid for this command. */
2507 dwords = offsetof(struct link_statistics, unused1) / 4;
/* In-place LE -> CPU conversion over the counter words. */
2508 siter = diter = &stats->link_fail_cnt;
2510 *diter++ = le32_to_cpu(*siter++);
2514 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * qla24xx_get_isp_stats() - issue MBC_GET_LINK_PRIV_STATS (24xx-class)
 * and byte-swap the full link_statistics block from LE to CPU order.
 * NOTE(review): some original lines (braces, loop header, final return)
 * are missing from this excerpt.
 */
2521 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2522 dma_addr_t stats_dma)
2526 mbx_cmd_t *mcp = &mc;
2527 uint32_t *siter, *diter, dwords;
2529 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2530 "Entered %s.\n", __func__);
2532 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2533 mcp->mb[2] = MSW(stats_dma);
2534 mcp->mb[3] = LSW(stats_dma);
2535 mcp->mb[6] = MSW(MSD(stats_dma));
2536 mcp->mb[7] = LSW(MSD(stats_dma));
/* mb[8] carries the transfer size in dwords. */
2537 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2538 mcp->mb[9] = vha->vp_idx;
2540 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2541 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2542 mcp->tov = MBX_TOV_SECONDS;
2543 mcp->flags = IOCTL_CMD;
2544 rval = qla2x00_mailbox_command(vha, mcp);
2546 if (rval == QLA_SUCCESS) {
2547 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2548 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2549 "Failed mb[0]=%x.\n", mcp->mb[0]);
2550 rval = QLA_FUNCTION_FAILED;
2552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2553 "Done %s.\n", __func__);
2554 /* Copy over data -- firmware data is LE. */
2555 dwords = sizeof(struct link_statistics) / 4;
/* In-place LE -> CPU conversion over the counter words. */
2556 siter = diter = &stats->link_fail_cnt;
2558 *diter++ = le32_to_cpu(*siter++);
2562 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * qla24xx_abort_command() - abort an outstanding SRB on a 24xx-class
 * adapter by locating its handle in the request queue's outstanding
 * command table and issuing an abort IOCB for that handle.
 * A zero nport_handle in the completed IOCB indicates success.
 * NOTE(review): some original lines (NULL checks, braces, final return)
 * are missing from this excerpt.
 */
2569 qla24xx_abort_command(srb_t *sp)
2572 unsigned long flags = 0;
2574 struct abort_entry_24xx *abt;
2577 fc_port_t *fcport = sp->fcport;
2578 struct scsi_qla_host *vha = fcport->vha;
2579 struct qla_hw_data *ha = vha->hw;
2580 struct req_que *req = vha->req;
2582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2583 "Entered %s.\n", __func__);
/* Find the firmware handle for this SRB under the hardware lock. */
2585 spin_lock_irqsave(&ha->hardware_lock, flags);
2586 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2587 if (req->outstanding_cmds[handle] == sp)
2590 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2591 if (handle == req->num_outstanding_cmds) {
2592 /* Command not found. */
2593 return QLA_FUNCTION_FAILED;
/* DMA-able abort IOCB. */
2596 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2598 ql_log(ql_log_warn, vha, 0x108d,
2599 "Failed to allocate abort IOCB.\n");
2600 return QLA_MEMORY_ALLOC_FAILED;
2602 memset(abt, 0, sizeof(struct abort_entry_24xx));
2604 abt->entry_type = ABORT_IOCB_TYPE;
2605 abt->entry_count = 1;
2606 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2607 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2608 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2609 abt->port_id[0] = fcport->d_id.b.al_pa;
2610 abt->port_id[1] = fcport->d_id.b.area;
2611 abt->port_id[2] = fcport->d_id.b.domain;
2612 abt->vp_index = fcport->vha->vp_idx;
2614 abt->req_que_no = cpu_to_le16(req->id);
2616 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2617 if (rval != QLA_SUCCESS) {
2618 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2619 "Failed to issue IOCB (%x).\n", rval);
2620 } else if (abt->entry_status != 0) {
2621 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2622 "Failed to complete IOCB -- error status (%x).\n",
2624 rval = QLA_FUNCTION_FAILED;
/* Firmware reports abort status via nport_handle: 0 == success. */
2625 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2626 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2627 "Failed to complete IOCB -- completion status (%x).\n",
2628 le16_to_cpu(abt->nport_handle));
2629 rval = QLA_FUNCTION_FAILED;
2631 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2632 "Done %s.\n", __func__);
2635 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * Task-management request/response overlay: the same DMA buffer is
 * written as a tsk_mgmt_entry and read back as a sts_entry_24xx.
 * NOTE(review): the union wrapper line appears to be missing from this
 * excerpt (members are accessed as tsk->p.tsk / tsk->p.sts elsewhere).
 */
2640 struct tsk_mgmt_cmd {
2642 struct tsk_mgmt_entry tsk;
2643 struct sts_entry_24xx sts;
/*
 * __qla24xx_issue_tmf() - issue a task-management IOCB (@type, e.g.
 * TCF_TARGET_RESET or TCF_LUN_RESET) to @fcport, validate the returned
 * status entry, then post a marker IOCB to resynchronize the firmware.
 * @name is only used in log messages; @l is the LUN for lun resets.
 * NOTE(review): some original lines (declarations, else branches,
 * braces, final return) are missing from this excerpt.
 */
2648 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2649 unsigned int l, int tag)
2652 struct tsk_mgmt_cmd *tsk;
2653 struct sts_entry_24xx *sts;
2655 scsi_qla_host_t *vha;
2656 struct qla_hw_data *ha;
2657 struct req_que *req;
2658 struct rsp_que *rsp;
2664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
2665 "Entered %s.\n", __func__);
/* With CPU affinity, the response queue is selected by @tag. */
2667 if (ha->flags.cpu_affinity_enabled)
2668 rsp = ha->rsp_q_map[tag + 1];
/* DMA-able TM IOCB (request + status overlay). */
2671 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2673 ql_log(ql_log_warn, vha, 0x1093,
2674 "Failed to allocate task management IOCB.\n");
2675 return QLA_MEMORY_ALLOC_FAILED;
2677 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
2679 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2680 tsk->p.tsk.entry_count = 1;
2681 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2682 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
/* Timeout derived from R_A_TOV (tenths of seconds). */
2683 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2684 tsk->p.tsk.control_flags = cpu_to_le32(type);
2685 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2686 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2687 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2688 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
2689 if (type == TCF_LUN_RESET) {
/* LUN field must be in FCP (big-endian) byte order. */
2690 int_to_scsilun(l, &tsk->p.tsk.lun);
2691 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
2692 sizeof(tsk->p.tsk.lun));
2696 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2697 if (rval != QLA_SUCCESS) {
2698 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2699 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2700 } else if (sts->entry_status != 0) {
2701 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2702 "Failed to complete IOCB -- error status (%x).\n",
2704 rval = QLA_FUNCTION_FAILED;
2705 } else if (sts->comp_status !=
2706 __constant_cpu_to_le16(CS_COMPLETE)) {
2707 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2708 "Failed to complete IOCB -- completion status (%x).\n",
2709 le16_to_cpu(sts->comp_status));
2710 rval = QLA_FUNCTION_FAILED;
/* Inspect FCP response info when present; data[3] is the TM status. */
2711 } else if (le16_to_cpu(sts->scsi_status) &
2712 SS_RESPONSE_INFO_LEN_VALID) {
2713 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
2715 "Ignoring inconsistent data length -- not enough "
2716 "response info (%d).\n",
2717 le32_to_cpu(sts->rsp_data_len));
2718 } else if (sts->data[3]) {
2719 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2720 "Failed to complete IOCB -- response (%x).\n",
2722 rval = QLA_FUNCTION_FAILED;
2726 /* Issue marker IOCB. */
2727 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2728 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2729 if (rval2 != QLA_SUCCESS) {
2730 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2731 "Failed to issue marker IOCB (%x).\n", rval2);
2733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
2734 "Done %s.\n", __func__);
2737 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * qla24xx_abort_target() - target reset wrapper: use the async TM path
 * when enabled and supported, otherwise the synchronous TM IOCB path.
 */
2743 qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2745 struct qla_hw_data *ha = fcport->vha->hw;
2747 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2748 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
2750 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * qla24xx_lun_reset() - LUN reset wrapper: use the async TM path when
 * enabled and supported, otherwise the synchronous TM IOCB path.
 */
2754 qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2756 struct qla_hw_data *ha = fcport->vha->hw;
2758 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2759 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
2761 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * qla2x00_system_error() - ask the firmware to generate a system error
 * (MBC_GEN_SYSTEM_ERROR), typically to force a firmware dump. Only
 * supported on 23xx and FWI2-capable adapters.
 * NOTE(review): some original lines are missing from this excerpt.
 */
2765 qla2x00_system_error(scsi_qla_host_t *vha)
2769 mbx_cmd_t *mcp = &mc;
2770 struct qla_hw_data *ha = vha->hw;
2772 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2773 return QLA_FUNCTION_FAILED;
2775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
2776 "Entered %s.\n", __func__);
2778 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2779 mcp->out_mb = MBX_0;
2783 rval = qla2x00_mailbox_command(vha, mcp);
2785 if (rval != QLA_SUCCESS) {
2786 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
2789 "Done %s.\n", __func__);
2796 * qla2x00_set_serdes_params() -
2802 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2803 uint16_t sw_em_2g, uint16_t sw_em_4g)
2807 mbx_cmd_t *mcp = &mc;
2809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
2810 "Entered %s.\n", __func__);
2812 mcp->mb[0] = MBC_SERDES_PARAMS;
2814 mcp->mb[2] = sw_em_1g | BIT_15;
2815 mcp->mb[3] = sw_em_2g | BIT_15;
2816 mcp->mb[4] = sw_em_4g | BIT_15;
2817 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2819 mcp->tov = MBX_TOV_SECONDS;
2821 rval = qla2x00_mailbox_command(vha, mcp);
2823 if (rval != QLA_SUCCESS) {
2825 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2826 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
2830 "Done %s.\n", __func__);
2837 qla2x00_stop_firmware(scsi_qla_host_t *vha)
2841 mbx_cmd_t *mcp = &mc;
2843 if (!IS_FWI2_CAPABLE(vha->hw))
2844 return QLA_FUNCTION_FAILED;
2846 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
2847 "Entered %s.\n", __func__);
2849 mcp->mb[0] = MBC_STOP_FIRMWARE;
2851 mcp->out_mb = MBX_1|MBX_0;
2855 rval = qla2x00_mailbox_command(vha, mcp);
2857 if (rval != QLA_SUCCESS) {
2858 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2859 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2860 rval = QLA_INVALID_COMMAND;
2862 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
2863 "Done %s.\n", __func__);
2870 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2875 mbx_cmd_t *mcp = &mc;
2877 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
2878 "Entered %s.\n", __func__);
2880 if (!IS_FWI2_CAPABLE(vha->hw))
2881 return QLA_FUNCTION_FAILED;
2883 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2884 return QLA_FUNCTION_FAILED;
2886 mcp->mb[0] = MBC_TRACE_CONTROL;
2887 mcp->mb[1] = TC_EFT_ENABLE;
2888 mcp->mb[2] = LSW(eft_dma);
2889 mcp->mb[3] = MSW(eft_dma);
2890 mcp->mb[4] = LSW(MSD(eft_dma));
2891 mcp->mb[5] = MSW(MSD(eft_dma));
2892 mcp->mb[6] = buffers;
2893 mcp->mb[7] = TC_AEN_DISABLE;
2894 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2895 mcp->in_mb = MBX_1|MBX_0;
2896 mcp->tov = MBX_TOV_SECONDS;
2898 rval = qla2x00_mailbox_command(vha, mcp);
2899 if (rval != QLA_SUCCESS) {
2900 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2901 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2902 rval, mcp->mb[0], mcp->mb[1]);
2904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
2905 "Done %s.\n", __func__);
2912 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2916 mbx_cmd_t *mcp = &mc;
2918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
2919 "Entered %s.\n", __func__);
2921 if (!IS_FWI2_CAPABLE(vha->hw))
2922 return QLA_FUNCTION_FAILED;
2924 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2925 return QLA_FUNCTION_FAILED;
2927 mcp->mb[0] = MBC_TRACE_CONTROL;
2928 mcp->mb[1] = TC_EFT_DISABLE;
2929 mcp->out_mb = MBX_1|MBX_0;
2930 mcp->in_mb = MBX_1|MBX_0;
2931 mcp->tov = MBX_TOV_SECONDS;
2933 rval = qla2x00_mailbox_command(vha, mcp);
2934 if (rval != QLA_SUCCESS) {
2935 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2936 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2937 rval, mcp->mb[0], mcp->mb[1]);
2939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
2940 "Done %s.\n", __func__);
2947 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2948 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2952 mbx_cmd_t *mcp = &mc;
2954 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
2955 "Entered %s.\n", __func__);
2957 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
2958 !IS_QLA83XX(vha->hw))
2959 return QLA_FUNCTION_FAILED;
2961 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2962 return QLA_FUNCTION_FAILED;
2964 mcp->mb[0] = MBC_TRACE_CONTROL;
2965 mcp->mb[1] = TC_FCE_ENABLE;
2966 mcp->mb[2] = LSW(fce_dma);
2967 mcp->mb[3] = MSW(fce_dma);
2968 mcp->mb[4] = LSW(MSD(fce_dma));
2969 mcp->mb[5] = MSW(MSD(fce_dma));
2970 mcp->mb[6] = buffers;
2971 mcp->mb[7] = TC_AEN_DISABLE;
2973 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
2974 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
2975 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2977 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2978 mcp->tov = MBX_TOV_SECONDS;
2980 rval = qla2x00_mailbox_command(vha, mcp);
2981 if (rval != QLA_SUCCESS) {
2982 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2983 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2984 rval, mcp->mb[0], mcp->mb[1]);
2986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
2987 "Done %s.\n", __func__);
2990 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
2999 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3003 mbx_cmd_t *mcp = &mc;
3005 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3006 "Entered %s.\n", __func__);
3008 if (!IS_FWI2_CAPABLE(vha->hw))
3009 return QLA_FUNCTION_FAILED;
3011 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3012 return QLA_FUNCTION_FAILED;
3014 mcp->mb[0] = MBC_TRACE_CONTROL;
3015 mcp->mb[1] = TC_FCE_DISABLE;
3016 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3017 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3018 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3020 mcp->tov = MBX_TOV_SECONDS;
3022 rval = qla2x00_mailbox_command(vha, mcp);
3023 if (rval != QLA_SUCCESS) {
3024 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3025 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3026 rval, mcp->mb[0], mcp->mb[1]);
3028 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3029 "Done %s.\n", __func__);
3032 *wr = (uint64_t) mcp->mb[5] << 48 |
3033 (uint64_t) mcp->mb[4] << 32 |
3034 (uint64_t) mcp->mb[3] << 16 |
3035 (uint64_t) mcp->mb[2];
3037 *rd = (uint64_t) mcp->mb[9] << 48 |
3038 (uint64_t) mcp->mb[8] << 32 |
3039 (uint64_t) mcp->mb[7] << 16 |
3040 (uint64_t) mcp->mb[6];
3047 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3048 uint16_t *port_speed, uint16_t *mb)
3052 mbx_cmd_t *mcp = &mc;
3054 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3055 "Entered %s.\n", __func__);
3057 if (!IS_IIDMA_CAPABLE(vha->hw))
3058 return QLA_FUNCTION_FAILED;
3060 mcp->mb[0] = MBC_PORT_PARAMS;
3061 mcp->mb[1] = loop_id;
3062 mcp->mb[2] = mcp->mb[3] = 0;
3063 mcp->mb[9] = vha->vp_idx;
3064 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3065 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3066 mcp->tov = MBX_TOV_SECONDS;
3068 rval = qla2x00_mailbox_command(vha, mcp);
3070 /* Return mailbox statuses. */
3077 if (rval != QLA_SUCCESS) {
3078 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3081 "Done %s.\n", __func__);
3083 *port_speed = mcp->mb[3];
3090 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3091 uint16_t port_speed, uint16_t *mb)
3095 mbx_cmd_t *mcp = &mc;
3097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3098 "Entered %s.\n", __func__);
3100 if (!IS_IIDMA_CAPABLE(vha->hw))
3101 return QLA_FUNCTION_FAILED;
3103 mcp->mb[0] = MBC_PORT_PARAMS;
3104 mcp->mb[1] = loop_id;
3106 if (IS_CNA_CAPABLE(vha->hw))
3107 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3109 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3110 mcp->mb[9] = vha->vp_idx;
3111 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3112 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3113 mcp->tov = MBX_TOV_SECONDS;
3115 rval = qla2x00_mailbox_command(vha, mcp);
3117 /* Return mailbox statuses. */
3124 if (rval != QLA_SUCCESS) {
3125 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3126 "Failed=%x.\n", rval);
3128 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3129 "Done %s.\n", __func__);
3136 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3137 struct vp_rpt_id_entry_24xx *rptid_entry)
3140 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
3141 struct qla_hw_data *ha = vha->hw;
3142 scsi_qla_host_t *vp;
3143 unsigned long flags;
3146 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3147 "Entered %s.\n", __func__);
3149 if (rptid_entry->entry_status != 0)
3152 if (rptid_entry->format == 0) {
3153 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
3154 "Format 0 : Number of VPs setup %d, number of "
3155 "VPs acquired %d.\n",
3156 MSB(le16_to_cpu(rptid_entry->vp_count)),
3157 LSB(le16_to_cpu(rptid_entry->vp_count)));
3158 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
3159 "Primary port id %02x%02x%02x.\n",
3160 rptid_entry->port_id[2], rptid_entry->port_id[1],
3161 rptid_entry->port_id[0]);
3162 } else if (rptid_entry->format == 1) {
3164 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
3165 "Format 1: VP[%d] enabled - status %d - with "
3166 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
3167 rptid_entry->port_id[2], rptid_entry->port_id[1],
3168 rptid_entry->port_id[0]);
3171 if (vp_idx == 0 && (MSB(stat) != 1))
3174 if (MSB(stat) != 0 && MSB(stat) != 2) {
3175 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3176 "Could not acquire ID for VP[%d].\n", vp_idx);
3181 spin_lock_irqsave(&ha->vport_slock, flags);
3182 list_for_each_entry(vp, &ha->vp_list, list) {
3183 if (vp_idx == vp->vp_idx) {
3188 spin_unlock_irqrestore(&ha->vport_slock, flags);
3193 vp->d_id.b.domain = rptid_entry->port_id[2];
3194 vp->d_id.b.area = rptid_entry->port_id[1];
3195 vp->d_id.b.al_pa = rptid_entry->port_id[0];
3198 * Cannot configure here as we are still sitting on the
3199 * response queue. Handle it in dpc context.
3201 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3204 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3205 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3206 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3207 qla2xxx_wake_dpc(vha);
3212 * qla24xx_modify_vp_config
3213 * Change VP configuration for vha
3216 * vha = adapter block pointer.
3219 * qla2xxx local function return status code.
3225 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3228 struct vp_config_entry_24xx *vpmod;
3229 dma_addr_t vpmod_dma;
3230 struct qla_hw_data *ha = vha->hw;
3231 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3233 /* This can be called by the parent */
3235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3236 "Entered %s.\n", __func__);
3238 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3240 ql_log(ql_log_warn, vha, 0x10bc,
3241 "Failed to allocate modify VP IOCB.\n");
3242 return QLA_MEMORY_ALLOC_FAILED;
3245 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
3246 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3247 vpmod->entry_count = 1;
3248 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3249 vpmod->vp_count = 1;
3250 vpmod->vp_index1 = vha->vp_idx;
3251 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3253 qlt_modify_vp_config(vha, vpmod);
3255 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3256 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3257 vpmod->entry_count = 1;
3259 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3260 if (rval != QLA_SUCCESS) {
3261 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3262 "Failed to issue VP config IOCB (%x).\n", rval);
3263 } else if (vpmod->comp_status != 0) {
3264 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3265 "Failed to complete IOCB -- error status (%x).\n",
3266 vpmod->comp_status);
3267 rval = QLA_FUNCTION_FAILED;
3268 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3269 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3270 "Failed to complete IOCB -- completion status (%x).\n",
3271 le16_to_cpu(vpmod->comp_status));
3272 rval = QLA_FUNCTION_FAILED;
3275 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3276 "Done %s.\n", __func__);
3277 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3279 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
3285 * qla24xx_control_vp
3286 * Enable a virtual port for given host
3289 * ha = adapter block pointer.
3290 * vhba = virtual adapter (unused)
3291 * index = index number for enabled VP
3294 * qla2xxx local function return status code.
3300 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3304 struct vp_ctrl_entry_24xx *vce;
3306 struct qla_hw_data *ha = vha->hw;
3307 int vp_index = vha->vp_idx;
3308 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3310 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3311 "Entered %s enabling index %d.\n", __func__, vp_index);
3313 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3314 return QLA_PARAMETER_ERROR;
3316 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3318 ql_log(ql_log_warn, vha, 0x10c2,
3319 "Failed to allocate VP control IOCB.\n");
3320 return QLA_MEMORY_ALLOC_FAILED;
3322 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3324 vce->entry_type = VP_CTRL_IOCB_TYPE;
3325 vce->entry_count = 1;
3326 vce->command = cpu_to_le16(cmd);
3327 vce->vp_count = __constant_cpu_to_le16(1);
3329 /* index map in firmware starts with 1; decrement index
3330 * this is ok as we never use index 0
3332 map = (vp_index - 1) / 8;
3333 pos = (vp_index - 1) & 7;
3334 mutex_lock(&ha->vport_lock);
3335 vce->vp_idx_map[map] |= 1 << pos;
3336 mutex_unlock(&ha->vport_lock);
3338 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3339 if (rval != QLA_SUCCESS) {
3340 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3341 "Failed to issue VP control IOCB (%x).\n", rval);
3342 } else if (vce->entry_status != 0) {
3343 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3344 "Failed to complete IOCB -- error status (%x).\n",
3346 rval = QLA_FUNCTION_FAILED;
3347 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3348 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3349 "Failed to complet IOCB -- completion status (%x).\n",
3350 le16_to_cpu(vce->comp_status));
3351 rval = QLA_FUNCTION_FAILED;
3353 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3354 "Done %s.\n", __func__);
3357 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
3363 * qla2x00_send_change_request
3364 * Receive or disable RSCN request from fabric controller
3367 * ha = adapter block pointer
3368 * format = registration format:
3370 * 1 - Fabric detected registration
3371 * 2 - N_port detected registration
3372 * 3 - Full registration
3373 * FF - clear registration
3374 * vp_idx = Virtual port index
3377 * qla2x00 local function return status code.
3384 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3389 mbx_cmd_t *mcp = &mc;
3391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
3392 "Entered %s.\n", __func__);
3394 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3395 mcp->mb[1] = format;
3396 mcp->mb[9] = vp_idx;
3397 mcp->out_mb = MBX_9|MBX_1|MBX_0;
3398 mcp->in_mb = MBX_0|MBX_1;
3399 mcp->tov = MBX_TOV_SECONDS;
3401 rval = qla2x00_mailbox_command(vha, mcp);
3403 if (rval == QLA_SUCCESS) {
3404 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3414 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3419 mbx_cmd_t *mcp = &mc;
3421 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
3422 "Entered %s.\n", __func__);
3424 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3425 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
3426 mcp->mb[8] = MSW(addr);
3427 mcp->out_mb = MBX_8|MBX_0;
3429 mcp->mb[0] = MBC_DUMP_RISC_RAM;
3430 mcp->out_mb = MBX_0;
3432 mcp->mb[1] = LSW(addr);
3433 mcp->mb[2] = MSW(req_dma);
3434 mcp->mb[3] = LSW(req_dma);
3435 mcp->mb[6] = MSW(MSD(req_dma));
3436 mcp->mb[7] = LSW(MSD(req_dma));
3437 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
3438 if (IS_FWI2_CAPABLE(vha->hw)) {
3439 mcp->mb[4] = MSW(size);
3440 mcp->mb[5] = LSW(size);
3441 mcp->out_mb |= MBX_5|MBX_4;
3443 mcp->mb[4] = LSW(size);
3444 mcp->out_mb |= MBX_4;
3448 mcp->tov = MBX_TOV_SECONDS;
3450 rval = qla2x00_mailbox_command(vha, mcp);
3452 if (rval != QLA_SUCCESS) {
3453 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3454 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
3457 "Done %s.\n", __func__);
3462 /* 84XX Support **************************************************************/
3464 struct cs84xx_mgmt_cmd {
3466 struct verify_chip_entry_84xx req;
3467 struct verify_chip_rsp_84xx rsp;
3472 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3475 struct cs84xx_mgmt_cmd *mn;
3478 unsigned long flags;
3479 struct qla_hw_data *ha = vha->hw;
3481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
3482 "Entered %s.\n", __func__);
3484 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3486 return QLA_MEMORY_ALLOC_FAILED;
3490 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
3491 /* Diagnostic firmware? */
3492 /* options |= MENLO_DIAG_FW; */
3493 /* We update the firmware with only one data sequence. */
3494 options |= VCO_END_OF_DATA;
3498 memset(mn, 0, sizeof(*mn));
3499 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
3500 mn->p.req.entry_count = 1;
3501 mn->p.req.options = cpu_to_le16(options);
3503 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3504 "Dump of Verify Request.\n");
3505 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3506 (uint8_t *)mn, sizeof(*mn));
3508 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3509 if (rval != QLA_SUCCESS) {
3510 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3511 "Failed to issue verify IOCB (%x).\n", rval);
3515 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3516 "Dump of Verify Response.\n");
3517 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3518 (uint8_t *)mn, sizeof(*mn));
3520 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3521 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3522 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
3524 "cs=%x fc=%x.\n", status[0], status[1]);
3526 if (status[0] != CS_COMPLETE) {
3527 rval = QLA_FUNCTION_FAILED;
3528 if (!(options & VCO_DONT_UPDATE_FW)) {
3529 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3530 "Firmware update failed. Retrying "
3531 "without update firmware.\n");
3532 options |= VCO_DONT_UPDATE_FW;
3533 options &= ~VCO_FORCE_UPDATE;
3537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
3538 "Firmware updated to %x.\n",
3539 le32_to_cpu(mn->p.rsp.fw_ver));
3541 /* NOTE: we only update OP firmware. */
3542 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
3543 ha->cs84xx->op_fw_version =
3544 le32_to_cpu(mn->p.rsp.fw_ver);
3545 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
3551 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3553 if (rval != QLA_SUCCESS) {
3554 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
3555 "Failed=%x.\n", rval);
3557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
3558 "Done %s.\n", __func__);
3565 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3568 unsigned long flags;
3570 mbx_cmd_t *mcp = &mc;
3571 struct device_reg_25xxmq __iomem *reg;
3572 struct qla_hw_data *ha = vha->hw;
3574 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
3575 "Entered %s.\n", __func__);
3577 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3578 mcp->mb[1] = req->options;
3579 mcp->mb[2] = MSW(LSD(req->dma));
3580 mcp->mb[3] = LSW(LSD(req->dma));
3581 mcp->mb[6] = MSW(MSD(req->dma));
3582 mcp->mb[7] = LSW(MSD(req->dma));
3583 mcp->mb[5] = req->length;
3585 mcp->mb[10] = req->rsp->id;
3586 mcp->mb[12] = req->qos;
3587 mcp->mb[11] = req->vp_idx;
3588 mcp->mb[13] = req->rid;
3592 reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
3593 QLA_QUE_PAGE * req->id);
3595 mcp->mb[4] = req->id;
3596 /* que in ptr index */
3598 /* que out ptr index */
3600 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3601 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3603 mcp->flags = MBX_DMA_OUT;
3604 mcp->tov = MBX_TOV_SECONDS * 2;
3606 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3607 mcp->in_mb |= MBX_1;
3608 if (IS_QLA83XX(ha)) {
3609 mcp->out_mb |= MBX_15;
3610 /* debug q create issue in SR-IOV */
3611 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3614 spin_lock_irqsave(&ha->hardware_lock, flags);
3615 if (!(req->options & BIT_0)) {
3616 WRT_REG_DWORD(®->req_q_in, 0);
3617 if (!IS_QLA83XX(ha))
3618 WRT_REG_DWORD(®->req_q_out, 0);
3620 req->req_q_in = ®->req_q_in;
3621 req->req_q_out = ®->req_q_out;
3622 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3624 rval = qla2x00_mailbox_command(vha, mcp);
3625 if (rval != QLA_SUCCESS) {
3626 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3627 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
3630 "Done %s.\n", __func__);
3637 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3640 unsigned long flags;
3642 mbx_cmd_t *mcp = &mc;
3643 struct device_reg_25xxmq __iomem *reg;
3644 struct qla_hw_data *ha = vha->hw;
3646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
3647 "Entered %s.\n", __func__);
3649 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3650 mcp->mb[1] = rsp->options;
3651 mcp->mb[2] = MSW(LSD(rsp->dma));
3652 mcp->mb[3] = LSW(LSD(rsp->dma));
3653 mcp->mb[6] = MSW(MSD(rsp->dma));
3654 mcp->mb[7] = LSW(MSD(rsp->dma));
3655 mcp->mb[5] = rsp->length;
3656 mcp->mb[14] = rsp->msix->entry;
3657 mcp->mb[13] = rsp->rid;
3661 reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
3662 QLA_QUE_PAGE * rsp->id);
3664 mcp->mb[4] = rsp->id;
3665 /* que in ptr index */
3667 /* que out ptr index */
3669 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3670 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3672 mcp->flags = MBX_DMA_OUT;
3673 mcp->tov = MBX_TOV_SECONDS * 2;
3675 if (IS_QLA81XX(ha)) {
3676 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
3677 mcp->in_mb |= MBX_1;
3678 } else if (IS_QLA83XX(ha)) {
3679 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
3680 mcp->in_mb |= MBX_1;
3681 /* debug q create issue in SR-IOV */
3682 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3685 spin_lock_irqsave(&ha->hardware_lock, flags);
3686 if (!(rsp->options & BIT_0)) {
3687 WRT_REG_DWORD(®->rsp_q_out, 0);
3688 if (!IS_QLA83XX(ha))
3689 WRT_REG_DWORD(®->rsp_q_in, 0);
3692 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3694 rval = qla2x00_mailbox_command(vha, mcp);
3695 if (rval != QLA_SUCCESS) {
3696 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3697 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3699 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
3700 "Done %s.\n", __func__);
3707 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3711 mbx_cmd_t *mcp = &mc;
3713 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
3714 "Entered %s.\n", __func__);
3716 mcp->mb[0] = MBC_IDC_ACK;
3717 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3718 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3720 mcp->tov = MBX_TOV_SECONDS;
3722 rval = qla2x00_mailbox_command(vha, mcp);
3724 if (rval != QLA_SUCCESS) {
3725 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3726 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3728 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
3729 "Done %s.\n", __func__);
3736 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3740 mbx_cmd_t *mcp = &mc;
3742 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
3743 "Entered %s.\n", __func__);
3745 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3746 return QLA_FUNCTION_FAILED;
3748 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3749 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3750 mcp->out_mb = MBX_1|MBX_0;
3751 mcp->in_mb = MBX_1|MBX_0;
3752 mcp->tov = MBX_TOV_SECONDS;
3754 rval = qla2x00_mailbox_command(vha, mcp);
3756 if (rval != QLA_SUCCESS) {
3757 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3758 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3759 rval, mcp->mb[0], mcp->mb[1]);
3761 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
3762 "Done %s.\n", __func__);
3763 *sector_size = mcp->mb[1];
3770 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3774 mbx_cmd_t *mcp = &mc;
3776 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3777 return QLA_FUNCTION_FAILED;
3779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
3780 "Entered %s.\n", __func__);
3782 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3783 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
3784 FAC_OPT_CMD_WRITE_PROTECT;
3785 mcp->out_mb = MBX_1|MBX_0;
3786 mcp->in_mb = MBX_1|MBX_0;
3787 mcp->tov = MBX_TOV_SECONDS;
3789 rval = qla2x00_mailbox_command(vha, mcp);
3791 if (rval != QLA_SUCCESS) {
3792 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3793 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3794 rval, mcp->mb[0], mcp->mb[1]);
3796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
3797 "Done %s.\n", __func__);
3804 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3808 mbx_cmd_t *mcp = &mc;
3810 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3811 return QLA_FUNCTION_FAILED;
3813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
3814 "Entered %s.\n", __func__);
3816 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3817 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
3818 mcp->mb[2] = LSW(start);
3819 mcp->mb[3] = MSW(start);
3820 mcp->mb[4] = LSW(finish);
3821 mcp->mb[5] = MSW(finish);
3822 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3823 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3824 mcp->tov = MBX_TOV_SECONDS;
3826 rval = qla2x00_mailbox_command(vha, mcp);
3828 if (rval != QLA_SUCCESS) {
3829 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3830 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3831 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3833 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
3834 "Done %s.\n", __func__);
3841 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3845 mbx_cmd_t *mcp = &mc;
3847 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
3848 "Entered %s.\n", __func__);
3850 mcp->mb[0] = MBC_RESTART_MPI_FW;
3851 mcp->out_mb = MBX_0;
3852 mcp->in_mb = MBX_0|MBX_1;
3853 mcp->tov = MBX_TOV_SECONDS;
3855 rval = qla2x00_mailbox_command(vha, mcp);
3857 if (rval != QLA_SUCCESS) {
3858 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3859 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3860 rval, mcp->mb[0], mcp->mb[1]);
3862 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
3863 "Done %s.\n", __func__);
3870 qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
3874 mbx_cmd_t *mcp = &mc;
3879 struct qla_hw_data *ha = vha->hw;
3881 if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
3882 return QLA_FUNCTION_FAILED;
3884 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
3885 "Entered %s.\n", __func__);
3887 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
3889 ql_log(ql_log_warn, vha, 0x1156,
3890 "Failed to allocate driver version param.\n");
3891 return QLA_MEMORY_ALLOC_FAILED;
3894 memcpy(str, "\x7\x3\x11\x0", 4);
3896 len = dwlen * sizeof(uint32_t) - 4;
3897 memset(str + 4, 0, len);
3898 if (len > strlen(version))
3899 len = strlen(version);
3900 memcpy(str + 4, version, len);
3902 mcp->mb[0] = MBC_SET_RNID_PARAMS;
3903 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
3904 mcp->mb[2] = MSW(LSD(str_dma));
3905 mcp->mb[3] = LSW(LSD(str_dma));
3906 mcp->mb[6] = MSW(MSD(str_dma));
3907 mcp->mb[7] = LSW(MSD(str_dma));
3908 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3910 mcp->tov = MBX_TOV_SECONDS;
3912 rval = qla2x00_mailbox_command(vha, mcp);
3914 if (rval != QLA_SUCCESS) {
3915 ql_dbg(ql_dbg_mbx, vha, 0x1157,
3916 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
3919 "Done %s.\n", __func__);
3922 dma_pool_free(ha->s_dma_pool, str, str_dma);
3928 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
3932 mbx_cmd_t *mcp = &mc;
3934 if (!IS_FWI2_CAPABLE(vha->hw))
3935 return QLA_FUNCTION_FAILED;
3937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
3938 "Entered %s.\n", __func__);
3940 mcp->mb[0] = MBC_GET_RNID_PARAMS;
3941 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
3942 mcp->out_mb = MBX_1|MBX_0;
3943 mcp->in_mb = MBX_1|MBX_0;
3944 mcp->tov = MBX_TOV_SECONDS;
3946 rval = qla2x00_mailbox_command(vha, mcp);
3949 if (rval != QLA_SUCCESS) {
3950 ql_dbg(ql_dbg_mbx, vha, 0x115a,
3951 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
3953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
3954 "Done %s.\n", __func__);
3961 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3962 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3966 mbx_cmd_t *mcp = &mc;
3967 struct qla_hw_data *ha = vha->hw;
3969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
3970 "Entered %s.\n", __func__);
3972 if (!IS_FWI2_CAPABLE(ha))
3973 return QLA_FUNCTION_FAILED;
3978 mcp->mb[0] = MBC_READ_SFP;
3980 mcp->mb[2] = MSW(sfp_dma);
3981 mcp->mb[3] = LSW(sfp_dma);
3982 mcp->mb[6] = MSW(MSD(sfp_dma));
3983 mcp->mb[7] = LSW(MSD(sfp_dma));
3987 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3988 mcp->in_mb = MBX_1|MBX_0;
3989 mcp->tov = MBX_TOV_SECONDS;
3991 rval = qla2x00_mailbox_command(vha, mcp);
3996 if (rval != QLA_SUCCESS) {
3997 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3998 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4001 "Done %s.\n", __func__);
4008 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4009 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4013 mbx_cmd_t *mcp = &mc;
4014 struct qla_hw_data *ha = vha->hw;
4016 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4017 "Entered %s.\n", __func__);
4019 if (!IS_FWI2_CAPABLE(ha))
4020 return QLA_FUNCTION_FAILED;
4028 mcp->mb[0] = MBC_WRITE_SFP;
4030 mcp->mb[2] = MSW(sfp_dma);
4031 mcp->mb[3] = LSW(sfp_dma);
4032 mcp->mb[6] = MSW(MSD(sfp_dma));
4033 mcp->mb[7] = LSW(MSD(sfp_dma));
4037 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4038 mcp->in_mb = MBX_1|MBX_0;
4039 mcp->tov = MBX_TOV_SECONDS;
4041 rval = qla2x00_mailbox_command(vha, mcp);
4043 if (rval != QLA_SUCCESS) {
4044 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4045 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4047 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4048 "Done %s.\n", __func__);
4055 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4056 uint16_t size_in_bytes, uint16_t *actual_size)
4060 mbx_cmd_t *mcp = &mc;
4062 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4063 "Entered %s.\n", __func__);
4065 if (!IS_CNA_CAPABLE(vha->hw))
4066 return QLA_FUNCTION_FAILED;
4068 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4069 mcp->mb[2] = MSW(stats_dma);
4070 mcp->mb[3] = LSW(stats_dma);
4071 mcp->mb[6] = MSW(MSD(stats_dma));
4072 mcp->mb[7] = LSW(MSD(stats_dma));
4073 mcp->mb[8] = size_in_bytes >> 2;
4074 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4075 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4076 mcp->tov = MBX_TOV_SECONDS;
4078 rval = qla2x00_mailbox_command(vha, mcp);
4080 if (rval != QLA_SUCCESS) {
4081 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4082 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4083 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4086 "Done %s.\n", __func__);
4089 *actual_size = mcp->mb[2] << 2;
4096 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4101 mbx_cmd_t *mcp = &mc;
4103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4104 "Entered %s.\n", __func__);
4106 if (!IS_CNA_CAPABLE(vha->hw))
4107 return QLA_FUNCTION_FAILED;
4109 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4111 mcp->mb[2] = MSW(tlv_dma);
4112 mcp->mb[3] = LSW(tlv_dma);
4113 mcp->mb[6] = MSW(MSD(tlv_dma));
4114 mcp->mb[7] = LSW(MSD(tlv_dma));
4116 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4117 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4118 mcp->tov = MBX_TOV_SECONDS;
4120 rval = qla2x00_mailbox_command(vha, mcp);
4122 if (rval != QLA_SUCCESS) {
4123 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4124 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4125 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4127 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4128 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word
 *	Reads one 32-bit word of RISC RAM at 'risc_addr' via the
 *	MBC_READ_RAM_EXTENDED mailbox command (FWI2-capable ISPs only).
 *	The result is assembled from mb[3] (high 16 bits) and mb[2]
 *	(low 16 bits) into *data.
 * NOTE(review): extract elides some lines (declarations, tov setup,
 * closing brace); verify against the complete source file.
 */
4135 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4139 mbx_cmd_t *mcp = &mc;
4141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4142 "Entered %s.\n", __func__);
4144 if (!IS_FWI2_CAPABLE(vha->hw))
4145 return QLA_FUNCTION_FAILED;
4147 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
/* 32-bit RISC address split across mb[1] (low) and mb[8] (high). */
4148 mcp->mb[1] = LSW(risc_addr);
4149 mcp->mb[8] = MSW(risc_addr);
4150 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4151 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4154 rval = qla2x00_mailbox_command(vha, mcp);
4155 if (rval != QLA_SUCCESS) {
4156 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4157 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4159 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4160 "Done %s.\n", __func__);
/* Combine returned halves into the caller's 32-bit word. */
4161 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test
 *	Runs the MBC_DIAGNOSTIC_LOOP_BACK mailbox diagnostic: transmits
 *	'mreq->transfer_size' bytes from mreq->send_dma and receives into
 *	mreq->rcv_dma, for a single iteration (iter_cnt = 1). BIT_6 in
 *	mb[1] selects 64-bit DMA addressing. On completion the full
 *	mailbox register set is copied to 'mresp' for the caller.
 * NOTE(review): extract elides some lines (declarations, mb[2]/mb[12]/
 * mb[13] setup, closing braces); verify against the complete file.
 */
4168 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4173 mbx_cmd_t *mcp = &mc;
4174 uint32_t iter_cnt = 0x1;
4176 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4177 "Entered %s.\n", __func__);
4179 memset(mcp->mb, 0 , sizeof(mcp->mb));
4180 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4181 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
4183 /* transfer count */
4184 mcp->mb[10] = LSW(mreq->transfer_size);
4185 mcp->mb[11] = MSW(mreq->transfer_size);
4187 /* send data address */
4188 mcp->mb[14] = LSW(mreq->send_dma);
4189 mcp->mb[15] = MSW(mreq->send_dma);
4190 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4191 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4193 /* receive data address */
4194 mcp->mb[16] = LSW(mreq->rcv_dma);
4195 mcp->mb[17] = MSW(mreq->rcv_dma);
4196 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4197 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4199 /* Iteration count */
4200 mcp->mb[18] = LSW(iter_cnt);
4201 mcp->mb[19] = MSW(iter_cnt);
4203 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4204 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
/* CNA adapters take an extra outgoing register (mb[2]). */
4205 if (IS_CNA_CAPABLE(vha->hw))
4206 mcp->out_mb |= MBX_2;
4207 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4209 mcp->buf_size = mreq->transfer_size;
4210 mcp->tov = MBX_TOV_SECONDS;
4211 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4213 rval = qla2x00_mailbox_command(vha, mcp);
4215 if (rval != QLA_SUCCESS) {
4216 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4217 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4218 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4219 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4222 "Done %s.\n", __func__);
4225 /* Copy mailbox information */
4226 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test
 *	Runs the MBC_DIAGNOSTIC_ECHO mailbox diagnostic: sends
 *	'mreq->transfer_size' bytes from mreq->send_dma and receives the
 *	echo into mreq->rcv_dma. On CNA adapters, BIT_15 in mb[1] is set
 *	and mb[2] carries the FCoE FCF index. The returned mailbox register
 *	set is copied to 'mresp'.
 * NOTE(review): extract elides some lines (declarations, base in_mb
 * setup, closing braces); verify against the complete source file.
 */
4231 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4236 mbx_cmd_t *mcp = &mc;
4237 struct qla_hw_data *ha = vha->hw;
4239 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4240 "Entered %s.\n", __func__);
4242 memset(mcp->mb, 0 , sizeof(mcp->mb));
4243 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4244 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
4245 if (IS_CNA_CAPABLE(ha)) {
4246 mcp->mb[1] |= BIT_15;
4247 mcp->mb[2] = vha->fcoe_fcf_idx;
/* Receive buffer: 64-bit DMA address split across mb[16,17,6,7]. */
4249 mcp->mb[16] = LSW(mreq->rcv_dma);
4250 mcp->mb[17] = MSW(mreq->rcv_dma);
4251 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4252 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4254 mcp->mb[10] = LSW(mreq->transfer_size);
/* Send buffer: 64-bit DMA address split across mb[14,15,20,21]. */
4256 mcp->mb[14] = LSW(mreq->send_dma);
4257 mcp->mb[15] = MSW(mreq->send_dma);
4258 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4259 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4261 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
4262 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4263 if (IS_CNA_CAPABLE(ha))
4264 mcp->out_mb |= MBX_2;
/* Additional incoming registers depend on the ISP family. */
4267 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
4268 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4269 mcp->in_mb |= MBX_1;
4270 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4271 mcp->in_mb |= MBX_3;
4273 mcp->tov = MBX_TOV_SECONDS;
4274 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4275 mcp->buf_size = mreq->transfer_size;
4277 rval = qla2x00_mailbox_command(vha, mcp);
4279 if (rval != QLA_SUCCESS) {
4280 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
4281 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4282 rval, mcp->mb[0], mcp->mb[1]);
4284 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4285 "Done %s.\n", __func__);
4288 /* Copy mailbox information */
4289 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip
 *	Issues MBC_ISP84XX_RESET to reset the ISP84xx chip; mb[1] carries
 *	the 'enable_diagnostic' flag. Logs failure with rval only.
 * NOTE(review): extract elides some lines (declarations, return);
 * verify against the complete source file.
 */
4294 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
4298 mbx_cmd_t *mcp = &mc;
4300 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
4301 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
4303 mcp->mb[0] = MBC_ISP84XX_RESET;
4304 mcp->mb[1] = enable_diagnostic;
4305 mcp->out_mb = MBX_1|MBX_0;
4306 mcp->in_mb = MBX_1|MBX_0;
4307 mcp->tov = MBX_TOV_SECONDS;
4308 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4309 rval = qla2x00_mailbox_command(vha, mcp);
4311 if (rval != QLA_SUCCESS)
4312 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
4314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4315 "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word
 *	Writes one 32-bit word 'data' to RISC RAM at 'risc_addr' via the
 *	MBC_WRITE_RAM_WORD_EXTENDED mailbox command (FWI2-capable ISPs
 *	only). Address is split across mb[1]/mb[8], data across mb[2]/mb[3].
 * NOTE(review): extract elides some lines (declarations, in_mb/tov
 * setup, closing brace); verify against the complete source file.
 */
4321 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
4325 mbx_cmd_t *mcp = &mc;
4327 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4328 "Entered %s.\n", __func__);
4330 if (!IS_FWI2_CAPABLE(vha->hw))
4331 return QLA_FUNCTION_FAILED;
4333 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
4334 mcp->mb[1] = LSW(risc_addr);
4335 mcp->mb[2] = LSW(data);
4336 mcp->mb[3] = MSW(data);
4337 mcp->mb[8] = MSW(risc_addr);
4338 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
4342 rval = qla2x00_mailbox_command(vha, mcp);
4343 if (rval != QLA_SUCCESS) {
4344 ql_dbg(ql_dbg_mbx, vha, 0x1101,
4345 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4347 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
4348 "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register
 *	Issues MBC_WRITE_MPI_REGISTER by writing mailbox registers
 *	directly (bypassing the qla2x00_mailbox_command() framework) and
 *	then polling host_status for a RISC interrupt. On a recognized
 *	mailbox-completion status (0x1, 0x2, 0x10, 0x11) it records
 *	MBX_INTERRUPT, captures mb0 and clears the RISC interrupt.
 *	Returns the mailbox completion status (mb0 & MBS_MASK) or
 *	QLA_FUNCTION_FAILED if no completion was observed.
 * NOTE(review): extract elides some lines (declarations, poll-loop
 * delay/exit, closing braces); verify against the complete file.
 */
4355 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4358 uint32_t stat, timer;
4360 struct qla_hw_data *ha = vha->hw;
4361 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
4366 "Entered %s.\n", __func__);
4368 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
4370 /* Write the MBC data to the registers */
4371 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
4372 WRT_REG_WORD(&reg->mailbox1, mb[0]);
4373 WRT_REG_WORD(&reg->mailbox2, mb[1]);
4374 WRT_REG_WORD(&reg->mailbox3, mb[2]);
4375 WRT_REG_WORD(&reg->mailbox4, mb[3]);
/* Ring the doorbell: raise the host-to-RISC interrupt. */
4377 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
4379 /* Poll for MBC interrupt */
4380 for (timer = 6000000; timer; timer--) {
4381 /* Check for pending interrupts. */
4382 stat = RD_REG_DWORD(&reg->host_status);
4383 if (stat & HSRX_RISC_INT) {
/* Mailbox-completion status codes only; others fall through. */
4386 if (stat == 0x1 || stat == 0x2 ||
4387 stat == 0x10 || stat == 0x11) {
4388 set_bit(MBX_INTERRUPT,
4389 &ha->mbx_cmd_flags);
4390 mb0 = RD_REG_WORD(&reg->mailbox0);
4391 WRT_REG_DWORD(&reg->hccr,
4392 HCCRX_CLR_RISC_INT);
4393 RD_REG_DWORD(&reg->hccr);
4400 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
4401 rval = mb0 & MBS_MASK;
4403 rval = QLA_FUNCTION_FAILED;
4405 if (rval != QLA_SUCCESS) {
4406 ql_dbg(ql_dbg_mbx, vha, 0x1104,
4407 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
4409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
4410 "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate
 *	Issues MBC_DATA_RATE (FWI2-capable ISPs only) and, on success,
 *	stores the reported rate from mb[1] into ha->link_data_rate —
 *	unless mb[1] == 0x7, which is treated as not-updatable here.
 * NOTE(review): extract elides some lines (declarations, mb[1] request
 * setup, the conditional guarding the MBX_3 in_mb addition, closing
 * braces); verify against the complete source file.
 */
4417 qla2x00_get_data_rate(scsi_qla_host_t *vha)
4421 mbx_cmd_t *mcp = &mc;
4422 struct qla_hw_data *ha = vha->hw;
4424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
4425 "Entered %s.\n", __func__);
4427 if (!IS_FWI2_CAPABLE(ha))
4428 return QLA_FUNCTION_FAILED;
4430 mcp->mb[0] = MBC_DATA_RATE;
4432 mcp->out_mb = MBX_1|MBX_0;
4433 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4435 mcp->in_mb |= MBX_3;
4436 mcp->tov = MBX_TOV_SECONDS;
4438 rval = qla2x00_mailbox_command(vha, mcp);
4439 if (rval != QLA_SUCCESS) {
4440 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4441 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4443 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
4444 "Done %s.\n", __func__);
/* 0x7 appears to be a sentinel here; rate is not recorded for it. */
4445 if (mcp->mb[1] != 0x7)
4446 ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config
 *	Issues MBC_GET_PORT_CONFIG (QLA81xx/QLA83xx only) and copies the
 *	four returned configuration words mb[1..4] into the caller's 'mb'
 *	array, preserving all bits.
 * NOTE(review): extract elides some lines (declarations, closing
 * braces); verify against the complete source file.
 */
4453 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4457 mbx_cmd_t *mcp = &mc;
4458 struct qla_hw_data *ha = vha->hw;
4460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4461 "Entered %s.\n", __func__);
4463 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
4464 return QLA_FUNCTION_FAILED;
4465 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4466 mcp->out_mb = MBX_0;
4467 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4468 mcp->tov = MBX_TOV_SECONDS;
4471 rval = qla2x00_mailbox_command(vha, mcp);
4473 if (rval != QLA_SUCCESS) {
4474 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4475 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4477 /* Copy all bits to preserve original value */
4478 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
4481 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config
 *	Issues MBC_SET_PORT_CONFIG with the four configuration words from
 *	the caller's 'mb' array copied into mb[1..4] unchanged.
 * NOTE(review): extract elides some lines (declarations, in_mb setup,
 * closing braces); verify against the complete source file.
 */
4487 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4491 mbx_cmd_t *mcp = &mc;
4493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
4494 "Entered %s.\n", __func__);
4496 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4497 /* Copy all bits to preserve original setting */
4498 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
4499 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4501 mcp->tov = MBX_TOV_SECONDS;
4503 rval = qla2x00_mailbox_command(vha, mcp);
4505 if (rval != QLA_SUCCESS) {
4506 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4507 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
4510 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio
 *	Issues MBC_PORT_PARAMS to set the FCP priority (low nibble of
 *	'priority', mb[4]) for the port at 'loop_id' on the vport
 *	identified by vha->vp_idx (mb[9]). QLA24xx/QLA25xx only.
 * NOTE(review): extract elides some lines (declarations, the mb[2]
 * enable/disable value controlled by fcp_prio_enabled, closing braces);
 * verify against the complete source file.
 */
4517 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4522 mbx_cmd_t *mcp = &mc;
4523 struct qla_hw_data *ha = vha->hw;
4525 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
4526 "Entered %s.\n", __func__);
4528 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4529 return QLA_FUNCTION_FAILED;
4531 mcp->mb[0] = MBC_PORT_PARAMS;
4532 mcp->mb[1] = loop_id;
4533 if (ha->flags.fcp_prio_enabled)
/* Only the low 4 bits of the priority value are meaningful. */
4537 mcp->mb[4] = priority & 0xf;
4538 mcp->mb[9] = vha->vp_idx;
4539 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4540 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4543 rval = qla2x00_mailbox_command(vha, mcp);
4551 if (rval != QLA_SUCCESS) {
4552 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
4555 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp
 *	Reads the adapter temperature into *temp, trying the supported
 *	methods in order: first the I2C/SFP path (qla2x00_read_sfp at
 *	device 0x98, offset 0x1), then the ISP path
 *	(qla2x00_read_asic_temperature). Each method that fails is logged
 *	and permanently disabled by clearing its THERMAL_SUPPORT_* bit,
 *	so subsequent calls skip it. Logs "not supported by this card"
 *	when no method remains.
 * NOTE(review): extract elides some lines (declarations, the success-
 * path returns/gotos after the QLA_SUCCESS checks, closing braces);
 * verify against the complete source file.
 */
4562 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
4564 int rval = QLA_FUNCTION_FAILED;
4565 struct qla_hw_data *ha = vha->hw;
4568 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
4569 "Entered %s.\n", __func__);
4571 if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
4572 rval = qla2x00_read_sfp(vha, 0, &byte,
4573 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
4575 if (rval == QLA_SUCCESS)
4578 ql_log(ql_log_warn, vha, 0x10c9,
4579 "Thermal not supported by I2C.\n");
/* Disable the I2C method so it is not retried on later calls. */
4580 ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
4583 if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
4584 rval = qla2x00_read_asic_temperature(vha, temp);
4585 if (rval == QLA_SUCCESS)
4588 ql_log(ql_log_warn, vha, 0x1019,
4589 "Thermal not supported by ISP.\n");
/* Disable the ISP method so it is not retried on later calls. */
4590 ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
4593 ql_log(ql_log_warn, vha, 0x1150,
4594 "Thermal not supported by this card "
4595 "(ignoring further requests).\n");
4599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
4600 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_enable
 *	Issues MBC_TOGGLE_INTERRUPT (FWI2-capable ISPs only) to enable
 *	firmware interrupts.
 * NOTE(review): extract elides some lines (declarations, the mb[1]
 * enable value, in_mb/tov setup, closing braces); verify against the
 * complete source file.
 */
4605 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4608 struct qla_hw_data *ha = vha->hw;
4610 mbx_cmd_t *mcp = &mc;
4612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
4613 "Entered %s.\n", __func__);
4615 if (!IS_FWI2_CAPABLE(ha))
4616 return QLA_FUNCTION_FAILED;
4618 memset(mcp, 0, sizeof(mbx_cmd_t));
4619 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4622 mcp->out_mb = MBX_1|MBX_0;
4627 rval = qla2x00_mailbox_command(vha, mcp);
4628 if (rval != QLA_SUCCESS) {
4629 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4630 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4632 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
4633 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable
 *	Issues MBC_TOGGLE_INTERRUPT (QLA82xx only) to disable firmware
 *	interrupts. Counterpart of qla82xx_mbx_intr_enable().
 * NOTE(review): extract elides some lines (declarations, the mb[1]
 * disable value, in_mb/tov setup, closing braces); verify against the
 * complete source file.
 */
4640 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4643 struct qla_hw_data *ha = vha->hw;
4645 mbx_cmd_t *mcp = &mc;
4647 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
4648 "Entered %s.\n", __func__);
4650 if (!IS_QLA82XX(ha))
4651 return QLA_FUNCTION_FAILED;
4653 memset(mcp, 0, sizeof(mbx_cmd_t));
4654 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4657 mcp->out_mb = MBX_1|MBX_0;
4662 rval = qla2x00_mailbox_command(vha, mcp);
4663 if (rval != QLA_SUCCESS) {
4664 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4665 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
4668 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size
 *	Queries the firmware minidump template size via the 32-bit
 *	MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE command (opcode split across
 *	mb[0]/mb[1], RQST_TMPLT_SIZE subcode across mb[2]/mb[3]). On
 *	return, the size is assembled from mb[3]:mb[2] into
 *	ha->md_template_size; a zero size is treated as failure.
 * NOTE(review): extract elides some lines (declarations, return,
 * closing braces); verify against the complete source file.
 */
4675 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4677 struct qla_hw_data *ha = vha->hw;
4679 mbx_cmd_t *mcp = &mc;
4680 int rval = QLA_FUNCTION_FAILED;
4682 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
4683 "Entered %s.\n", __func__);
4685 memset(mcp->mb, 0 , sizeof(mcp->mb));
4686 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4687 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4688 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4689 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4691 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4692 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4693 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4695 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4696 mcp->tov = MBX_TOV_SECONDS;
4697 rval = qla2x00_mailbox_command(vha, mcp);
4699 /* Always copy back return mailbox values. */
4700 if (rval != QLA_SUCCESS) {
4701 ql_dbg(ql_dbg_mbx, vha, 0x1120,
4702 "mailbox command FAILED=0x%x, subcode=%x.\n",
4703 (mcp->mb[1] << 16) | mcp->mb[0],
4704 (mcp->mb[3] << 16) | mcp->mb[2]);
4706 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
4707 "Done %s.\n", __func__);
/* Template size comes back as a 32-bit value in mb[3]:mb[2]. */
4708 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4709 if (!ha->md_template_size) {
4710 ql_dbg(ql_dbg_mbx, vha, 0x1122,
4711 "Null template size obtained.\n");
4712 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template
 *	Allocates a DMA-coherent buffer of ha->md_template_size bytes for
 *	the minidump template header (ha->md_tmplt_hdr), then issues the
 *	32-bit MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE command with the RQST_TMPLT
 *	subcode to have the firmware fill it. The 64-bit buffer address is
 *	split across mb[4..7] and the size across mb[8]/mb[9].
 * NOTE(review): extract elides some lines (declarations, the failure
 * return after the allocation check, closing braces); verify against
 * the complete source file. The allocated buffer's ownership/free path
 * is not visible here.
 */
4719 qla82xx_md_get_template(scsi_qla_host_t *vha)
4721 struct qla_hw_data *ha = vha->hw;
4723 mbx_cmd_t *mcp = &mc;
4724 int rval = QLA_FUNCTION_FAILED;
4726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
4727 "Entered %s.\n", __func__);
4729 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4730 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4731 if (!ha->md_tmplt_hdr) {
4732 ql_log(ql_log_warn, vha, 0x1124,
4733 "Unable to allocate memory for Minidump template.\n");
4737 memset(mcp->mb, 0 , sizeof(mcp->mb));
4738 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4739 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4740 mcp->mb[2] = LSW(RQST_TMPLT);
4741 mcp->mb[3] = MSW(RQST_TMPLT);
/* 64-bit template buffer DMA address across mb[4..7]. */
4742 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
4743 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
4744 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
4745 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
4746 mcp->mb[8] = LSW(ha->md_template_size);
4747 mcp->mb[9] = MSW(ha->md_template_size);
4749 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4750 mcp->tov = MBX_TOV_SECONDS;
4751 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4752 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4753 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4754 rval = qla2x00_mailbox_command(vha, mcp);
4756 if (rval != QLA_SUCCESS) {
4757 ql_dbg(ql_dbg_mbx, vha, 0x1125,
4758 "mailbox command FAILED=0x%x, subcode=%x.\n",
4759 ((mcp->mb[1] << 16) | mcp->mb[0]),
4760 ((mcp->mb[3] << 16) | mcp->mb[2]));
4762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
4763 "Done %s.\n", __func__);
/*
 * qla81xx_set_led_config
 *	Issues MBC_SET_LED_CONFIG (QLA81xx/QLA8031 only) with the LED
 *	configuration words from 'led_cfg'. QLA81xx uses two words
 *	(mb[1..2]); QLA8031 uses six (mb[1..6]) and the corresponding
 *	out_mb registers are added for that case.
 * NOTE(review): extract elides some lines (declarations, in_mb/tov
 * setup, the IS_QLA8031 guard above the out_mb extension, closing
 * braces); verify against the complete source file.
 */
4768 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4771 struct qla_hw_data *ha = vha->hw;
4773 mbx_cmd_t *mcp = &mc;
4775 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4776 return QLA_FUNCTION_FAILED;
4778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
4779 "Entered %s.\n", __func__);
4781 memset(mcp, 0, sizeof(mbx_cmd_t));
4782 mcp->mb[0] = MBC_SET_LED_CONFIG;
4783 mcp->mb[1] = led_cfg[0];
4784 mcp->mb[2] = led_cfg[1];
4785 if (IS_QLA8031(ha)) {
4786 mcp->mb[3] = led_cfg[2];
4787 mcp->mb[4] = led_cfg[3];
4788 mcp->mb[5] = led_cfg[4];
4789 mcp->mb[6] = led_cfg[5];
4792 mcp->out_mb = MBX_2|MBX_1|MBX_0;
4794 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
4799 rval = qla2x00_mailbox_command(vha, mcp);
4800 if (rval != QLA_SUCCESS) {
4801 ql_dbg(ql_dbg_mbx, vha, 0x1134,
4802 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4804 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
4805 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config
 *	Issues MBC_GET_LED_CONFIG (QLA81xx/QLA8031 only) and copies the
 *	returned LED configuration words into 'led_cfg': two words
 *	(mb[1..2]) on QLA81xx, six (mb[1..6]) on QLA8031. Counterpart of
 *	qla81xx_set_led_config().
 * NOTE(review): extract elides some lines (declarations, the
 * IS_QLA8031 guard above the in_mb extension, closing braces);
 * verify against the complete source file.
 */
4812 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4815 struct qla_hw_data *ha = vha->hw;
4817 mbx_cmd_t *mcp = &mc;
4819 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4820 return QLA_FUNCTION_FAILED;
4822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
4823 "Entered %s.\n", __func__);
4825 memset(mcp, 0, sizeof(mbx_cmd_t));
4826 mcp->mb[0] = MBC_GET_LED_CONFIG;
4828 mcp->out_mb = MBX_0;
4829 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4831 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
4835 rval = qla2x00_mailbox_command(vha, mcp);
4836 if (rval != QLA_SUCCESS) {
4837 ql_dbg(ql_dbg_mbx, vha, 0x1137,
4838 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4840 led_cfg[0] = mcp->mb[1];
4841 led_cfg[1] = mcp->mb[2];
4842 if (IS_QLA8031(ha)) {
4843 led_cfg[2] = mcp->mb[3];
4844 led_cfg[3] = mcp->mb[4];
4845 led_cfg[4] = mcp->mb[5];
4846 led_cfg[5] = mcp->mb[6];
4848 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
4849 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl
 *	Issues MBC_SET_LED_CONFIG (QLA82xx only) to turn the beacon LED
 *	on or off per 'enable'.
 * NOTE(review): extract elides some lines (declarations, the mb[7]
 * enable/disable value, in_mb setup, closing braces); verify against
 * the complete source file.
 */
4856 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4859 struct qla_hw_data *ha = vha->hw;
4861 mbx_cmd_t *mcp = &mc;
4863 if (!IS_QLA82XX(ha))
4864 return QLA_FUNCTION_FAILED;
4866 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
4867 "Entered %s.\n", __func__);
4869 memset(mcp, 0, sizeof(mbx_cmd_t));
4870 mcp->mb[0] = MBC_SET_LED_CONFIG;
4876 mcp->out_mb = MBX_7|MBX_0;
4878 mcp->tov = MBX_TOV_SECONDS;
4881 rval = qla2x00_mailbox_command(vha, mcp);
4882 if (rval != QLA_SUCCESS) {
4883 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4884 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
4887 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg
 *	Writes 32-bit 'data' to remote register address 'reg' via the
 *	MBC_WRITE_REMOTE_REG mailbox command (QLA83xx only). Address is
 *	split across mb[1]/mb[2], data across mb[3]/mb[4].
 * NOTE(review): extract elides some lines (declarations, closing
 * braces); verify against the complete source file.
 */
4894 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4897 struct qla_hw_data *ha = vha->hw;
4899 mbx_cmd_t *mcp = &mc;
4901 if (!IS_QLA83XX(ha))
4902 return QLA_FUNCTION_FAILED;
4904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
4905 "Entered %s.\n", __func__);
4907 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
4908 mcp->mb[1] = LSW(reg);
4909 mcp->mb[2] = MSW(reg);
4910 mcp->mb[3] = LSW(data);
4911 mcp->mb[4] = MSW(data);
4912 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4914 mcp->in_mb = MBX_1|MBX_0;
4915 mcp->tov = MBX_TOV_SECONDS;
4917 rval = qla2x00_mailbox_command(vha, mcp);
4919 if (rval != QLA_SUCCESS) {
4920 ql_dbg(ql_dbg_mbx, vha, 0x1131,
4921 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4923 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
4924 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout
 *	Performs an implicit LOGO for 'fcport' via the MBC_PORT_LOGOUT
 *	mailbox command (mb[10] BIT_15 selects implicit logout). Not
 *	supported on QLA2100/QLA2200.
 * NOTE(review): extract elides some lines (declarations, in_mb setup,
 * return, closing brace); verify against the complete source file.
 */
4931 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4934 struct qla_hw_data *ha = vha->hw;
4936 mbx_cmd_t *mcp = &mc;
4938 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
4940 "Implicit LOGO Unsupported.\n");
4941 return QLA_FUNCTION_FAILED;
4945 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
4946 "Entering %s.\n", __func__);
4948 /* Perform Implicit LOGO. */
4949 mcp->mb[0] = MBC_PORT_LOGOUT;
4950 mcp->mb[1] = fcport->loop_id;
4951 mcp->mb[10] = BIT_15;
4952 mcp->out_mb = MBX_10|MBX_1|MBX_0;
4954 mcp->tov = MBX_TOV_SECONDS;
4956 rval = qla2x00_mailbox_command(vha, mcp);
4957 if (rval != QLA_SUCCESS)
4958 ql_dbg(ql_dbg_mbx, vha, 0x113d,
4959 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
4962 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg
 *	Reads a 32-bit remote register at address 'reg' via the
 *	MBC_READ_REMOTE_REG mailbox command (QLA83xx only). The value is
 *	assembled from mb[3] (low) and mb[4] (high) into *data. CAMRAM
 *	reads can transiently return QLA8XXX_BAD_VALUE (0xbad0bad0)
 *	during soft-reset, so the read is retried for up to 2 seconds
 *	(retry_max_time) before giving up with QLA_FUNCTION_FAILED.
 * NOTE(review): extract elides some lines (declarations, the retry
 * loop-back label/goto, closing braces); verify against the complete
 * source file.
 */
4968 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
4972 mbx_cmd_t *mcp = &mc;
4973 struct qla_hw_data *ha = vha->hw;
4974 unsigned long retry_max_time = jiffies + (2 * HZ);
4976 if (!IS_QLA83XX(ha))
4977 return QLA_FUNCTION_FAILED;
4979 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
4982 mcp->mb[0] = MBC_READ_REMOTE_REG;
4983 mcp->mb[1] = LSW(reg);
4984 mcp->mb[2] = MSW(reg);
4985 mcp->out_mb = MBX_2|MBX_1|MBX_0;
4986 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4987 mcp->tov = MBX_TOV_SECONDS;
4989 rval = qla2x00_mailbox_command(vha, mcp);
4991 if (rval != QLA_SUCCESS) {
4992 ql_dbg(ql_dbg_mbx, vha, 0x114c,
4993 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4994 rval, mcp->mb[0], mcp->mb[1]);
4996 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
4997 if (*data == QLA8XXX_BAD_VALUE) {
4999 * During soft-reset CAMRAM register reads might
5000 * return 0xbad0bad0. So retry for MAX of 2 sec
5001 * while reading camram registers.
5003 if (time_after(jiffies, retry_max_time)) {
5004 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5005 "Failure to read CAMRAM register. "
5006 "data=0x%x.\n", *data);
5007 return QLA_FUNCTION_FAILED;
5012 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware
 *	Issues MBC_RESTART_NIC_FIRMWARE (QLA83xx only). On failure, logs
 *	rval/mb[0]/mb[1] and triggers a firmware dump via
 *	ha->isp_ops->fw_dump() for diagnosis.
 * NOTE(review): extract elides some lines (declarations, return,
 * closing braces); verify against the complete source file.
 */
5019 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5023 mbx_cmd_t *mcp = &mc;
5024 struct qla_hw_data *ha = vha->hw;
5026 if (!IS_QLA83XX(ha))
5027 return QLA_FUNCTION_FAILED;
5029 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5031 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5032 mcp->out_mb = MBX_0;
5033 mcp->in_mb = MBX_1|MBX_0;
5034 mcp->tov = MBX_TOV_SECONDS;
5036 rval = qla2x00_mailbox_command(vha, mcp);
5038 if (rval != QLA_SUCCESS) {
5039 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5040 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5041 rval, mcp->mb[0], mcp->mb[1]);
5042 ha->isp_ops->fw_dump(vha, 0);
5044 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control
 *	Issues MBC_SET_ACCESS_CONTROL (QLA8031 only); the subcode is the
 *	low byte of 'options'. Subcode BIT_2 sends an address range
 *	(start_addr/end_addr split across mb[2..5]); subcode BIT_5
 *	returns a sector size in mb[1] (*sector_size); BIT_6/BIT_7 and
 *	BIT_3/BIT_4 subcodes return a driver-lock or flash-lock id in
 *	mb[3]/mb[4], which is only logged. On failure, logs the returned
 *	mailbox registers and triggers a firmware dump.
 * NOTE(review): extract elides some lines (declarations, mb[4] in the
 * failure log, closing braces); verify against the complete file.
 */
5051 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5052 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5056 mbx_cmd_t *mcp = &mc;
5057 uint8_t subcode = (uint8_t)options;
5058 struct qla_hw_data *ha = vha->hw;
5060 if (!IS_QLA8031(ha))
5061 return QLA_FUNCTION_FAILED;
5063 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5065 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5066 mcp->mb[1] = options;
5067 mcp->out_mb = MBX_1|MBX_0;
5068 if (subcode & BIT_2) {
5069 mcp->mb[2] = LSW(start_addr);
5070 mcp->mb[3] = MSW(start_addr);
5071 mcp->mb[4] = LSW(end_addr);
5072 mcp->mb[5] = MSW(end_addr);
5073 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5075 mcp->in_mb = MBX_2|MBX_1|MBX_0;
/* Lock-id style subcodes additionally return mb[3]/mb[4]. */
5076 if (!(subcode & (BIT_2 | BIT_5)))
5077 mcp->in_mb |= MBX_4|MBX_3;
5078 mcp->tov = MBX_TOV_SECONDS;
5080 rval = qla2x00_mailbox_command(vha, mcp);
5082 if (rval != QLA_SUCCESS) {
5083 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5084 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5085 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5087 ha->isp_ops->fw_dump(vha, 0);
5089 if (subcode & BIT_5)
5090 *sector_size = mcp->mb[1];
5091 else if (subcode & (BIT_6 | BIT_7)) {
5092 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5093 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5094 } else if (subcode & (BIT_3 | BIT_4)) {
5095 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5096 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5098 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5105 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5110 mbx_cmd_t *mcp = &mc;
5112 if (!IS_MCTP_CAPABLE(vha->hw))
5113 return QLA_FUNCTION_FAILED;
5115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5116 "Entered %s.\n", __func__);
5118 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5119 mcp->mb[1] = LSW(addr);
5120 mcp->mb[2] = MSW(req_dma);
5121 mcp->mb[3] = LSW(req_dma);
5122 mcp->mb[4] = MSW(size);
5123 mcp->mb[5] = LSW(size);
5124 mcp->mb[6] = MSW(MSD(req_dma));
5125 mcp->mb[7] = LSW(MSD(req_dma));
5126 mcp->mb[8] = MSW(addr);
5127 /* Setting RAM ID to valid */
5128 mcp->mb[10] |= BIT_7;
5129 /* For MCTP RAM ID is 0x40 */
5130 mcp->mb[10] |= 0x40;
5132 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5136 mcp->tov = MBX_TOV_SECONDS;
5138 rval = qla2x00_mailbox_command(vha, mcp);
5140 if (rval != QLA_SUCCESS) {
5141 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5142 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5145 "Done %s.\n", __func__);