/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed success
 *	1 : QLA_FUNCTION_FAILED   (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT  (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
33 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
36 unsigned long flags = 0;
37 device_reg_t __iomem *reg;
42 uint16_t __iomem *optr;
45 unsigned long wait_time;
46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, "
55 return QLA_FUNCTION_TIMEOUT;
58 if (vha->device_flags & DFLG_DEV_FAILED) {
59 ql_log(ql_log_warn, base_vha, 0x1002,
60 "Device in failed state, exiting.\n");
61 return QLA_FUNCTION_TIMEOUT;
65 io_lock_on = base_vha->flags.init_done;
68 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
71 if (ha->flags.pci_channel_io_perm_failure) {
72 ql_log(ql_log_warn, base_vha, 0x1003,
73 "Perm failure on EEH timeout MBX, exiting.\n");
74 return QLA_FUNCTION_TIMEOUT;
77 if (ha->flags.isp82xx_fw_hung) {
78 /* Setting Link-Down error */
79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
82 return QLA_FUNCTION_TIMEOUT;
86 * Wait for active mailbox commands to finish by waiting at most tov
87 * seconds. This is to serialize actual issuing of mailbox cmds during
90 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
91 /* Timeout occurred. Return error. */
92 ql_log(ql_log_warn, base_vha, 0x1005,
93 "Cmd access timeout, Exiting.\n");
94 return QLA_FUNCTION_TIMEOUT;
97 ha->flags.mbox_busy = 1;
98 /* Save mailbox command for debug */
101 ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
102 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
104 spin_lock_irqsave(&ha->hardware_lock, flags);
106 /* Load mailbox registers. */
108 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
109 else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
110 optr = (uint16_t __iomem *)®->isp24.mailbox0;
112 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
115 command = mcp->mb[0];
116 mboxes = mcp->out_mb;
118 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
119 if (IS_QLA2200(ha) && cnt == 8)
121 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
123 WRT_REG_WORD(optr, *iptr);
130 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
131 "Loaded MBX registers (displayed in bytes) =.\n");
132 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
133 (uint8_t *)mcp->mb, 16);
134 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
136 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
137 ((uint8_t *)mcp->mb + 0x10), 16);
138 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
140 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
141 ((uint8_t *)mcp->mb + 0x20), 8);
142 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
143 "I/O Address = %p.\n", optr);
144 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
146 /* Issue set host interrupt command to send cmd out. */
147 ha->flags.mbox_int = 0;
148 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
150 /* Unlock mbx registers and wait for interrupt */
151 ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
152 "Going to unlock irq & waiting for interrupts. "
153 "jiffies=%lx.\n", jiffies);
155 /* Wait for mbx cmd completion until timeout */
157 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
158 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
160 if (IS_QLA82XX(ha)) {
161 if (RD_REG_DWORD(®->isp82.hint) &
162 HINT_MBX_INT_PENDING) {
163 spin_unlock_irqrestore(&ha->hardware_lock,
165 ha->flags.mbox_busy = 0;
166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
167 "Pending mailbox timeout, exiting.\n");
168 rval = QLA_FUNCTION_TIMEOUT;
171 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
172 } else if (IS_FWI2_CAPABLE(ha))
173 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
175 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
176 spin_unlock_irqrestore(&ha->hardware_lock, flags);
178 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
184 "Cmd=%x Polling Mode.\n", command);
186 if (IS_QLA82XX(ha)) {
187 if (RD_REG_DWORD(®->isp82.hint) &
188 HINT_MBX_INT_PENDING) {
189 spin_unlock_irqrestore(&ha->hardware_lock,
191 ha->flags.mbox_busy = 0;
192 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
193 "Pending mailbox timeout, exiting.\n");
194 rval = QLA_FUNCTION_TIMEOUT;
197 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
198 } else if (IS_FWI2_CAPABLE(ha))
199 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
201 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
202 spin_unlock_irqrestore(&ha->hardware_lock, flags);
204 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
205 while (!ha->flags.mbox_int) {
206 if (time_after(jiffies, wait_time))
209 /* Check for pending interrupts. */
210 qla2x00_poll(ha->rsp_q_map[0]);
212 if (!ha->flags.mbox_int &&
214 command == MBC_LOAD_RISC_RAM_EXTENDED))
217 ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
219 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
222 /* Check whether we timed out */
223 if (ha->flags.mbox_int) {
226 ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
227 "Cmd=%x completed.\n", command);
229 /* Got interrupt. Clear the flag. */
230 ha->flags.mbox_int = 0;
231 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
233 if (ha->flags.isp82xx_fw_hung) {
234 ha->flags.mbox_busy = 0;
235 /* Setting Link-Down error */
236 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
238 rval = QLA_FUNCTION_FAILED;
239 ql_log(ql_log_warn, base_vha, 0x1015,
240 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
244 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
245 rval = QLA_FUNCTION_FAILED;
247 /* Load return mailbox registers. */
249 iptr = (uint16_t *)&ha->mailbox_out[0];
251 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
264 if (IS_FWI2_CAPABLE(ha)) {
265 mb0 = RD_REG_WORD(®->isp24.mailbox0);
266 ictrl = RD_REG_DWORD(®->isp24.ictrl);
268 mb0 = RD_MAILBOX_REG(ha, ®->isp, 0);
269 ictrl = RD_REG_WORD(®->isp.ictrl);
271 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
272 "MBX Command timeout for cmd %x.\n", command);
273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
274 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
275 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
276 "mb[0] = 0x%x.\n", mb0);
277 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
279 rval = QLA_FUNCTION_TIMEOUT;
282 ha->flags.mbox_busy = 0;
287 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
288 ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
289 "Checking for additional resp interrupt.\n");
291 /* polling mode for non isp_abort commands. */
292 qla2x00_poll(ha->rsp_q_map[0]);
295 if (rval == QLA_FUNCTION_TIMEOUT &&
296 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
297 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
298 ha->flags.eeh_busy) {
299 /* not in dpc. schedule it for dpc to take over. */
300 ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
301 "Timeout, schedule isp_abort_needed.\n");
303 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
304 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
305 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
306 if (IS_QLA82XX(ha)) {
307 ql_dbg(ql_dbg_mbx, vha, 0x112a,
308 "disabling pause transmit on port "
311 QLA82XX_CRB_NIU + 0x98,
312 CRB_NIU_XG_PAUSE_CTL_P0|
313 CRB_NIU_XG_PAUSE_CTL_P1);
315 ql_log(ql_log_info, base_vha, 0x101c,
316 "Mailbox cmd timeout occured. "
317 "Scheduling ISP abort eeh_busy=0x%x.\n",
319 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
320 qla2xxx_wake_dpc(vha);
322 } else if (!abort_active) {
323 /* call abort directly since we are in the DPC thread */
324 ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
325 "Timeout, calling abort_isp.\n");
327 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
328 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
329 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
330 if (IS_QLA82XX(ha)) {
331 ql_dbg(ql_dbg_mbx, vha, 0x112b,
332 "disabling pause transmit on port "
335 QLA82XX_CRB_NIU + 0x98,
336 CRB_NIU_XG_PAUSE_CTL_P0|
337 CRB_NIU_XG_PAUSE_CTL_P1);
339 ql_log(ql_log_info, base_vha, 0x101e,
340 "Mailbox cmd timeout occured. "
341 "Scheduling ISP abort.\n");
343 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
344 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
345 /* Allow next mbx cmd to come in. */
346 complete(&ha->mbx_cmd_comp);
347 if (ha->isp_ops->abort_isp(vha)) {
348 /* Failed. retry later. */
349 set_bit(ISP_ABORT_NEEDED,
352 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
353 ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
354 "Finished abort_isp.\n");
361 /* Allow next mbx cmd to come in. */
362 complete(&ha->mbx_cmd_comp);
366 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
367 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
368 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
370 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
377 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
378 uint32_t risc_code_size)
381 struct qla_hw_data *ha = vha->hw;
383 mbx_cmd_t *mcp = &mc;
385 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
387 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
388 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
389 mcp->mb[8] = MSW(risc_addr);
390 mcp->out_mb = MBX_8|MBX_0;
392 mcp->mb[0] = MBC_LOAD_RISC_RAM;
395 mcp->mb[1] = LSW(risc_addr);
396 mcp->mb[2] = MSW(req_dma);
397 mcp->mb[3] = LSW(req_dma);
398 mcp->mb[6] = MSW(MSD(req_dma));
399 mcp->mb[7] = LSW(MSD(req_dma));
400 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
401 if (IS_FWI2_CAPABLE(ha)) {
402 mcp->mb[4] = MSW(risc_code_size);
403 mcp->mb[5] = LSW(risc_code_size);
404 mcp->out_mb |= MBX_5|MBX_4;
406 mcp->mb[4] = LSW(risc_code_size);
407 mcp->out_mb |= MBX_4;
411 mcp->tov = MBX_TOV_SECONDS;
413 rval = qla2x00_mailbox_command(vha, mcp);
415 if (rval != QLA_SUCCESS) {
416 ql_dbg(ql_dbg_mbx, vha, 0x1023,
417 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
419 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
425 #define EXTENDED_BB_CREDITS BIT_0
428 * Start adapter firmware.
431 * ha = adapter block pointer.
432 * TARGET_QUEUE_LOCK must be released.
433 * ADAPTER_STATE_LOCK must be released.
436 * qla2x00 local function return status code.
442 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
445 struct qla_hw_data *ha = vha->hw;
447 mbx_cmd_t *mcp = &mc;
449 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
451 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
454 if (IS_FWI2_CAPABLE(ha)) {
455 mcp->mb[1] = MSW(risc_addr);
456 mcp->mb[2] = LSW(risc_addr);
458 if (IS_QLA81XX(ha)) {
459 struct nvram_81xx *nv = ha->nvram;
460 mcp->mb[4] = (nv->enhanced_features &
461 EXTENDED_BB_CREDITS);
464 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
467 mcp->mb[1] = LSW(risc_addr);
468 mcp->out_mb |= MBX_1;
469 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
471 mcp->out_mb |= MBX_2;
475 mcp->tov = MBX_TOV_SECONDS;
477 rval = qla2x00_mailbox_command(vha, mcp);
479 if (rval != QLA_SUCCESS) {
480 ql_dbg(ql_dbg_mbx, vha, 0x1026,
481 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
483 if (IS_FWI2_CAPABLE(ha)) {
484 ql_dbg(ql_dbg_mbx, vha, 0x1027,
485 "Done exchanges=%x.\n", mcp->mb[1]);
487 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
495 * qla2x00_get_fw_version
496 * Get firmware version.
499 * ha: adapter state pointer.
500 * major: pointer for major number.
501 * minor: pointer for minor number.
502 * subminor: pointer for subminor number.
505 * qla2x00 local function return status code.
511 qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
512 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
513 uint32_t *mpi_caps, uint8_t *phy)
517 mbx_cmd_t *mcp = &mc;
519 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
521 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
523 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
524 if (IS_QLA81XX(vha->hw))
525 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
527 mcp->tov = MBX_TOV_SECONDS;
528 rval = qla2x00_mailbox_command(vha, mcp);
529 if (rval != QLA_SUCCESS)
532 /* Return mailbox data. */
535 *subminor = mcp->mb[3];
536 *attributes = mcp->mb[6];
537 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
538 *memory = 0x1FFFF; /* Defaults to 128KB. */
540 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
541 if (IS_QLA81XX(vha->hw)) {
542 mpi[0] = mcp->mb[10] & 0xff;
543 mpi[1] = mcp->mb[11] >> 8;
544 mpi[2] = mcp->mb[11] & 0xff;
545 *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
546 phy[0] = mcp->mb[8] & 0xff;
547 phy[1] = mcp->mb[9] >> 8;
548 phy[2] = mcp->mb[9] & 0xff;
551 if (rval != QLA_SUCCESS) {
553 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
556 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
576 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
580 mbx_cmd_t *mcp = &mc;
582 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
584 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
586 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
587 mcp->tov = MBX_TOV_SECONDS;
589 rval = qla2x00_mailbox_command(vha, mcp);
591 if (rval != QLA_SUCCESS) {
593 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
595 fwopts[0] = mcp->mb[0];
596 fwopts[1] = mcp->mb[1];
597 fwopts[2] = mcp->mb[2];
598 fwopts[3] = mcp->mb[3];
600 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
608 * qla2x00_set_fw_options
609 * Set firmware options.
612 * ha = adapter block pointer.
613 * fwopt = pointer for firmware options.
616 * qla2x00 local function return status code.
622 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
626 mbx_cmd_t *mcp = &mc;
628 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
630 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
631 mcp->mb[1] = fwopts[1];
632 mcp->mb[2] = fwopts[2];
633 mcp->mb[3] = fwopts[3];
634 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
636 if (IS_FWI2_CAPABLE(vha->hw)) {
639 mcp->mb[10] = fwopts[10];
640 mcp->mb[11] = fwopts[11];
641 mcp->mb[12] = 0; /* Undocumented, but used */
642 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
644 mcp->tov = MBX_TOV_SECONDS;
646 rval = qla2x00_mailbox_command(vha, mcp);
648 fwopts[0] = mcp->mb[0];
650 if (rval != QLA_SUCCESS) {
652 ql_dbg(ql_dbg_mbx, vha, 0x1030,
653 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
656 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
663 * qla2x00_mbx_reg_test
664 * Mailbox register wrap test.
667 * ha = adapter block pointer.
668 * TARGET_QUEUE_LOCK must be released.
669 * ADAPTER_STATE_LOCK must be released.
672 * qla2x00 local function return status code.
678 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
682 mbx_cmd_t *mcp = &mc;
684 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
686 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
694 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
695 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
696 mcp->tov = MBX_TOV_SECONDS;
698 rval = qla2x00_mailbox_command(vha, mcp);
700 if (rval == QLA_SUCCESS) {
701 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
702 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
703 rval = QLA_FUNCTION_FAILED;
704 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
705 mcp->mb[7] != 0x2525)
706 rval = QLA_FUNCTION_FAILED;
709 if (rval != QLA_SUCCESS) {
711 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
714 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
721 * qla2x00_verify_checksum
722 * Verify firmware checksum.
725 * ha = adapter block pointer.
726 * TARGET_QUEUE_LOCK must be released.
727 * ADAPTER_STATE_LOCK must be released.
730 * qla2x00 local function return status code.
736 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
740 mbx_cmd_t *mcp = &mc;
742 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
744 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
747 if (IS_FWI2_CAPABLE(vha->hw)) {
748 mcp->mb[1] = MSW(risc_addr);
749 mcp->mb[2] = LSW(risc_addr);
750 mcp->out_mb |= MBX_2|MBX_1;
751 mcp->in_mb |= MBX_2|MBX_1;
753 mcp->mb[1] = LSW(risc_addr);
754 mcp->out_mb |= MBX_1;
758 mcp->tov = MBX_TOV_SECONDS;
760 rval = qla2x00_mailbox_command(vha, mcp);
762 if (rval != QLA_SUCCESS) {
763 ql_dbg(ql_dbg_mbx, vha, 0x1036,
764 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
765 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
767 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
775 * Issue IOCB using mailbox command
778 * ha = adapter state pointer.
779 * buffer = buffer pointer.
780 * phys_addr = physical address of buffer.
781 * size = size of buffer.
782 * TARGET_QUEUE_LOCK must be released.
783 * ADAPTER_STATE_LOCK must be released.
786 * qla2x00 local function return status code.
792 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
793 dma_addr_t phys_addr, size_t size, uint32_t tov)
797 mbx_cmd_t *mcp = &mc;
799 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
801 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
803 mcp->mb[2] = MSW(phys_addr);
804 mcp->mb[3] = LSW(phys_addr);
805 mcp->mb[6] = MSW(MSD(phys_addr));
806 mcp->mb[7] = LSW(MSD(phys_addr));
807 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
808 mcp->in_mb = MBX_2|MBX_0;
811 rval = qla2x00_mailbox_command(vha, mcp);
813 if (rval != QLA_SUCCESS) {
815 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
817 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
819 /* Mask reserved bits. */
820 sts_entry->entry_status &=
821 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
822 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
829 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
832 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
837 * qla2x00_abort_command
838 * Abort command aborts a specified IOCB.
841 * ha = adapter block pointer.
842 * sp = SB structure pointer.
845 * qla2x00 local function return status code.
851 qla2x00_abort_command(srb_t *sp)
853 unsigned long flags = 0;
857 mbx_cmd_t *mcp = &mc;
858 fc_port_t *fcport = sp->fcport;
859 scsi_qla_host_t *vha = fcport->vha;
860 struct qla_hw_data *ha = vha->hw;
861 struct req_que *req = vha->req;
863 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
865 spin_lock_irqsave(&ha->hardware_lock, flags);
866 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
867 if (req->outstanding_cmds[handle] == sp)
870 spin_unlock_irqrestore(&ha->hardware_lock, flags);
872 if (handle == MAX_OUTSTANDING_COMMANDS) {
873 /* command not found */
874 return QLA_FUNCTION_FAILED;
877 mcp->mb[0] = MBC_ABORT_COMMAND;
878 if (HAS_EXTENDED_IDS(ha))
879 mcp->mb[1] = fcport->loop_id;
881 mcp->mb[1] = fcport->loop_id << 8;
882 mcp->mb[2] = (uint16_t)handle;
883 mcp->mb[3] = (uint16_t)(handle >> 16);
884 mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
885 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
887 mcp->tov = MBX_TOV_SECONDS;
889 rval = qla2x00_mailbox_command(vha, mcp);
891 if (rval != QLA_SUCCESS) {
892 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
894 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
901 qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
905 mbx_cmd_t *mcp = &mc;
906 scsi_qla_host_t *vha;
913 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
915 req = vha->hw->req_q_map[0];
917 mcp->mb[0] = MBC_ABORT_TARGET;
918 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
919 if (HAS_EXTENDED_IDS(vha->hw)) {
920 mcp->mb[1] = fcport->loop_id;
922 mcp->out_mb |= MBX_10;
924 mcp->mb[1] = fcport->loop_id << 8;
926 mcp->mb[2] = vha->hw->loop_reset_delay;
927 mcp->mb[9] = vha->vp_idx;
930 mcp->tov = MBX_TOV_SECONDS;
932 rval = qla2x00_mailbox_command(vha, mcp);
933 if (rval != QLA_SUCCESS) {
934 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
937 /* Issue marker IOCB. */
938 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
940 if (rval2 != QLA_SUCCESS) {
941 ql_dbg(ql_dbg_mbx, vha, 0x1040,
942 "Failed to issue marker IOCB (%x).\n", rval2);
944 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
951 qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
955 mbx_cmd_t *mcp = &mc;
956 scsi_qla_host_t *vha;
962 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
964 req = vha->hw->req_q_map[0];
966 mcp->mb[0] = MBC_LUN_RESET;
967 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
968 if (HAS_EXTENDED_IDS(vha->hw))
969 mcp->mb[1] = fcport->loop_id;
971 mcp->mb[1] = fcport->loop_id << 8;
974 mcp->mb[9] = vha->vp_idx;
977 mcp->tov = MBX_TOV_SECONDS;
979 rval = qla2x00_mailbox_command(vha, mcp);
980 if (rval != QLA_SUCCESS) {
981 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
984 /* Issue marker IOCB. */
985 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
987 if (rval2 != QLA_SUCCESS) {
988 ql_dbg(ql_dbg_mbx, vha, 0x1044,
989 "Failed to issue marker IOCB (%x).\n", rval2);
991 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
998 * qla2x00_get_adapter_id
999 * Get adapter ID and topology.
1002 * ha = adapter block pointer.
1003 * id = pointer for loop ID.
1004 * al_pa = pointer for AL_PA.
1005 * area = pointer for area.
1006 * domain = pointer for domain.
1007 * top = pointer for topology.
1008 * TARGET_QUEUE_LOCK must be released.
1009 * ADAPTER_STATE_LOCK must be released.
1012 * qla2x00 local function return status code.
1018 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1019 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1023 mbx_cmd_t *mcp = &mc;
1025 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
1027 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1028 mcp->mb[9] = vha->vp_idx;
1029 mcp->out_mb = MBX_9|MBX_0;
1030 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1031 if (IS_QLA8XXX_TYPE(vha->hw))
1032 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1033 mcp->tov = MBX_TOV_SECONDS;
1035 rval = qla2x00_mailbox_command(vha, mcp);
1036 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1037 rval = QLA_COMMAND_ERROR;
1038 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1039 rval = QLA_INVALID_COMMAND;
1043 *al_pa = LSB(mcp->mb[2]);
1044 *area = MSB(mcp->mb[2]);
1045 *domain = LSB(mcp->mb[3]);
1047 *sw_cap = mcp->mb[7];
1049 if (rval != QLA_SUCCESS) {
1051 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1053 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1055 if (IS_QLA8XXX_TYPE(vha->hw)) {
1056 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1057 vha->fcoe_fcf_idx = mcp->mb[10];
1058 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1059 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1060 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1061 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1062 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1063 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1071 * qla2x00_get_retry_cnt
1072 * Get current firmware login retry count and delay.
1075 * ha = adapter block pointer.
1076 * retry_cnt = pointer to login retry count.
1077 * tov = pointer to login timeout value.
1080 * qla2x00 local function return status code.
1086 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1092 mbx_cmd_t *mcp = &mc;
1094 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
1096 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1097 mcp->out_mb = MBX_0;
1098 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1099 mcp->tov = MBX_TOV_SECONDS;
1101 rval = qla2x00_mailbox_command(vha, mcp);
1103 if (rval != QLA_SUCCESS) {
1105 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1106 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1108 /* Convert returned data and check our values. */
1109 *r_a_tov = mcp->mb[3] / 2;
1110 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1111 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1112 /* Update to the larger values */
1113 *retry_cnt = (uint8_t)mcp->mb[1];
1117 ql_dbg(ql_dbg_mbx, vha, 0x104b,
1118 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1125 * qla2x00_init_firmware
1126 * Initialize adapter firmware.
1129 * ha = adapter block pointer.
1130 * dptr = Initialization control block pointer.
1131 * size = size of initialization control block.
1132 * TARGET_QUEUE_LOCK must be released.
1133 * ADAPTER_STATE_LOCK must be released.
1136 * qla2x00 local function return status code.
1142 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1146 mbx_cmd_t *mcp = &mc;
1147 struct qla_hw_data *ha = vha->hw;
1149 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
1151 if (IS_QLA82XX(ha) && ql2xdbwr)
1152 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
1153 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1155 if (ha->flags.npiv_supported)
1156 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1158 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1161 mcp->mb[2] = MSW(ha->init_cb_dma);
1162 mcp->mb[3] = LSW(ha->init_cb_dma);
1163 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1164 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1165 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1166 if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
1168 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1169 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1170 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1171 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1172 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1173 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1176 mcp->buf_size = size;
1177 mcp->flags = MBX_DMA_OUT;
1178 mcp->tov = MBX_TOV_SECONDS;
1179 rval = qla2x00_mailbox_command(vha, mcp);
1181 if (rval != QLA_SUCCESS) {
1183 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1184 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1187 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
1194 * qla2x00_get_port_database
1195 * Issue normal/enhanced get port database mailbox command
1196 * and copy device name as necessary.
1199 * ha = adapter state pointer.
1200 * dev = structure pointer.
1201 * opt = enhanced cmd option byte.
1204 * qla2x00 local function return status code.
1210 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1214 mbx_cmd_t *mcp = &mc;
1215 port_database_t *pd;
1216 struct port_database_24xx *pd24;
1218 struct qla_hw_data *ha = vha->hw;
1220 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
1223 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1225 ql_log(ql_log_warn, vha, 0x1050,
1226 "Failed to allocate port database structure.\n");
1227 return QLA_MEMORY_ALLOC_FAILED;
1229 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1231 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1232 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1233 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1234 mcp->mb[2] = MSW(pd_dma);
1235 mcp->mb[3] = LSW(pd_dma);
1236 mcp->mb[6] = MSW(MSD(pd_dma));
1237 mcp->mb[7] = LSW(MSD(pd_dma));
1238 mcp->mb[9] = vha->vp_idx;
1239 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1241 if (IS_FWI2_CAPABLE(ha)) {
1242 mcp->mb[1] = fcport->loop_id;
1244 mcp->out_mb |= MBX_10|MBX_1;
1245 mcp->in_mb |= MBX_1;
1246 } else if (HAS_EXTENDED_IDS(ha)) {
1247 mcp->mb[1] = fcport->loop_id;
1249 mcp->out_mb |= MBX_10|MBX_1;
1251 mcp->mb[1] = fcport->loop_id << 8 | opt;
1252 mcp->out_mb |= MBX_1;
1254 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1255 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1256 mcp->flags = MBX_DMA_IN;
1257 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1258 rval = qla2x00_mailbox_command(vha, mcp);
1259 if (rval != QLA_SUCCESS)
1262 if (IS_FWI2_CAPABLE(ha)) {
1263 pd24 = (struct port_database_24xx *) pd;
1265 /* Check for logged in state. */
1266 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1267 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1268 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1269 "Unable to verify login-state (%x/%x) for "
1270 "loop_id %x.\n", pd24->current_login_state,
1271 pd24->last_login_state, fcport->loop_id);
1272 rval = QLA_FUNCTION_FAILED;
1276 /* Names are little-endian. */
1277 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1278 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1280 /* Get port_id of device. */
1281 fcport->d_id.b.domain = pd24->port_id[0];
1282 fcport->d_id.b.area = pd24->port_id[1];
1283 fcport->d_id.b.al_pa = pd24->port_id[2];
1284 fcport->d_id.b.rsvd_1 = 0;
1286 /* If not target must be initiator or unknown type. */
1287 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1288 fcport->port_type = FCT_INITIATOR;
1290 fcport->port_type = FCT_TARGET;
1292 /* Check for logged in state. */
1293 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1294 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1295 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1296 "Unable to verify login-state (%x/%x) - "
1297 "portid=%02x%02x%02x.\n", pd->master_state,
1298 pd->slave_state, fcport->d_id.b.domain,
1299 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1300 rval = QLA_FUNCTION_FAILED;
1304 /* Names are little-endian. */
1305 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1306 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1308 /* Get port_id of device. */
1309 fcport->d_id.b.domain = pd->port_id[0];
1310 fcport->d_id.b.area = pd->port_id[3];
1311 fcport->d_id.b.al_pa = pd->port_id[2];
1312 fcport->d_id.b.rsvd_1 = 0;
1314 /* If not target must be initiator or unknown type. */
1315 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1316 fcport->port_type = FCT_INITIATOR;
1318 fcport->port_type = FCT_TARGET;
1320 /* Passback COS information. */
1321 fcport->supported_classes = (pd->options & BIT_4) ?
1322 FC_COS_CLASS2: FC_COS_CLASS3;
1326 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1328 if (rval != QLA_SUCCESS) {
1329 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1330 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1331 mcp->mb[0], mcp->mb[1]);
1333 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
1340 * qla2x00_get_firmware_state
1341 * Get adapter firmware state.
1344 * ha = adapter block pointer.
1345 * dptr = pointer for firmware state.
1346 * TARGET_QUEUE_LOCK must be released.
1347 * ADAPTER_STATE_LOCK must be released.
1350 * qla2x00 local function return status code.
1356 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1360 mbx_cmd_t *mcp = &mc;
1362 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
1364 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1365 mcp->out_mb = MBX_0;
1366 if (IS_FWI2_CAPABLE(vha->hw))
1367 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1369 mcp->in_mb = MBX_1|MBX_0;
1370 mcp->tov = MBX_TOV_SECONDS;
1372 rval = qla2x00_mailbox_command(vha, mcp);
1374 /* Return firmware states. */
1375 states[0] = mcp->mb[1];
1376 if (IS_FWI2_CAPABLE(vha->hw)) {
1377 states[1] = mcp->mb[2];
1378 states[2] = mcp->mb[3];
1379 states[3] = mcp->mb[4];
1380 states[4] = mcp->mb[5];
1383 if (rval != QLA_SUCCESS) {
1385 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1388 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
1395 * qla2x00_get_port_name
1396 * Issue get port name mailbox command.
1397 * Returned name is in big endian format.
1400 * ha = adapter block pointer.
1401 * loop_id = loop ID of device.
1402 * name = pointer for name.
1403 * TARGET_QUEUE_LOCK must be released.
1404 * ADAPTER_STATE_LOCK must be released.
1407 * qla2x00 local function return status code.
/*
 * qla2x00_get_port_name - issue MBC_GET_PORT_NAME for the given loop_id
 * and store the 8-byte WWPN (big-endian) into name[] from mb2/mb3/mb6/mb7.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
1413 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1418 mbx_cmd_t *mcp = &mc;
1420 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
1422 mcp->mb[0] = MBC_GET_PORT_NAME;
1423 mcp->mb[9] = vha->vp_idx;
1424 mcp->out_mb = MBX_9|MBX_1|MBX_0;
/* Extended-ID adapters take the full loop_id in mb1; legacy parts pack
 * loop_id into the high byte with the option flags in the low byte. */
1425 if (HAS_EXTENDED_IDS(vha->hw)) {
1426 mcp->mb[1] = loop_id;
1428 mcp->out_mb |= MBX_10;
1430 mcp->mb[1] = loop_id << 8 | opt;
1433 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1434 mcp->tov = MBX_TOV_SECONDS;
1436 rval = qla2x00_mailbox_command(vha, mcp);
1438 if (rval != QLA_SUCCESS) {
1440 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1443 /* This function returns name in big endian. */
1444 name[0] = MSB(mcp->mb[2]);
1445 name[1] = LSB(mcp->mb[2]);
1446 name[2] = MSB(mcp->mb[3]);
1447 name[3] = LSB(mcp->mb[3]);
1448 name[4] = MSB(mcp->mb[6]);
1449 name[5] = LSB(mcp->mb[6]);
1450 name[6] = MSB(mcp->mb[7]);
1451 name[7] = LSB(mcp->mb[7]);
1454 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
1462 * Issue LIP reset mailbox command.
1465 * ha = adapter block pointer.
1466 * TARGET_QUEUE_LOCK must be released.
1467 * ADAPTER_STATE_LOCK must be released.
1470 * qla2x00 local function return status code.
/*
 * qla2x00_lip_reset - issue a LIP (loop initialization) reset, choosing
 * the mailbox command form appropriate to the adapter generation:
 * 8xxx (FCoE) -> full login across all FCFs, FWI2 -> full login with
 * reset delay in mb3, legacy ISPs -> MBC_LIP_RESET with delay in mb2.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
1476 qla2x00_lip_reset(scsi_qla_host_t *vha)
1480 mbx_cmd_t *mcp = &mc;
1482 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1484 if (IS_QLA8XXX_TYPE(vha->hw)) {
1485 /* Logout across all FCFs. */
1486 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1489 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1490 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1491 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1494 mcp->mb[3] = vha->hw->loop_reset_delay;
1495 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Legacy (pre-FWI2) path below. */
1497 mcp->mb[0] = MBC_LIP_RESET;
1498 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1499 if (HAS_EXTENDED_IDS(vha->hw)) {
1500 mcp->mb[1] = 0x00ff;
1502 mcp->out_mb |= MBX_10;
1504 mcp->mb[1] = 0xff00;
1506 mcp->mb[2] = vha->hw->loop_reset_delay;
1510 mcp->tov = MBX_TOV_SECONDS;
1512 rval = qla2x00_mailbox_command(vha, mcp);
1514 if (rval != QLA_SUCCESS) {
1516 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1519 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
1530 * ha = adapter block pointer.
1531 * sns = pointer for command.
1532 * cmd_size = command size.
1533 * buf_size = response/command size.
1534 * TARGET_QUEUE_LOCK must be released.
1535 * ADAPTER_STATE_LOCK must be released.
1538 * qla2x00 local function return status code.
/*
 * qla2x00_send_sns - issue MBC_SEND_SNS_COMMAND with a pre-built SNS
 * request at sns_phys_address (64-bit DMA address split across
 * mb2/mb3/mb6/mb7).  DMA in both directions; timeout is scaled from the
 * adapter's login_timeout (2.5x).
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
1544 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1545 uint16_t cmd_size, size_t buf_size)
1549 mbx_cmd_t *mcp = &mc;
1551 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
1553 ql_dbg(ql_dbg_mbx, vha, 0x105e,
1554 "Retry cnt=%d ratov=%d total tov=%d.\n",
1555 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1557 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1558 mcp->mb[1] = cmd_size;
1559 mcp->mb[2] = MSW(sns_phys_address);
1560 mcp->mb[3] = LSW(sns_phys_address);
1561 mcp->mb[6] = MSW(MSD(sns_phys_address));
1562 mcp->mb[7] = LSW(MSD(sns_phys_address));
1563 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1564 mcp->in_mb = MBX_0|MBX_1;
1565 mcp->buf_size = buf_size;
1566 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
/* 2.5x login timeout gives the switch time to answer the SNS query. */
1567 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1568 rval = qla2x00_mailbox_command(vha, mcp);
1570 if (rval != QLA_SUCCESS) {
1572 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1573 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1574 rval, mcp->mb[0], mcp->mb[1]);
1577 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
/*
 * qla24xx_login_fabric - 24xx-style fabric login using a LOGINOUT port
 * IOCB (PLOGI) instead of a mailbox command.  Results are translated into
 * legacy mailbox-style status codes in mb[] so callers written for the
 * mailbox interface keep working.
 * NOTE(review): listing has elided lines (declarations/braces/else arms/
 * return); comments only, code untouched.
 */
1584 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1585 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1589 struct logio_entry_24xx *lg;
1592 struct qla_hw_data *ha = vha->hw;
1593 struct req_que *req;
1594 struct rsp_que *rsp;
1596 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
/* With CPU affinity the base queue (index 0) is used for the IOCB. */
1598 if (ha->flags.cpu_affinity_enabled)
1599 req = ha->req_q_map[0];
/* Login IOCB comes from the small-structure DMA pool; freed at exit. */
1604 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1606 ql_log(ql_log_warn, vha, 0x1062,
1607 "Failed to allocate login IOCB.\n");
1608 return QLA_MEMORY_ALLOC_FAILED;
1610 memset(lg, 0, sizeof(struct logio_entry_24xx));
1612 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1613 lg->entry_count = 1;
1614 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1615 lg->nport_handle = cpu_to_le16(loop_id);
1616 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
/* Option bits (conditional PLOGI / skip PRLI) OR'ed in per 'opt'. */
1618 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
1620 lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
1621 lg->port_id[0] = al_pa;
1622 lg->port_id[1] = area;
1623 lg->port_id[2] = domain;
1624 lg->vp_index = vha->vp_idx;
1625 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1626 if (rval != QLA_SUCCESS) {
1627 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1628 "Failed to issue login IOCB (%x).\n", rval);
1629 } else if (lg->entry_status != 0) {
1630 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1631 "Failed to complete IOCB -- error status (%x).\n",
1633 rval = QLA_FUNCTION_FAILED;
1634 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1635 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1636 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1638 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1639 "Failed to complete IOCB -- completion status (%x) "
1640 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Map IOCB logio status codes (LSC_SCODE_*) to legacy mailbox
 * statuses (MBS_*) for compatibility with mailbox-era callers. */
1644 case LSC_SCODE_PORTID_USED:
1645 mb[0] = MBS_PORT_ID_USED;
1646 mb[1] = LSW(iop[1]);
1648 case LSC_SCODE_NPORT_USED:
1649 mb[0] = MBS_LOOP_ID_USED;
1651 case LSC_SCODE_NOLINK:
1652 case LSC_SCODE_NOIOCB:
1653 case LSC_SCODE_NOXCB:
1654 case LSC_SCODE_CMD_FAILED:
1655 case LSC_SCODE_NOFABRIC:
1656 case LSC_SCODE_FW_NOT_READY:
1657 case LSC_SCODE_NOT_LOGGED_IN:
1658 case LSC_SCODE_NOPCB:
1659 case LSC_SCODE_ELS_REJECT:
1660 case LSC_SCODE_CMD_PARAM_ERR:
1661 case LSC_SCODE_NONPORT:
1662 case LSC_SCODE_LOGGED_IN:
1663 case LSC_SCODE_NOFLOGI_ACC:
1665 mb[0] = MBS_COMMAND_ERROR;
1669 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
1671 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1673 mb[0] = MBS_COMMAND_COMPLETE;
1675 if (iop[0] & BIT_4) {
1681 /* Passback COS information. */
1683 if (lg->io_parameter[7] || lg->io_parameter[8])
1684 mb[10] |= BIT_0; /* Class 2. */
1685 if (lg->io_parameter[9] || lg->io_parameter[10])
1686 mb[10] |= BIT_1; /* Class 3. */
1689 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1695 * qla2x00_login_fabric
1696 * Issue login fabric port mailbox command.
1699 * ha = adapter block pointer.
1700 * loop_id = device loop ID.
1701 * domain = device domain.
1702 * area = device area.
1703 * al_pa = device AL_PA.
1704 * status = pointer for return status.
1705 * opt = command options.
1706 * TARGET_QUEUE_LOCK must be released.
1707 * ADAPTER_STATE_LOCK must be released.
1710 * qla2x00 local function return status code.
/*
 * qla2x00_login_fabric - legacy (pre-24xx) fabric login via the
 * MBC_LOGIN_FABRIC_PORT mailbox command.  Returned mailbox statuses are
 * copied out to mb[] (copy-out lines elided in this listing).
 * NOTE(review): listing has elided lines (declarations/braces/else arms/
 * return); comments only, code untouched.
 */
1716 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1717 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1721 mbx_cmd_t *mcp = &mc;
1722 struct qla_hw_data *ha = vha->hw;
1724 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
1726 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1727 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Extended-ID parts take loop_id in mb1; legacy parts pack id|opt. */
1728 if (HAS_EXTENDED_IDS(ha)) {
1729 mcp->mb[1] = loop_id;
1731 mcp->out_mb |= MBX_10;
1733 mcp->mb[1] = (loop_id << 8) | opt;
1735 mcp->mb[2] = domain;
1736 mcp->mb[3] = area << 8 | al_pa;
1738 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
1739 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1741 rval = qla2x00_mailbox_command(vha, mcp);
1743 /* Return mailbox statuses. */
1750 /* COS retrieved from Get-Port-Database mailbox command. */
1754 if (rval != QLA_SUCCESS) {
1755 /* RLU tmp code: need to change main mailbox_command function to
1756 * return ok even when the mailbox completion value is not
1757 * SUCCESS. The caller needs to be responsible to interpret
1758 * the return values of this mailbox command if we're not
1759 * to change too much of the existing code.
/* 0x4001..0x4006: expected login-failure mailbox statuses the caller
 * interprets itself; they are not treated as a driver error here. */
1761 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
1762 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
1763 mcp->mb[0] == 0x4006)
1767 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1768 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1769 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1772 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
1779 * qla2x00_login_local_device
1780 * Issue login loop port mailbox command.
1783 * ha = adapter block pointer.
1784 * loop_id = device loop ID.
1785 * opt = command options.
1788 * Return status code.
/*
 * qla2x00_login_local_device - log in a local-loop port.  FWI2-capable
 * adapters delegate to the IOCB-based qla24xx_login_fabric(); legacy
 * adapters use the MBC_LOGIN_LOOP_PORT mailbox command.  Mailbox
 * statuses are copied back to mb_ret[] when the caller supplied it.
 * NOTE(review): listing has elided lines (declarations/braces/else arms/
 * return); comments only, code untouched.
 */
1795 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1796 uint16_t *mb_ret, uint8_t opt)
1800 mbx_cmd_t *mcp = &mc;
1801 struct qla_hw_data *ha = vha->hw;
1803 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
1805 if (IS_FWI2_CAPABLE(ha))
1806 return qla24xx_login_fabric(vha, fcport->loop_id,
1807 fcport->d_id.b.domain, fcport->d_id.b.area,
1808 fcport->d_id.b.al_pa, mb_ret, opt);
1810 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1811 if (HAS_EXTENDED_IDS(ha))
1812 mcp->mb[1] = fcport->loop_id;
1814 mcp->mb[1] = fcport->loop_id << 8;
1816 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1817 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
1818 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1820 rval = qla2x00_mailbox_command(vha, mcp);
1822 /* Return mailbox statuses. */
1823 if (mb_ret != NULL) {
1824 mb_ret[0] = mcp->mb[0];
1825 mb_ret[1] = mcp->mb[1];
1826 mb_ret[6] = mcp->mb[6];
1827 mb_ret[7] = mcp->mb[7];
1830 if (rval != QLA_SUCCESS) {
1831 /* AV tmp code: need to change main mailbox_command function to
1832 * return ok even when the mailbox completion value is not
1833 * SUCCESS. The caller needs to be responsible to interpret
1834 * the return values of this mailbox command if we're not
1835 * to change too much of the existing code.
/* 0x4005/0x4006 are caller-interpreted login failures, not errors. */
1837 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
1840 ql_dbg(ql_dbg_mbx, vha, 0x106b,
1841 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
1842 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1845 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
/*
 * qla24xx_fabric_logout - 24xx-style fabric logout via a LOGINOUT port
 * IOCB (implicit + free-loop-id LOGO), mirroring qla24xx_login_fabric.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
1852 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1853 uint8_t area, uint8_t al_pa)
1856 struct logio_entry_24xx *lg;
1858 struct qla_hw_data *ha = vha->hw;
1859 struct req_que *req;
1860 struct rsp_que *rsp;
1862 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
/* IOCB allocated from the small-structure DMA pool; freed at exit. */
1864 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1866 ql_log(ql_log_warn, vha, 0x106e,
1867 "Failed to allocate logout IOCB.\n");
1868 return QLA_MEMORY_ALLOC_FAILED;
1870 memset(lg, 0, sizeof(struct logio_entry_24xx));
/* Multiqueue: queue selection differs (else arm elided in listing). */
1872 if (ql2xmaxqueues > 1)
1873 req = ha->req_q_map[0];
1877 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1878 lg->entry_count = 1;
1879 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1880 lg->nport_handle = cpu_to_le16(loop_id);
1882 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
1884 lg->port_id[0] = al_pa;
1885 lg->port_id[1] = area;
1886 lg->port_id[2] = domain;
1887 lg->vp_index = vha->vp_idx;
1889 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1890 if (rval != QLA_SUCCESS) {
1891 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1892 "Failed to issue logout IOCB (%x).\n", rval);
1893 } else if (lg->entry_status != 0) {
1894 ql_dbg(ql_dbg_mbx, vha, 0x1070,
1895 "Failed to complete IOCB -- error status (%x).\n",
1897 rval = QLA_FUNCTION_FAILED;
1898 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1899 ql_dbg(ql_dbg_mbx, vha, 0x1071,
1900 "Failed to complete IOCB -- completion status (%x) "
1901 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1902 le32_to_cpu(lg->io_parameter[0]),
1903 le32_to_cpu(lg->io_parameter[1]));
1906 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
1909 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1915 * qla2x00_fabric_logout
1916 * Issue logout fabric port mailbox command.
1919 * ha = adapter block pointer.
1920 * loop_id = device loop ID.
1921 * TARGET_QUEUE_LOCK must be released.
1922 * ADAPTER_STATE_LOCK must be released.
1925 * qla2x00 local function return status code.
/*
 * qla2x00_fabric_logout - legacy fabric logout via the
 * MBC_LOGOUT_FABRIC_PORT mailbox command.  domain/area/al_pa are unused
 * by this mailbox form (only loop_id is sent).
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
1931 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1932 uint8_t area, uint8_t al_pa)
1936 mbx_cmd_t *mcp = &mc;
1938 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
1940 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1941 mcp->out_mb = MBX_1|MBX_0;
1942 if (HAS_EXTENDED_IDS(vha->hw)) {
1943 mcp->mb[1] = loop_id;
1945 mcp->out_mb |= MBX_10;
1947 mcp->mb[1] = loop_id << 8;
1950 mcp->in_mb = MBX_1|MBX_0;
1951 mcp->tov = MBX_TOV_SECONDS;
1953 rval = qla2x00_mailbox_command(vha, mcp);
1955 if (rval != QLA_SUCCESS) {
1957 ql_dbg(ql_dbg_mbx, vha, 0x1074,
1958 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
1961 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
1968 * qla2x00_full_login_lip
1969 * Issue full login LIP mailbox command.
1972 * ha = adapter block pointer.
1973 * TARGET_QUEUE_LOCK must be released.
1974 * ADAPTER_STATE_LOCK must be released.
1977 * qla2x00 local function return status code.
/*
 * qla2x00_full_login_lip - issue MBC_LIP_FULL_LOGIN.  On FWI2 parts BIT_3
 * in mb1 selects the full-login variant; legacy parts pass 0.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
1983 qla2x00_full_login_lip(scsi_qla_host_t *vha)
1987 mbx_cmd_t *mcp = &mc;
1989 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
1991 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1992 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
1995 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1997 mcp->tov = MBX_TOV_SECONDS;
1999 rval = qla2x00_mailbox_command(vha, mcp);
2001 if (rval != QLA_SUCCESS) {
2003 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2006 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
2013 * qla2x00_get_id_list
2016 * ha = adapter block pointer.
2019 * qla2x00 local function return status code.
/*
 * qla2x00_get_id_list - issue MBC_GET_ID_LIST, DMA'ing the firmware's
 * port/loop-ID list into id_list.  The DMA address is placed in different
 * mailbox registers depending on adapter generation; *entries receives
 * the returned entry count from mb1 on success.
 * NOTE(review): listing has elided lines (declarations/braces/else/
 * return); comments only, code untouched.
 */
2025 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2030 mbx_cmd_t *mcp = &mc;
2032 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
2034 if (id_list == NULL)
2035 return QLA_FUNCTION_FAILED;
2037 mcp->mb[0] = MBC_GET_ID_LIST;
2038 mcp->out_mb = MBX_0;
/* FWI2: 64-bit DMA address in mb2/mb3/mb6/mb7 plus vp_idx in mb9;
 * legacy: address packed in mb1/mb2/mb3/mb6. */
2039 if (IS_FWI2_CAPABLE(vha->hw)) {
2040 mcp->mb[2] = MSW(id_list_dma);
2041 mcp->mb[3] = LSW(id_list_dma);
2042 mcp->mb[6] = MSW(MSD(id_list_dma));
2043 mcp->mb[7] = LSW(MSD(id_list_dma));
2045 mcp->mb[9] = vha->vp_idx;
2046 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2048 mcp->mb[1] = MSW(id_list_dma);
2049 mcp->mb[2] = LSW(id_list_dma);
2050 mcp->mb[3] = MSW(MSD(id_list_dma));
2051 mcp->mb[6] = LSW(MSD(id_list_dma));
2052 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2054 mcp->in_mb = MBX_1|MBX_0;
2055 mcp->tov = MBX_TOV_SECONDS;
2057 rval = qla2x00_mailbox_command(vha, mcp);
2059 if (rval != QLA_SUCCESS) {
2061 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2063 *entries = mcp->mb[1];
2064 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
2071 * qla2x00_get_resource_cnts
2072 * Get current firmware resource counts.
2075 * ha = adapter block pointer.
2078 * qla2x00 local function return status code.
/*
 * qla2x00_get_resource_cnts - issue MBC_GET_RESOURCE_COUNTS and scatter
 * the returned counters into the caller's out-pointers: exchange counts
 * (mb3/mb6), IOCB counts (mb7/mb10), NPIV vport limit (mb11, only if
 * npiv_supported) and, on 81xx, the FCF limit (mb12).
 * NOTE(review): listing has elided lines (declarations/braces/NULL
 * guards/return); comments only, code untouched.
 */
2084 qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2085 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
2086 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
2090 mbx_cmd_t *mcp = &mc;
2092 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
2094 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2095 mcp->out_mb = MBX_0;
2096 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2097 if (IS_QLA81XX(vha->hw))
2098 mcp->in_mb |= MBX_12;
2099 mcp->tov = MBX_TOV_SECONDS;
2101 rval = qla2x00_mailbox_command(vha, mcp);
2103 if (rval != QLA_SUCCESS) {
2105 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2106 "Failed mb[0]=%x.\n", mcp->mb[0]);
2108 ql_dbg(ql_dbg_mbx, vha, 0x107e,
2109 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2110 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2111 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2112 mcp->mb[11], mcp->mb[12]);
2115 *cur_xchg_cnt = mcp->mb[3];
2117 *orig_xchg_cnt = mcp->mb[6];
2119 *cur_iocb_cnt = mcp->mb[7];
2121 *orig_iocb_cnt = mcp->mb[10];
2122 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2123 *max_npiv_vports = mcp->mb[11];
2124 if (IS_QLA81XX(vha->hw) && max_fcfs)
2125 *max_fcfs = mcp->mb[12];
2132 * qla2x00_get_fcal_position_map
2133 * Get FCAL (LILP) position map using mailbox command
2136 * ha = adapter state pointer.
2137 * pos_map = buffer pointer (can be NULL).
2140 * qla2x00 local function return status code.
/*
 * qla2x00_get_fcal_position_map - retrieve the FC-AL (LILP) position map
 * via MBC_GET_FC_AL_POSITION_MAP into a DMA-pool buffer, optionally
 * copying it to the caller's pos_map (may be NULL).  Buffer is always
 * freed before return.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2146 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2150 mbx_cmd_t *mcp = &mc;
2152 dma_addr_t pmap_dma;
2153 struct qla_hw_data *ha = vha->hw;
2155 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
2157 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2159 ql_log(ql_log_warn, vha, 0x1080,
2160 "Memory alloc failed.\n");
2161 return QLA_MEMORY_ALLOC_FAILED;
2163 memset(pmap, 0, FCAL_MAP_SIZE);
2165 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2166 mcp->mb[2] = MSW(pmap_dma);
2167 mcp->mb[3] = LSW(pmap_dma);
2168 mcp->mb[6] = MSW(MSD(pmap_dma));
2169 mcp->mb[7] = LSW(MSD(pmap_dma));
2170 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2171 mcp->in_mb = MBX_1|MBX_0;
2172 mcp->buf_size = FCAL_MAP_SIZE;
2173 mcp->flags = MBX_DMA_IN;
2174 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2175 rval = qla2x00_mailbox_command(vha, mcp);
2177 if (rval == QLA_SUCCESS) {
2178 ql_dbg(ql_dbg_mbx, vha, 0x1081,
2179 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2180 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2181 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
/* Copy out only when the caller asked for the map (guard elided). */
2185 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2187 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2189 if (rval != QLA_SUCCESS) {
2190 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2192 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
2199 * qla2x00_get_link_status
2202 * ha = adapter block pointer.
2203 * loop_id = device loop ID.
2204 * ret_buf = pointer to link status return buffer.
2208 * BIT_0 = mem alloc error.
2209 * BIT_1 = mailbox error.
/*
 * qla2x00_get_link_status - issue MBC_GET_LINK_STATUS for loop_id, DMA'ing
 * the firmware link statistics into stats and byte-swapping them in place
 * (firmware data is little-endian).
 * NOTE(review): listing has elided lines (declarations/braces/loop body
 * brace/return); comments only, code untouched.
 */
2212 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2213 struct link_statistics *stats, dma_addr_t stats_dma)
2217 mbx_cmd_t *mcp = &mc;
2218 uint32_t *siter, *diter, dwords;
2219 struct qla_hw_data *ha = vha->hw;
2221 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
2223 mcp->mb[0] = MBC_GET_LINK_STATUS;
2224 mcp->mb[2] = MSW(stats_dma);
2225 mcp->mb[3] = LSW(stats_dma);
2226 mcp->mb[6] = MSW(MSD(stats_dma));
2227 mcp->mb[7] = LSW(MSD(stats_dma));
2228 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* loop_id placement varies by adapter generation (see branches). */
2230 if (IS_FWI2_CAPABLE(ha)) {
2231 mcp->mb[1] = loop_id;
2234 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2235 mcp->in_mb |= MBX_1;
2236 } else if (HAS_EXTENDED_IDS(ha)) {
2237 mcp->mb[1] = loop_id;
2239 mcp->out_mb |= MBX_10|MBX_1;
2241 mcp->mb[1] = loop_id << 8;
2242 mcp->out_mb |= MBX_1;
2244 mcp->tov = MBX_TOV_SECONDS;
2245 mcp->flags = IOCTL_CMD;
2246 rval = qla2x00_mailbox_command(vha, mcp);
2248 if (rval == QLA_SUCCESS) {
2249 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2250 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2251 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2252 rval = QLA_FUNCTION_FAILED;
2254 /* Copy over data -- firmware data is LE. */
2255 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
2256 dwords = offsetof(struct link_statistics, unused1) / 4;
2257 siter = diter = &stats->link_fail_cnt;
/* In-place LE->CPU swap; src and dst alias the same buffer. */
2259 *diter++ = le32_to_cpu(*siter++);
2263 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * qla24xx_get_isp_stats - 24xx variant: issue MBC_GET_LINK_PRIV_STATS
 * with buffer size (mb8, in dwords) and vp_idx (mb9), then byte-swap the
 * whole link_statistics structure in place (firmware data is LE).
 * NOTE(review): listing has elided lines (declarations/braces/loop body
 * brace/return); comments only, code untouched.
 */
2270 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2271 dma_addr_t stats_dma)
2275 mbx_cmd_t *mcp = &mc;
2276 uint32_t *siter, *diter, dwords;
2278 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
2280 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2281 mcp->mb[2] = MSW(stats_dma);
2282 mcp->mb[3] = LSW(stats_dma);
2283 mcp->mb[6] = MSW(MSD(stats_dma));
2284 mcp->mb[7] = LSW(MSD(stats_dma));
2285 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2286 mcp->mb[9] = vha->vp_idx;
2288 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2289 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2290 mcp->tov = MBX_TOV_SECONDS;
2291 mcp->flags = IOCTL_CMD;
2292 rval = qla2x00_mailbox_command(vha, mcp);
2294 if (rval == QLA_SUCCESS) {
2295 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2296 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2297 "Failed mb[0]=%x.\n", mcp->mb[0]);
2298 rval = QLA_FUNCTION_FAILED;
2300 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
2301 /* Copy over data -- firmware data is LE. */
2302 dwords = sizeof(struct link_statistics) / 4;
2303 siter = diter = &stats->link_fail_cnt;
2305 *diter++ = le32_to_cpu(*siter++);
2309 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * qla24xx_abort_command - abort an outstanding srb via an ABORT IOCB.
 * Locates the command's handle in the request queue's outstanding_cmds
 * under hardware_lock, then builds and issues an abort_entry_24xx.
 * A non-zero returned nport_handle indicates the abort failed.
 * NOTE(review): listing has elided lines (declarations/braces/break in
 * search loop/return); comments only, code untouched.
 */
2316 qla24xx_abort_command(srb_t *sp)
2319 unsigned long flags = 0;
2321 struct abort_entry_24xx *abt;
2324 fc_port_t *fcport = sp->fcport;
2325 struct scsi_qla_host *vha = fcport->vha;
2326 struct qla_hw_data *ha = vha->hw;
2327 struct req_que *req = vha->req;
2329 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
/* Scan outstanding commands for sp's handle under hardware_lock. */
2331 spin_lock_irqsave(&ha->hardware_lock, flags);
2332 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2333 if (req->outstanding_cmds[handle] == sp)
2336 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2337 if (handle == MAX_OUTSTANDING_COMMANDS) {
2338 /* Command not found. */
2339 return QLA_FUNCTION_FAILED;
2342 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2344 ql_log(ql_log_warn, vha, 0x108d,
2345 "Failed to allocate abort IOCB.\n");
2346 return QLA_MEMORY_ALLOC_FAILED;
2348 memset(abt, 0, sizeof(struct abort_entry_24xx));
2350 abt->entry_type = ABORT_IOCB_TYPE;
2351 abt->entry_count = 1;
2352 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2353 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2354 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2355 abt->port_id[0] = fcport->d_id.b.al_pa;
2356 abt->port_id[1] = fcport->d_id.b.area;
2357 abt->port_id[2] = fcport->d_id.b.domain;
2358 abt->vp_index = fcport->vp_idx;
2360 abt->req_que_no = cpu_to_le16(req->id);
2362 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2363 if (rval != QLA_SUCCESS) {
2364 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2365 "Failed to issue IOCB (%x).\n", rval);
2366 } else if (abt->entry_status != 0) {
2367 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2368 "Failed to complete IOCB -- error status (%x).\n",
2370 rval = QLA_FUNCTION_FAILED;
/* Firmware writes the completion status back into nport_handle;
 * zero means the abort completed successfully. */
2371 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2372 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2373 "Failed to complete IOCB -- completion status (%x).\n",
2374 le16_to_cpu(abt->nport_handle));
2375 rval = QLA_FUNCTION_FAILED;
2377 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
2380 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * tsk_mgmt_cmd - overlay of the task-management request IOCB and its
 * status response; used by __qla24xx_issue_tmf so one DMA buffer serves
 * both directions.  (Union wrapper lines elided in this listing.)
 */
2385 struct tsk_mgmt_cmd {
2387 struct tsk_mgmt_entry tsk;
2388 struct sts_entry_24xx sts;
/*
 * __qla24xx_issue_tmf - common worker for 24xx task-management functions
 * (target reset / LUN reset).  Builds a TSK_MGMT IOCB, issues it, checks
 * the overlaid status entry (including any FCP response-info code), then
 * issues a marker IOCB to resynchronize.  'name' is used only in log
 * messages; 'type' is a TCF_* control flag; 'l' is the LUN.
 * NOTE(review): listing has elided lines (declarations/braces/else arms/
 * return); comments only, code untouched.
 */
2393 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2394 unsigned int l, int tag)
2397 struct tsk_mgmt_cmd *tsk;
2398 struct sts_entry_24xx *sts;
2400 scsi_qla_host_t *vha;
2401 struct qla_hw_data *ha;
2402 struct req_que *req;
2403 struct rsp_que *rsp;
2409 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
/* With CPU affinity the response queue is chosen by tag. */
2411 if (ha->flags.cpu_affinity_enabled)
2412 rsp = ha->rsp_q_map[tag + 1];
2415 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2417 ql_log(ql_log_warn, vha, 0x1093,
2418 "Failed to allocate task management IOCB.\n");
2419 return QLA_MEMORY_ALLOC_FAILED;
2421 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
2423 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2424 tsk->p.tsk.entry_count = 1;
2425 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2426 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
/* Timeout derived from R_A_TOV (r_a_tov is in tenths of a second). */
2427 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2428 tsk->p.tsk.control_flags = cpu_to_le32(type);
2429 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2430 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2431 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2432 tsk->p.tsk.vp_index = fcport->vp_idx;
2433 if (type == TCF_LUN_RESET) {
2434 int_to_scsilun(l, &tsk->p.tsk.lun);
2435 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
2436 sizeof(tsk->p.tsk.lun));
2440 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2441 if (rval != QLA_SUCCESS) {
2442 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2443 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2444 } else if (sts->entry_status != 0) {
2445 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2446 "Failed to complete IOCB -- error status (%x).\n",
2448 rval = QLA_FUNCTION_FAILED;
2449 } else if (sts->comp_status !=
2450 __constant_cpu_to_le16(CS_COMPLETE)) {
2451 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2452 "Failed to complete IOCB -- completion status (%x).\n",
2453 le16_to_cpu(sts->comp_status));
2454 rval = QLA_FUNCTION_FAILED;
2455 } else if (le16_to_cpu(sts->scsi_status) &
2456 SS_RESPONSE_INFO_LEN_VALID) {
2457 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2458 ql_dbg(ql_dbg_mbx, vha, 0x1097,
2459 "Ignoring inconsistent data length -- not enough "
2460 "response info (%d).\n",
2461 le32_to_cpu(sts->rsp_data_len));
/* data[3] is the FCP response code; non-zero means TMF rejected. */
2462 } else if (sts->data[3]) {
2463 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2464 "Failed to complete IOCB -- response (%x).\n",
2466 rval = QLA_FUNCTION_FAILED;
2470 /* Issue marker IOCB. */
2471 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2472 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2473 if (rval2 != QLA_SUCCESS) {
2474 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2475 "Failed to issue marker IOCB (%x).\n", rval2);
2477 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
2480 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * qla24xx_abort_target - target reset: async TMF when ql2xasynctmfenable
 * is set on an FWI2 adapter, otherwise the synchronous IOCB worker.
 */
2486 qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2488 struct qla_hw_data *ha = fcport->vha->hw;
2490 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2491 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
2493 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * qla24xx_lun_reset - LUN reset: async TMF when ql2xasynctmfenable is set
 * on an FWI2 adapter, otherwise the synchronous IOCB worker.
 */
2497 qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2499 struct qla_hw_data *ha = fcport->vha->hw;
2501 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2502 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
2504 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * qla2x00_system_error - ask the firmware to generate a system error
 * (MBC_GEN_SYSTEM_ERROR); only supported on 23xx and FWI2 adapters.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2508 qla2x00_system_error(scsi_qla_host_t *vha)
2512 mbx_cmd_t *mcp = &mc;
2513 struct qla_hw_data *ha = vha->hw;
2515 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2516 return QLA_FUNCTION_FAILED;
2518 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
2520 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2521 mcp->out_mb = MBX_0;
2525 rval = qla2x00_mailbox_command(vha, mcp);
2527 if (rval != QLA_SUCCESS) {
2528 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2530 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
2537 * qla2x00_set_serdes_params() -
/*
 * qla2x00_set_serdes_params - program SERDES transmit parameters for the
 * 1G/2G/4G rates via MBC_SERDES_PARAMS; BIT_15 marks each value as valid.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2543 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2544 uint16_t sw_em_2g, uint16_t sw_em_4g)
2548 mbx_cmd_t *mcp = &mc;
2550 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
2552 mcp->mb[0] = MBC_SERDES_PARAMS;
2554 mcp->mb[2] = sw_em_1g | BIT_15;
2555 mcp->mb[3] = sw_em_2g | BIT_15;
2556 mcp->mb[4] = sw_em_4g | BIT_15;
2557 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2559 mcp->tov = MBX_TOV_SECONDS;
2561 rval = qla2x00_mailbox_command(vha, mcp);
2563 if (rval != QLA_SUCCESS) {
2565 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2566 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2569 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
/*
 * qla2x00_stop_firmware - issue MBC_STOP_FIRMWARE (FWI2-only).  An
 * MBS_INVALID_COMMAND reply is mapped to QLA_INVALID_COMMAND so callers
 * can distinguish "not supported" from a real failure.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2576 qla2x00_stop_firmware(scsi_qla_host_t *vha)
2580 mbx_cmd_t *mcp = &mc;
2582 if (!IS_FWI2_CAPABLE(vha->hw))
2583 return QLA_FUNCTION_FAILED;
2585 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
2587 mcp->mb[0] = MBC_STOP_FIRMWARE;
2589 mcp->out_mb = MBX_1|MBX_0;
2593 rval = qla2x00_mailbox_command(vha, mcp);
2595 if (rval != QLA_SUCCESS) {
2596 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2597 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2598 rval = QLA_INVALID_COMMAND;
2600 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
/*
 * qla2x00_enable_eft_trace - enable extended firmware tracing (EFT) via
 * MBC_TRACE_CONTROL/TC_EFT_ENABLE, pointing the firmware at a 64-bit DMA
 * buffer of 'buffers' units.  FWI2-only; bails out if the PCI channel is
 * offline.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2607 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2612 mbx_cmd_t *mcp = &mc;
2614 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
2616 if (!IS_FWI2_CAPABLE(vha->hw))
2617 return QLA_FUNCTION_FAILED;
2619 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2620 return QLA_FUNCTION_FAILED;
2622 mcp->mb[0] = MBC_TRACE_CONTROL;
2623 mcp->mb[1] = TC_EFT_ENABLE;
2624 mcp->mb[2] = LSW(eft_dma);
2625 mcp->mb[3] = MSW(eft_dma);
2626 mcp->mb[4] = LSW(MSD(eft_dma));
2627 mcp->mb[5] = MSW(MSD(eft_dma));
2628 mcp->mb[6] = buffers;
2629 mcp->mb[7] = TC_AEN_DISABLE;
2630 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2631 mcp->in_mb = MBX_1|MBX_0;
2632 mcp->tov = MBX_TOV_SECONDS;
2634 rval = qla2x00_mailbox_command(vha, mcp);
2635 if (rval != QLA_SUCCESS) {
2636 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2637 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2638 rval, mcp->mb[0], mcp->mb[1]);
2640 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
/*
 * qla2x00_disable_eft_trace - stop extended firmware tracing via
 * MBC_TRACE_CONTROL/TC_EFT_DISABLE.  FWI2-only; bails out if the PCI
 * channel is offline.
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2647 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2651 mbx_cmd_t *mcp = &mc;
2653 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
2655 if (!IS_FWI2_CAPABLE(vha->hw))
2656 return QLA_FUNCTION_FAILED;
2658 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2659 return QLA_FUNCTION_FAILED;
2661 mcp->mb[0] = MBC_TRACE_CONTROL;
2662 mcp->mb[1] = TC_EFT_DISABLE;
2663 mcp->out_mb = MBX_1|MBX_0;
2664 mcp->in_mb = MBX_1|MBX_0;
2665 mcp->tov = MBX_TOV_SECONDS;
2667 rval = qla2x00_mailbox_command(vha, mcp);
2668 if (rval != QLA_SUCCESS) {
2669 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2670 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2671 rval, mcp->mb[0], mcp->mb[1]);
2673 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
/*
 * qla2x00_enable_fce_trace - enable fibre-channel event (FCE) tracing via
 * MBC_TRACE_CONTROL/TC_FCE_ENABLE on 25xx/81xx parts.  On success copies
 * the first 8 result mailboxes to mb[] (and, per the elided tail,
 * presumably the trace size to *dwords — confirm against full source).
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2680 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2681 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2685 mbx_cmd_t *mcp = &mc;
2687 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2689 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2690 return QLA_FUNCTION_FAILED;
2692 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2693 return QLA_FUNCTION_FAILED;
2695 mcp->mb[0] = MBC_TRACE_CONTROL;
2696 mcp->mb[1] = TC_FCE_ENABLE;
2697 mcp->mb[2] = LSW(fce_dma);
2698 mcp->mb[3] = MSW(fce_dma);
2699 mcp->mb[4] = LSW(MSD(fce_dma));
2700 mcp->mb[5] = MSW(MSD(fce_dma));
2701 mcp->mb[6] = buffers;
2702 mcp->mb[7] = TC_AEN_DISABLE;
2704 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
2705 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
2706 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2708 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2709 mcp->tov = MBX_TOV_SECONDS;
2711 rval = qla2x00_mailbox_command(vha, mcp);
2712 if (rval != QLA_SUCCESS) {
2713 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2714 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2715 rval, mcp->mb[0], mcp->mb[1]);
2717 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
2720 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * qla2x00_disable_fce_trace - stop FCE tracing via MBC_TRACE_CONTROL/
 * TC_FCE_DISABLE and return the final 64-bit write (mb2..mb5) and read
 * (mb6..mb9) trace pointers through *wr and *rd.  FWI2-only.
 * NOTE(review): listing has elided lines (declarations/braces/NULL
 * guards/return); comments only, code untouched.
 */
2729 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2733 mbx_cmd_t *mcp = &mc;
2735 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
2737 if (!IS_FWI2_CAPABLE(vha->hw))
2738 return QLA_FUNCTION_FAILED;
2740 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2741 return QLA_FUNCTION_FAILED;
2743 mcp->mb[0] = MBC_TRACE_CONTROL;
2744 mcp->mb[1] = TC_FCE_DISABLE;
2745 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
2746 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2747 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2749 mcp->tov = MBX_TOV_SECONDS;
2751 rval = qla2x00_mailbox_command(vha, mcp);
2752 if (rval != QLA_SUCCESS) {
2753 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2754 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2755 rval, mcp->mb[0], mcp->mb[1]);
2757 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
/* Assemble 64-bit pointers from four 16-bit mailbox words each. */
2760 *wr = (uint64_t) mcp->mb[5] << 48 |
2761 (uint64_t) mcp->mb[4] << 32 |
2762 (uint64_t) mcp->mb[3] << 16 |
2763 (uint64_t) mcp->mb[2];
2765 *rd = (uint64_t) mcp->mb[9] << 48 |
2766 (uint64_t) mcp->mb[8] << 32 |
2767 (uint64_t) mcp->mb[7] << 16 |
2768 (uint64_t) mcp->mb[6];
/*
 * qla2x00_get_idma_speed - read the current iIDMA port speed for loop_id
 * via MBC_PORT_PARAMS (mb2/mb3 zero = query).  Speed comes back in mb3;
 * mailbox statuses are copied to mb[] (copy-out lines elided).
 * NOTE(review): listing has elided lines (declarations/braces/return);
 * comments only, code untouched.
 */
2775 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2776 uint16_t *port_speed, uint16_t *mb)
2780 mbx_cmd_t *mcp = &mc;
2782 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
2784 if (!IS_IIDMA_CAPABLE(vha->hw))
2785 return QLA_FUNCTION_FAILED;
2787 mcp->mb[0] = MBC_PORT_PARAMS;
2788 mcp->mb[1] = loop_id;
2789 mcp->mb[2] = mcp->mb[3] = 0;
2790 mcp->mb[9] = vha->vp_idx;
2791 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2792 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2793 mcp->tov = MBX_TOV_SECONDS;
2795 rval = qla2x00_mailbox_command(vha, mcp);
2797 /* Return mailbox statuses. */
2804 if (rval != QLA_SUCCESS) {
2805 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2807 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
2809 *port_speed = mcp->mb[3];
/*
 * qla2x00_set_idma_speed
 *	Set the iIDMA port speed for a logged-in port via the Port Params
 *	mailbox command (MBC_PORT_PARAMS).
 *
 * Input:
 *	vha        = adapter block pointer.
 *	loop_id    = firmware loop ID of the target port.
 *	port_speed = requested speed; masked to the bits the chip supports.
 *	mb         = output: returned mailbox register values.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
2816 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2817 uint16_t port_speed, uint16_t *mb)
2821 mbx_cmd_t *mcp = &mc;
2823 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
2825 if (!IS_IIDMA_CAPABLE(vha->hw))
2826 return QLA_FUNCTION_FAILED;
2828 mcp->mb[0] = MBC_PORT_PARAMS;
2829 mcp->mb[1] = loop_id;
/* 81xx/82xx parts accept a wider speed field than 24xx/25xx. */
2831 if (IS_QLA8XXX_TYPE(vha->hw))
2832 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2834 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
2835 mcp->mb[9] = vha->vp_idx;
2836 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2837 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2838 mcp->tov = MBX_TOV_SECONDS;
2840 rval = qla2x00_mailbox_command(vha, mcp);
2842 /* Return mailbox statuses. */
2849 if (rval != QLA_SUCCESS) {
2850 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
2852 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__)
/*
 * qla24xx_report_id_acquisition
 *	Handle a Report-ID Acquisition IOCB from the firmware: log the
 *	acquired port ID(s) and, for format-1 entries, record the new D_ID
 *	on the matching virtual port and kick the DPC thread to finish
 *	configuration (cannot configure from response-queue context).
 *
 * Input:
 *	vha          = adapter block pointer.
 *	rptid_entry  = firmware report-ID IOCB payload.
 */
2859 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2860 struct vp_rpt_id_entry_24xx *rptid_entry)
/* stat packs VP status in the MSB and index info in the LSB. */
2863 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2864 struct qla_hw_data *ha = vha->hw;
2865 scsi_qla_host_t *vp;
2866 unsigned long flags;
2868 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
/* Ignore entries the firmware flagged as in error. */
2870 if (rptid_entry->entry_status != 0)
2873 if (rptid_entry->format == 0) {
2874 ql_dbg(ql_dbg_mbx, vha, 0x10b7,
2875 "Format 0 : Number of VPs setup %d, number of "
2876 "VPs acquired %d.\n",
2877 MSB(le16_to_cpu(rptid_entry->vp_count)),
2878 LSB(le16_to_cpu(rptid_entry->vp_count)));
2879 ql_dbg(ql_dbg_mbx, vha, 0x10b8,
2880 "Primary port id %02x%02x%02x.\n",
2881 rptid_entry->port_id[2], rptid_entry->port_id[1],
2882 rptid_entry->port_id[0]);
2883 } else if (rptid_entry->format == 1) {
2885 ql_dbg(ql_dbg_mbx, vha, 0x10b9,
2886 "Format 1: VP[%d] enabled - status %d - with "
2887 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2888 rptid_entry->port_id[2], rptid_entry->port_id[1],
2889 rptid_entry->port_id[0]);
/* VP index 0 is the physical port; presumably skipped unless its
 * status demands handling — TODO confirm against dropped lines. */
2892 if (vp_idx == 0 && (MSB(stat) != 1))
/* Non-zero status MSB => firmware could not acquire an ID. */
2895 if (MSB(stat) != 0) {
2896 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2897 "Could not acquire ID for VP[%d].\n", vp_idx);
/* Find the scsi_qla_host for this VP index under the vport lock. */
2901 spin_lock_irqsave(&ha->vport_slock, flags);
2902 list_for_each_entry(vp, &ha->vp_list, list)
2903 if (vp_idx == vp->vp_idx)
2905 spin_unlock_irqrestore(&ha->vport_slock, flags);
/* Record the acquired 24-bit port ID on the vport. */
2910 vp->d_id.b.domain = rptid_entry->port_id[2];
2911 vp->d_id.b.area = rptid_entry->port_id[1];
2912 vp->d_id.b.al_pa = rptid_entry->port_id[0];
2915 * Cannot configure here as we are still sitting on the
2916 * response queue. Handle it in dpc context.
2918 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
2921 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
2922 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
2923 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
/* Wake the DPC thread to perform the deferred configuration. */
2924 qla2xxx_wake_dpc(vha)
2929 * qla24xx_modify_vp_config
2930 * Change VP configuration for vha
2933 * vha = adapter block pointer.
2936 * qla2xxx local function return status code.
/*
 * qla24xx_modify_vp_config
 *	Build a VP Config IOCB (modify + enable, with this vha's WWNN/WWPN)
 *	in a DMA-pool buffer and issue it through the base (physical) port.
 *	On success the fc_vport transitions to FC_VPORT_INITIALIZING.
 *
 * Returns:
 *	qla2xxx local function return status code; QLA_MEMORY_ALLOC_FAILED
 *	when the IOCB buffer cannot be allocated.
 */
2942 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2945 struct vp_config_entry_24xx *vpmod;
2946 dma_addr_t vpmod_dma;
2947 struct qla_hw_data *ha = vha->hw;
2948 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2950 /* This can be called by the parent */
2952 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
2954 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2956 ql_log(ql_log_warn, vha, 0x10bc,
2957 "Failed to allocate modify VP IOCB.\n");
2958 return QLA_MEMORY_ALLOC_FAILED;
2961 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
2962 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
2963 vpmod->entry_count = 1;
2964 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
2965 vpmod->vp_count = 1;
2966 vpmod->vp_index1 = vha->vp_idx;
2967 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
2968 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
2969 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
2970 vpmod->entry_count = 1;
/* IOCB must go out on the physical (base) port, not the vport. */
2972 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2973 if (rval != QLA_SUCCESS) {
2974 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
2975 "Failed to issue VP config IOCB (%x).\n", rval);
/* NOTE(review): since CS_COMPLETE is 0, this `comp_status != 0` branch
 * makes the following `!= CS_COMPLETE` branch unreachable; the first
 * test was presumably meant to be entry_status — verify upstream. */
2976 } else if (vpmod->comp_status != 0) {
2977 ql_dbg(ql_dbg_mbx, vha, 0x10be,
2978 "Failed to complete IOCB -- error status (%x).\n",
2979 vpmod->comp_status);
2980 rval = QLA_FUNCTION_FAILED;
2981 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2982 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
2983 "Failed to complete IOCB -- completion status (%x).\n",
2984 le16_to_cpu(vpmod->comp_status));
2985 rval = QLA_FUNCTION_FAILED;
2988 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
2989 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
/* Buffer is freed on all paths (dropped lines funnel here). */
2991 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma)
2997 * qla24xx_control_vp
2998 * Enable a virtual port for given host
3001 * vha = virtual adapter block pointer.
3002 * cmd = VP control command to issue
3003 * index = index number for enabled VP
3006 * qla2xxx local function return status code.
/*
 * qla24xx_control_vp
 *	Build and issue a VP Control IOCB for this vha's VP index through the
 *	base port (used to enable/disable a virtual port).
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *	cmd = VP control command to place in the IOCB.
 *
 * Returns:
 *	qla2xxx local function return status code; QLA_PARAMETER_ERROR for an
 *	invalid VP index, QLA_MEMORY_ALLOC_FAILED on buffer exhaustion.
 */
3012 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3016 struct vp_ctrl_entry_24xx *vce;
3018 struct qla_hw_data *ha = vha->hw;
3019 int vp_index = vha->vp_idx;
3020 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3022 ql_dbg(ql_dbg_mbx, vha, 0x10c1,
3023 "Entered %s enabling index %d.\n", __func__, vp_index);
/* Index 0 is the physical port and may not be controlled this way. */
3025 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3026 return QLA_PARAMETER_ERROR;
3028 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3030 ql_log(ql_log_warn, vha, 0x10c2,
3031 "Failed to allocate VP control IOCB.\n");
3032 return QLA_MEMORY_ALLOC_FAILED;
3034 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3036 vce->entry_type = VP_CTRL_IOCB_TYPE;
3037 vce->entry_count = 1;
3038 vce->command = cpu_to_le16(cmd);
3039 vce->vp_count = __constant_cpu_to_le16(1);
3041 /* index map in firmware starts with 1; decrement index
3042 * this is ok as we never use index 0
3044 map = (vp_index - 1) / 8;
3045 pos = (vp_index - 1) & 7;
/* vport_lock serializes updates to the shared VP index bitmap. */
3046 mutex_lock(&ha->vport_lock);
3047 vce->vp_idx_map[map] |= 1 << pos;
3048 mutex_unlock(&ha->vport_lock);
3050 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3051 if (rval != QLA_SUCCESS) {
3052 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3053 "Failed to issue VP control IOCB (%x).\n", rval);
3054 } else if (vce->entry_status != 0) {
3055 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3056 "Failed to complete IOCB -- error status (%x).\n",
3058 rval = QLA_FUNCTION_FAILED;
3059 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3060 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
/* NOTE(review): "complet" typo in this log string — fix separately
 * (string literals may not change in a doc-only pass). */
3061 "Failed to complet IOCB -- completion status (%x).\n",
3062 le16_to_cpu(vce->comp_status));
3063 rval = QLA_FUNCTION_FAILED;
3065 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
3068 dma_pool_free(ha->s_dma_pool, vce, vce_dma)
3074 * qla2x00_send_change_request
3075 * Receive or disable RSCN request from fabric controller
3078 * ha = adapter block pointer
3079 * format = registration format:
3081 * 1 - Fabric detected registration
3082 * 2 - N_port detected registration
3083 * 3 - Full registration
3084 * FF - clear registration
3085 * vp_idx = Virtual port index
3088 * qla2x00 local function return status code.
/*
 * qla2x00_send_change_request
 *	Issue the Send Change Request mailbox command to register (or clear)
 *	RSCN notification with the fabric controller for the given vport.
 *
 * Input:
 *	vha    = adapter block pointer.
 *	format = registration format code (see header comment above).
 *	vp_idx = virtual port index placed in mb[9].
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3095 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3100 mbx_cmd_t *mcp = &mc;
3102 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
3105 * This command is implicitly executed by firmware during login for the
3109 return QLA_FUNCTION_FAILED;
3111 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3112 mcp->mb[1] = format;
3113 mcp->mb[9] = vp_idx;
3114 mcp->out_mb = MBX_9|MBX_1|MBX_0;
3115 mcp->in_mb = MBX_0|MBX_1;
3116 mcp->tov = MBX_TOV_SECONDS;
3118 rval = qla2x00_mailbox_command(vha, mcp);
3120 if (rval == QLA_SUCCESS) {
/* Mailbox transport success still requires a firmware COMPLETE status. */
3121 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram
 *	Dump RISC RAM to a host DMA buffer. Uses the extended command
 *	(MBC_DUMP_RISC_RAM_EXTENDED) when the address needs more than 16 bits
 *	or the chip is FWI2-capable; otherwise the legacy command.
 *
 * Input:
 *	vha     = adapter block pointer.
 *	req_dma = host DMA address receiving the dump.
 *	addr    = RISC RAM start address.
 *	size    = transfer size (32-bit on FWI2 parts, 16-bit otherwise).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3131 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3136 mbx_cmd_t *mcp = &mc;
3138 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
/* High address bits force the extended form of the command. */
3140 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3141 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
3142 mcp->mb[8] = MSW(addr);
3143 mcp->out_mb = MBX_8|MBX_0;
3145 mcp->mb[0] = MBC_DUMP_RISC_RAM;
3146 mcp->out_mb = MBX_0;
3148 mcp->mb[1] = LSW(addr);
/* 64-bit DMA address split across mb[2,3] (low) and mb[6,7] (high). */
3149 mcp->mb[2] = MSW(req_dma);
3150 mcp->mb[3] = LSW(req_dma);
3151 mcp->mb[6] = MSW(MSD(req_dma));
3152 mcp->mb[7] = LSW(MSD(req_dma));
3153 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
/* FWI2 chips take a 32-bit size in mb[4]/mb[5]; legacy only mb[4]. */
3154 if (IS_FWI2_CAPABLE(vha->hw)) {
3155 mcp->mb[4] = MSW(size);
3156 mcp->mb[5] = LSW(size);
3157 mcp->out_mb |= MBX_5|MBX_4;
3159 mcp->mb[4] = LSW(size);
3160 mcp->out_mb |= MBX_4;
3164 mcp->tov = MBX_TOV_SECONDS;
3166 rval = qla2x00_mailbox_command(vha, mcp);
3168 if (rval != QLA_SUCCESS) {
3169 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3170 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3172 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__)
3178 /* 84XX Support **************************************************************/
/*
 * DMA overlay used by qla84xx_verify_chip: the Verify Chip request and
 * response occupy the same s_dma_pool buffer.
 * NOTE(review): the union wrapper/member lines of this struct were lost
 * in extraction; only the two members are visible here.
 */
3180 struct cs84xx_mgmt_cmd {
3182 struct verify_chip_entry_84xx req;
3183 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip
 *	Issue a Verify Chip IOCB to the CS84xx (Menlo) device, optionally
 *	forcing a firmware update; on a chip-failure status it retries once
 *	without the update. The operational firmware version is cached in
 *	ha->cs84xx->op_fw_version under access_lock.
 *
 * Input:
 *	vha    = adapter block pointer.
 *	status = output: status[0] = completion status,
 *	         status[1] = failure code when status[0] == CS_VCS_CHIP_FAILURE.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3188 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3191 struct cs84xx_mgmt_cmd *mn;
3194 unsigned long flags;
3195 struct qla_hw_data *ha = vha->hw;
3197 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
3199 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3201 return QLA_MEMORY_ALLOC_FAILED;
/* Force an update only when a newer CS84xx firmware is staged. */
3205 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
3206 /* Diagnostic firmware? */
3207 /* options |= MENLO_DIAG_FW; */
3208 /* We update the firmware with only one data sequence. */
3209 options |= VCO_END_OF_DATA;
3213 memset(mn, 0, sizeof(*mn));
3214 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
3215 mn->p.req.entry_count = 1;
3216 mn->p.req.options = cpu_to_le16(options);
3218 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3219 "Dump of Verify Request.\n");
3220 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3221 (uint8_t *)mn, sizeof(*mn));
/* Verify/update can be slow: allow a 120-second IOCB timeout. */
3223 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3224 if (rval != QLA_SUCCESS) {
3225 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3226 "Failed to issue verify IOCB (%x).\n", rval);
3230 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3231 "Dump of Verify Response.\n");
3232 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3233 (uint8_t *)mn, sizeof(*mn));
3235 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3236 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3237 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3238 ql_dbg(ql_dbg_mbx, vha, 0x10ce,
3239 "cs=%x fc=%x.\n", status[0], status[1]);
3241 if (status[0] != CS_COMPLETE) {
3242 rval = QLA_FUNCTION_FAILED;
/* On failure, retry once with the firmware update suppressed. */
3243 if (!(options & VCO_DONT_UPDATE_FW)) {
3244 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3245 "Firmware update failed. Retrying "
3246 "without update firmware.\n");
3247 options |= VCO_DONT_UPDATE_FW;
3248 options &= ~VCO_FORCE_UPDATE;
3252 ql_dbg(ql_dbg_mbx, vha, 0x10d0,
3253 "Firmware updated to %x.\n",
3254 le32_to_cpu(mn->p.rsp.fw_ver));
3256 /* NOTE: we only update OP firmware. */
3257 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
3258 ha->cs84xx->op_fw_version =
3259 le32_to_cpu(mn->p.rsp.fw_ver);
3260 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
3266 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3268 if (rval != QLA_SUCCESS) {
3269 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
3271 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__)
/*
 * qla25xx_init_req_que
 *	Initialize a multiqueue request queue on a 25xx-class adapter via the
 *	MBC_INITIALIZE_MULTIQ mailbox command, and (for a new queue) zero its
 *	in/out pointer registers in the per-queue MMIO page.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req = request queue to initialize (dma, length, id, qos, rid, ...).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3278 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3281 unsigned long flags;
3283 mbx_cmd_t *mcp = &mc;
3284 struct device_reg_25xxmq __iomem *reg;
3285 struct qla_hw_data *ha = vha->hw;
3287 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
3289 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3290 mcp->mb[1] = req->options;
/* Queue base as a 64-bit DMA address spread over mb[2,3,6,7]. */
3291 mcp->mb[2] = MSW(LSD(req->dma));
3292 mcp->mb[3] = LSW(LSD(req->dma));
3293 mcp->mb[6] = MSW(MSD(req->dma));
3294 mcp->mb[7] = LSW(MSD(req->dma));
3295 mcp->mb[5] = req->length;
3297 mcp->mb[10] = req->rsp->id;
3298 mcp->mb[12] = req->qos;
3299 mcp->mb[11] = req->vp_idx;
3300 mcp->mb[13] = req->rid;
/* Each queue owns a QLA_QUE_PAGE-sized register window in mqiobase. */
3302 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3303 QLA_QUE_PAGE * req->id);
3305 mcp->mb[4] = req->id;
3306 /* que in ptr index */
3308 /* que out ptr index */
3310 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3311 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3313 mcp->flags = MBX_DMA_OUT;
/* BIT_0 in options marks a re-init; only fresh queues get zeroed. */
3316 spin_lock_irqsave(&ha->hardware_lock, flags);
3317 if (!(req->options & BIT_0)) {
3318 WRT_REG_DWORD(®->req_q_in, 0);
3319 WRT_REG_DWORD(®->req_q_out, 0);
3321 req->req_q_in = ®->req_q_in;
3322 req->req_q_out = ®->req_q_out;
3323 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3325 rval = qla2x00_mailbox_command(vha, mcp);
3326 if (rval != QLA_SUCCESS) {
3327 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3328 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3330 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__)
/*
 * qla25xx_init_rsp_que
 *	Initialize a multiqueue response queue on a 25xx-class adapter via
 *	MBC_INITIALIZE_MULTIQ; mirrors qla25xx_init_req_que but also passes
 *	the queue's MSI-X vector in mb[14].
 *
 * Input:
 *	vha = adapter block pointer.
 *	rsp = response queue to initialize.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3337 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3340 unsigned long flags;
3342 mbx_cmd_t *mcp = &mc;
3343 struct device_reg_25xxmq __iomem *reg;
3344 struct qla_hw_data *ha = vha->hw;
3346 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
3348 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3349 mcp->mb[1] = rsp->options;
3350 mcp->mb[2] = MSW(LSD(rsp->dma));
3351 mcp->mb[3] = LSW(LSD(rsp->dma));
3352 mcp->mb[6] = MSW(MSD(rsp->dma));
3353 mcp->mb[7] = LSW(MSD(rsp->dma));
3354 mcp->mb[5] = rsp->length;
/* MSI-X vector that the firmware will fire for this response queue. */
3355 mcp->mb[14] = rsp->msix->entry;
3356 mcp->mb[13] = rsp->rid;
3358 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3359 QLA_QUE_PAGE * rsp->id);
3361 mcp->mb[4] = rsp->id;
3362 /* que in ptr index */
3364 /* que out ptr index */
3366 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3367 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3369 mcp->flags = MBX_DMA_OUT;
/* Zero the hardware queue pointers only for a brand-new queue. */
3372 spin_lock_irqsave(&ha->hardware_lock, flags);
3373 if (!(rsp->options & BIT_0)) {
3374 WRT_REG_DWORD(®->rsp_q_out, 0);
3375 WRT_REG_DWORD(®->rsp_q_in, 0);
3378 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3380 rval = qla2x00_mailbox_command(vha, mcp);
3381 if (rval != QLA_SUCCESS) {
3382 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3383 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3385 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__)
/*
 * qla81xx_idc_ack
 *	Acknowledge an Inter-Driver Communication (IDC) notification by
 *	echoing the received registers back via the MBC_IDC_ACK command.
 *
 * Input:
 *	vha = adapter block pointer.
 *	mb  = QLA_IDC_ACK_REGS register values to return, copied to mb[1..].
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3392 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3396 mbx_cmd_t *mcp = &mc;
3398 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
3400 mcp->mb[0] = MBC_IDC_ACK;
3401 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3402 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3404 mcp->tov = MBX_TOV_SECONDS;
3406 rval = qla2x00_mailbox_command(vha, mcp);
3408 if (rval != QLA_SUCCESS) {
3409 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3410 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3412 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__)
/*
 * qla81xx_fac_get_sector_size
 *	Query the flash sector size via the Flash Access Control mailbox
 *	command (81xx only); the size is returned in mb[1].
 *
 * Input:
 *	vha         = adapter block pointer.
 *	sector_size = output: flash sector size from mb[1].
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3419 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3423 mbx_cmd_t *mcp = &mc;
3425 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3427 if (!IS_QLA81XX(vha->hw))
3428 return QLA_FUNCTION_FAILED;
3430 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3431 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3432 mcp->out_mb = MBX_1|MBX_0;
3433 mcp->in_mb = MBX_1|MBX_0;
3434 mcp->tov = MBX_TOV_SECONDS;
3436 rval = qla2x00_mailbox_command(vha, mcp);
3438 if (rval != QLA_SUCCESS) {
3439 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3440 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3441 rval, mcp->mb[0], mcp->mb[1]);
3443 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
3444 *sector_size = mcp->mb[1]
/*
 * qla81xx_fac_do_write_enable
 *	Toggle flash write access via the Flash Access Control mailbox
 *	command (81xx only).
 *
 * Input:
 *	vha    = adapter block pointer.
 *	enable = non-zero to enable writes, zero to write-protect.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3451 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3455 mbx_cmd_t *mcp = &mc;
3457 if (!IS_QLA81XX(vha->hw))
3458 return QLA_FUNCTION_FAILED;
3460 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
3462 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3463 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
3464 FAC_OPT_CMD_WRITE_PROTECT;
3465 mcp->out_mb = MBX_1|MBX_0;
3466 mcp->in_mb = MBX_1|MBX_0;
3467 mcp->tov = MBX_TOV_SECONDS;
3469 rval = qla2x00_mailbox_command(vha, mcp);
3471 if (rval != QLA_SUCCESS) {
3472 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3473 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3474 rval, mcp->mb[0], mcp->mb[1]);
3476 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__)
/*
 * qla81xx_fac_erase_sector
 *	Erase a range of flash sectors via the Flash Access Control mailbox
 *	command (81xx only).
 *
 * Input:
 *	vha    = adapter block pointer.
 *	start  = first sector address  (LSW in mb[2], MSW in mb[3]).
 *	finish = last sector address   (LSW in mb[4], MSW in mb[5]).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3483 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3487 mbx_cmd_t *mcp = &mc;
3489 if (!IS_QLA81XX(vha->hw))
3490 return QLA_FUNCTION_FAILED;
3492 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
3494 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3495 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
3496 mcp->mb[2] = LSW(start);
3497 mcp->mb[3] = MSW(start);
3498 mcp->mb[4] = LSW(finish);
3499 mcp->mb[5] = MSW(finish);
3500 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3501 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3502 mcp->tov = MBX_TOV_SECONDS;
3504 rval = qla2x00_mailbox_command(vha, mcp);
3506 if (rval != QLA_SUCCESS) {
3507 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3508 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3509 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3511 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__)
/*
 * qla81xx_restart_mpi_firmware
 *	Request a restart of the MPI (management) firmware via the
 *	MBC_RESTART_MPI_FW mailbox command.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3518 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3522 mbx_cmd_t *mcp = &mc;
3524 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
3526 mcp->mb[0] = MBC_RESTART_MPI_FW;
3527 mcp->out_mb = MBX_0;
3528 mcp->in_mb = MBX_0|MBX_1;
3529 mcp->tov = MBX_TOV_SECONDS;
3531 rval = qla2x00_mailbox_command(vha, mcp);
3533 if (rval != QLA_SUCCESS) {
3534 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3535 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3536 rval, mcp->mb[0], mcp->mb[1]);
3538 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__)
/*
 * qla2x00_read_sfp
 *	Read SFP transceiver data over the two-wire interface into a host
 *	DMA buffer via the MBC_READ_SFP mailbox command.
 *
 * Input:
 *	vha     = adapter block pointer.
 *	sfp_dma = host DMA address for the returned data.
 *	sfp     = host virtual pointer to the same buffer (used by dropped
 *	          lines — not visible in this chunk).
 *	dev/off/len/opt = two-wire device address, offset, length, options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3545 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3546 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3550 mbx_cmd_t *mcp = &mc;
3551 struct qla_hw_data *ha = vha->hw;
3553 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
3555 if (!IS_FWI2_CAPABLE(ha))
3556 return QLA_FUNCTION_FAILED;
3561 mcp->mb[0] = MBC_READ_SFP;
/* 64-bit DMA address split over mb[2,3] (low) and mb[6,7] (high). */
3563 mcp->mb[2] = MSW(sfp_dma);
3564 mcp->mb[3] = LSW(sfp_dma);
3565 mcp->mb[6] = MSW(MSD(sfp_dma));
3566 mcp->mb[7] = LSW(MSD(sfp_dma));
3570 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3571 mcp->in_mb = MBX_1|MBX_0;
3572 mcp->tov = MBX_TOV_SECONDS;
3574 rval = qla2x00_mailbox_command(vha, mcp);
3579 if (rval != QLA_SUCCESS) {
3580 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3581 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3583 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__)
/*
 * qla2x00_write_sfp
 *	Write SFP transceiver data from a host DMA buffer over the two-wire
 *	interface via the MBC_WRITE_SFP mailbox command. Mirror image of
 *	qla2x00_read_sfp.
 *
 * Input:
 *	vha     = adapter block pointer.
 *	sfp_dma = host DMA address of the data to write.
 *	sfp     = host virtual pointer to the same buffer.
 *	dev/off/len/opt = two-wire device address, offset, length, options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3590 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3591 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3595 mbx_cmd_t *mcp = &mc;
3596 struct qla_hw_data *ha = vha->hw;
3598 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
3600 if (!IS_FWI2_CAPABLE(ha))
3601 return QLA_FUNCTION_FAILED;
3609 mcp->mb[0] = MBC_WRITE_SFP;
3611 mcp->mb[2] = MSW(sfp_dma);
3612 mcp->mb[3] = LSW(sfp_dma);
3613 mcp->mb[6] = MSW(MSD(sfp_dma));
3614 mcp->mb[7] = LSW(MSD(sfp_dma));
3618 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3619 mcp->in_mb = MBX_1|MBX_0;
3620 mcp->tov = MBX_TOV_SECONDS;
3622 rval = qla2x00_mailbox_command(vha, mcp);
3624 if (rval != QLA_SUCCESS) {
3625 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3626 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3628 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__)
/*
 * qla2x00_get_xgmac_stats
 *	Fetch XGMAC (10GbE MAC) statistics into a host DMA buffer via the
 *	MBC_GET_XGMAC_STATS mailbox command (8xxx-type CNAs only). Sizes are
 *	exchanged in 32-bit words (hence the >>2 / <<2 conversions).
 *
 * Input:
 *	vha           = adapter block pointer.
 *	stats_dma     = host DMA address receiving the statistics.
 *	size_in_bytes = buffer size in bytes.
 *	actual_size   = output: bytes actually returned (mb[2] words * 4).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3635 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3636 uint16_t size_in_bytes, uint16_t *actual_size)
3640 mbx_cmd_t *mcp = &mc;
3642 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3644 if (!IS_QLA8XXX_TYPE(vha->hw))
3645 return QLA_FUNCTION_FAILED;
3647 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3648 mcp->mb[2] = MSW(stats_dma);
3649 mcp->mb[3] = LSW(stats_dma);
3650 mcp->mb[6] = MSW(MSD(stats_dma));
3651 mcp->mb[7] = LSW(MSD(stats_dma));
/* Firmware expects the buffer size in dwords. */
3652 mcp->mb[8] = size_in_bytes >> 2;
3653 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3654 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3655 mcp->tov = MBX_TOV_SECONDS;
3657 rval = qla2x00_mailbox_command(vha, mcp);
3659 if (rval != QLA_SUCCESS) {
3660 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3661 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3662 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3664 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
/* Convert the returned dword count back to bytes. */
3667 *actual_size = mcp->mb[2] << 2
/*
 * qla2x00_get_dcbx_params
 *	Retrieve the DCBX parameter TLV block into a host DMA buffer via the
 *	MBC_GET_DCBX_PARAMS mailbox command (8xxx-type CNAs only).
 *
 * Input:
 *	vha     = adapter block pointer.
 *	tlv_dma = host DMA address receiving the TLV data.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3674 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3679 mbx_cmd_t *mcp = &mc;
3681 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3683 if (!IS_QLA8XXX_TYPE(vha->hw))
3684 return QLA_FUNCTION_FAILED;
3686 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3688 mcp->mb[2] = MSW(tlv_dma);
3689 mcp->mb[3] = LSW(tlv_dma);
3690 mcp->mb[6] = MSW(MSD(tlv_dma));
3691 mcp->mb[7] = LSW(MSD(tlv_dma));
3693 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3694 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3695 mcp->tov = MBX_TOV_SECONDS;
3697 rval = qla2x00_mailbox_command(vha, mcp);
3699 if (rval != QLA_SUCCESS) {
3700 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3701 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3702 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3704 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__)
/*
 * qla2x00_read_ram_word
 *	Read one 32-bit word from RISC RAM via MBC_READ_RAM_EXTENDED
 *	(FWI2-capable adapters only); the word comes back in mb[3]:mb[2].
 *
 * Input:
 *	vha       = adapter block pointer.
 *	risc_addr = RISC RAM address (split over mb[1]/mb[8]).
 *	data      = output: the 32-bit word read.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3711 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3715 mbx_cmd_t *mcp = &mc;
3717 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
3719 if (!IS_FWI2_CAPABLE(vha->hw))
3720 return QLA_FUNCTION_FAILED;
3722 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3723 mcp->mb[1] = LSW(risc_addr);
3724 mcp->mb[8] = MSW(risc_addr);
3725 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3726 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3729 rval = qla2x00_mailbox_command(vha, mcp);
3730 if (rval != QLA_SUCCESS) {
3731 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3732 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3734 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
/* Assemble the 32-bit result from the two returned 16-bit words. */
3735 *data = mcp->mb[3] << 16 | mcp->mb[2]
/*
 * qla2x00_loopback_test
 *	Run the Diagnostic Loopback mailbox command: firmware transmits the
 *	send buffer and receives it back into the receive buffer, for one
 *	iteration. Full mailbox state (64 bytes) is copied out to mresp for
 *	the caller (BSG diagnostics path).
 *
 * Input:
 *	vha  = adapter block pointer.
 *	mreq = loopback request (options, transfer size, send/rcv DMA).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3742 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3747 mbx_cmd_t *mcp = &mc;
3748 uint32_t iter_cnt = 0x1;
3750 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
3752 memset(mcp->mb, 0 , sizeof(mcp->mb));
3753 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
3754 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
3756 /* transfer count */
3757 mcp->mb[10] = LSW(mreq->transfer_size);
3758 mcp->mb[11] = MSW(mreq->transfer_size);
3760 /* send data address */
3761 mcp->mb[14] = LSW(mreq->send_dma);
3762 mcp->mb[15] = MSW(mreq->send_dma);
3763 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3764 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3766 /* receive data address */
3767 mcp->mb[16] = LSW(mreq->rcv_dma);
3768 mcp->mb[17] = MSW(mreq->rcv_dma);
3769 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3770 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3772 /* Iteration count */
3773 mcp->mb[18] = LSW(iter_cnt);
3774 mcp->mb[19] = MSW(iter_cnt);
3776 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3777 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
/* CNAs additionally carry the FCoE FCF index in mb[2]. */
3778 if (IS_QLA8XXX_TYPE(vha->hw))
3779 mcp->out_mb |= MBX_2;
3780 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3782 mcp->buf_size = mreq->transfer_size;
3783 mcp->tov = MBX_TOV_SECONDS;
3784 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3786 rval = qla2x00_mailbox_command(vha, mcp);
3788 if (rval != QLA_SUCCESS) {
3789 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
3790 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
3791 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3792 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3794 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
3797 /* Copy mailbox information */
3798 memcpy( mresp, mcp->mb, 64)
/*
 * qla2x00_echo_test
 *	Run the Diagnostic Echo mailbox command: transmit the send buffer to
 *	the fabric and receive the echoed frame back. Full mailbox state is
 *	copied out to mresp for the caller (BSG diagnostics path).
 *
 * Input:
 *	vha  = adapter block pointer.
 *	mreq = echo request (options, transfer size, send/rcv DMA).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3803 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3808 mbx_cmd_t *mcp = &mc;
3809 struct qla_hw_data *ha = vha->hw;
3811 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
3813 memset(mcp->mb, 0 , sizeof(mcp->mb));
3814 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
3815 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
/* CNAs need BIT_15 and the FCoE FCF index in mb[2]. */
3816 if (IS_QLA8XXX_TYPE(ha)) {
3817 mcp->mb[1] |= BIT_15;
3818 mcp->mb[2] = vha->fcoe_fcf_idx;
3820 mcp->mb[16] = LSW(mreq->rcv_dma);
3821 mcp->mb[17] = MSW(mreq->rcv_dma);
3822 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3823 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3825 mcp->mb[10] = LSW(mreq->transfer_size);
3827 mcp->mb[14] = LSW(mreq->send_dma);
3828 mcp->mb[15] = MSW(mreq->send_dma);
3829 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3830 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3832 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3833 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3834 if (IS_QLA8XXX_TYPE(ha))
3835 mcp->out_mb |= MBX_2;
3838 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
3839 mcp->in_mb |= MBX_1;
3840 if (IS_QLA8XXX_TYPE(ha))
3841 mcp->in_mb |= MBX_3;
3843 mcp->tov = MBX_TOV_SECONDS;
3844 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3845 mcp->buf_size = mreq->transfer_size;
3847 rval = qla2x00_mailbox_command(vha, mcp);
3849 if (rval != QLA_SUCCESS) {
3850 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
3851 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3852 rval, mcp->mb[0], mcp->mb[1]);
3854 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
3857 /* Copy mailbox information */
3858 memcpy(mresp, mcp->mb, 64)
/*
 * qla84xx_reset_chip
 *	Reset the CS84xx (Menlo) chip via the MBC_ISP84XX_RESET mailbox
 *	command, optionally dropping it into diagnostic mode.
 *
 * Input:
 *	vha               = adapter block pointer.
 *	enable_diagnostic = non-zero to enable diagnostic firmware mode.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3863 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3867 mbx_cmd_t *mcp = &mc;
3869 ql_dbg(ql_dbg_mbx, vha, 0x10fd,
3870 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3872 mcp->mb[0] = MBC_ISP84XX_RESET;
3873 mcp->mb[1] = enable_diagnostic;
3874 mcp->out_mb = MBX_1|MBX_0;
3875 mcp->in_mb = MBX_1|MBX_0;
3876 mcp->tov = MBX_TOV_SECONDS;
3877 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3878 rval = qla2x00_mailbox_command(vha, mcp);
3880 if (rval != QLA_SUCCESS)
3881 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3883 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__)
/*
 * qla2x00_write_ram_word
 *	Write one 32-bit word to RISC RAM via MBC_WRITE_RAM_WORD_EXTENDED
 *	(FWI2-capable adapters only).
 *
 * Input:
 *	vha       = adapter block pointer.
 *	risc_addr = RISC RAM address (split over mb[1]/mb[8]).
 *	data      = 32-bit word to write (split over mb[2]/mb[3]).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3889 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3893 mbx_cmd_t *mcp = &mc;
3895 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
3897 if (!IS_FWI2_CAPABLE(vha->hw))
3898 return QLA_FUNCTION_FAILED;
3900 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3901 mcp->mb[1] = LSW(risc_addr);
3902 mcp->mb[2] = LSW(data);
3903 mcp->mb[3] = MSW(data);
3904 mcp->mb[8] = MSW(risc_addr);
3905 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3909 rval = qla2x00_mailbox_command(vha, mcp);
3910 if (rval != QLA_SUCCESS) {
3911 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3912 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3914 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__)
/*
 * qla81xx_write_mpi_register
 *	Write an MPI register by banging the mailbox hardware registers
 *	directly (bypassing qla2x00_mailbox_command) and busy-polling the
 *	host-status register for the completion interrupt. Used in contexts
 *	where the normal interrupt-driven mailbox path is unavailable.
 *
 * Input:
 *	vha = adapter block pointer.
 *	mb  = four register values written to mailbox1..mailbox4.
 *
 * Returns:
 *	mb0 status masked with MBS_MASK on completion, else
 *	QLA_FUNCTION_FAILED on poll timeout.
 */
3921 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3924 uint32_t stat, timer;
3926 struct qla_hw_data *ha = vha->hw;
3927 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3931 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
3933 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3935 /* Write the MBC data to the registers */
3936 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER);
3937 WRT_REG_WORD(®->mailbox1, mb[0]);
3938 WRT_REG_WORD(®->mailbox2, mb[1]);
3939 WRT_REG_WORD(®->mailbox3, mb[2]);
3940 WRT_REG_WORD(®->mailbox4, mb[3]);
/* Ring the doorbell: raise the host interrupt to the RISC. */
3942 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT);
3944 /* Poll for MBC interrupt */
3945 for (timer = 6000000; timer; timer--) {
3946 /* Check for pending interrupts. */
3947 stat = RD_REG_DWORD(®->host_status);
3948 if (stat & HSRX_RISC_INT) {
/* Only mailbox-type interrupt statuses complete this command. */
3951 if (stat == 0x1 || stat == 0x2 ||
3952 stat == 0x10 || stat == 0x11) {
3953 set_bit(MBX_INTERRUPT,
3954 &ha->mbx_cmd_flags);
3955 mb0 = RD_REG_WORD(®->mailbox0);
3956 WRT_REG_DWORD(®->hccr,
3957 HCCRX_CLR_RISC_INT);
3958 RD_REG_DWORD(®->hccr);
3965 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
3966 rval = mb0 & MBS_MASK;
3968 rval = QLA_FUNCTION_FAILED;
3970 if (rval != QLA_SUCCESS) {
3971 ql_dbg(ql_dbg_mbx, vha, 0x1104,
3972 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
3974 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__)
/*
 * qla2x00_get_data_rate
 *	Query the current link data rate via the MBC_DATA_RATE mailbox
 *	command and cache it in ha->link_data_rate.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3980 qla2x00_get_data_rate(scsi_qla_host_t *vha)
3984 mbx_cmd_t *mcp = &mc;
3985 struct qla_hw_data *ha = vha->hw;
3987 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
3989 if (!IS_FWI2_CAPABLE(ha))
3990 return QLA_FUNCTION_FAILED;
3992 mcp->mb[0] = MBC_DATA_RATE;
3994 mcp->out_mb = MBX_1|MBX_0;
3995 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3996 mcp->tov = MBX_TOV_SECONDS;
3998 rval = qla2x00_mailbox_command(vha, mcp);
3999 if (rval != QLA_SUCCESS) {
4000 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4001 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4003 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
/* 0x7 appears to be a sentinel value that is not cached — TODO confirm. */
4004 if (mcp->mb[1] != 0x7)
4005 ha->link_data_rate = mcp->mb[1]
/*
 * qla81xx_get_port_config
 *	Read the four port-configuration words via MBC_GET_PORT_CONFIG
 *	(81xx only) and copy mb[1..4] back to the caller unmodified.
 *
 * Input:
 *	vha = adapter block pointer.
 *	mb  = output: four uint16_t configuration words.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
4012 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4016 mbx_cmd_t *mcp = &mc;
4017 struct qla_hw_data *ha = vha->hw;
4019 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
4021 if (!IS_QLA81XX(ha))
4022 return QLA_FUNCTION_FAILED;
4023 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4024 mcp->out_mb = MBX_0;
4025 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4026 mcp->tov = MBX_TOV_SECONDS;
4029 rval = qla2x00_mailbox_command(vha, mcp);
4031 if (rval != QLA_SUCCESS) {
4032 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4033 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4035 /* Copy all bits to preserve original value */
4036 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4038 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__)
/*
 * qla81xx_set_port_config
 *	Write the four port-configuration words via MBC_SET_PORT_CONFIG;
 *	companion to qla81xx_get_port_config.
 *
 * Input:
 *	vha = adapter block pointer.
 *	mb  = four uint16_t configuration words copied into mb[1..4].
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
4044 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4048 mbx_cmd_t *mcp = &mc;
4050 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
4052 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4053 /* Copy all bits to preserve original setting */
4054 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
4055 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4057 mcp->tov = MBX_TOV_SECONDS;
4059 rval = qla2x00_mailbox_command(vha, mcp);
4061 if (rval != QLA_SUCCESS) {
4062 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4063 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4065 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__)
/*
 * qla24xx_set_fcp_prio
 *	Set the FCP priority (low nibble of mb[4]) for a logged-in port via
 *	the Port Params mailbox command (24xx/25xx only).
 *
 * Input:
 *	vha      = adapter block pointer.
 *	loop_id  = firmware loop ID of the target port.
 *	priority = FCP priority value, masked to 4 bits.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
4072 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4077 mbx_cmd_t *mcp = &mc;
4078 struct qla_hw_data *ha = vha->hw;
4080 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
4082 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4083 return QLA_FUNCTION_FAILED;
4085 mcp->mb[0] = MBC_PORT_PARAMS;
4086 mcp->mb[1] = loop_id;
/* Dropped lines presumably set mb[2] based on fcp_prio_enabled — TODO. */
4087 if (ha->flags.fcp_prio_enabled)
4091 mcp->mb[4] = priority & 0xf;
4092 mcp->mb[9] = vha->vp_idx;
4093 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4094 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4097 rval = qla2x00_mailbox_command(vha, mcp);
4105 if (rval != QLA_SUCCESS) {
4106 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4108 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__)
/*
 * qla2x00_get_thermal_temp
 *	Read the board temperature from the thermal sensor over the SFP
 *	two-wire interface (device 0x98): integer part at offset 0x01,
 *	fractional part at 0x10 (top two bits, in units of 0.25 => *25).
 *	On any read failure thermal support is latched off.
 *
 * Input:
 *	vha  = adapter block pointer.
 *	temp = output: integer degrees.
 *	frac = output: hundredths (0/25/50/75).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
4115 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4119 struct qla_hw_data *ha = vha->hw;
4121 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
4124 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4125 if (rval != QLA_SUCCESS) {
4126 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4127 ha->flags.thermal_supported = 0;
4133 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4134 if (rval != QLA_SUCCESS) {
4135 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4136 ha->flags.thermal_supported = 0;
/* Top two bits encode quarters of a degree; scale to hundredths. */
4139 *frac = (byte >> 6) * 25;
4141 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__)
/*
 * qla82xx_mbx_intr_enable
 *	Ask the firmware to enable interrupt delivery via the
 *	MBC_TOGGLE_INTERRUPT mailbox command (mb[1] set by dropped lines).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
4147 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4150 struct qla_hw_data *ha = vha->hw;
4152 mbx_cmd_t *mcp = &mc;
4154 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
4156 if (!IS_FWI2_CAPABLE(ha))
4157 return QLA_FUNCTION_FAILED;
4159 memset(mcp, 0, sizeof(mbx_cmd_t));
4160 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4163 mcp->out_mb = MBX_1|MBX_0;
4168 rval = qla2x00_mailbox_command(vha, mcp);
4169 if (rval != QLA_SUCCESS) {
4170 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4171 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4173 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__)
4180 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4183 struct qla_hw_data *ha = vha->hw;
4185 mbx_cmd_t *mcp = &mc;
4187 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
4189 if (!IS_QLA82XX(ha))
4190 return QLA_FUNCTION_FAILED;
4192 memset(mcp, 0, sizeof(mbx_cmd_t));
4193 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4196 mcp->out_mb = MBX_1|MBX_0;
4201 rval = qla2x00_mailbox_command(vha, mcp);
4202 if (rval != QLA_SUCCESS) {
4203 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4204 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4206 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
4213 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4215 struct qla_hw_data *ha = vha->hw;
4217 mbx_cmd_t *mcp = &mc;
4218 int rval = QLA_FUNCTION_FAILED;
4220 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
4222 memset(mcp->mb, 0 , sizeof(mcp->mb));
4223 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4224 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4225 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4226 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4228 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4229 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4230 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4232 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4233 mcp->tov = MBX_TOV_SECONDS;
4234 rval = qla2x00_mailbox_command(vha, mcp);
4236 /* Always copy back return mailbox values. */
4237 if (rval != QLA_SUCCESS) {
4238 ql_dbg(ql_dbg_mbx, vha, 0x1120,
4239 "mailbox command FAILED=0x%x, subcode=%x.\n",
4240 (mcp->mb[1] << 16) | mcp->mb[0],
4241 (mcp->mb[3] << 16) | mcp->mb[2]);
4243 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
4244 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4245 if (!ha->md_template_size) {
4246 ql_dbg(ql_dbg_mbx, vha, 0x1122,
4247 "Null template size obtained.\n");
4248 rval = QLA_FUNCTION_FAILED;
4255 qla82xx_md_get_template(scsi_qla_host_t *vha)
4257 struct qla_hw_data *ha = vha->hw;
4259 mbx_cmd_t *mcp = &mc;
4260 int rval = QLA_FUNCTION_FAILED;
4262 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
4264 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4265 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4266 if (!ha->md_tmplt_hdr) {
4267 ql_log(ql_log_warn, vha, 0x1124,
4268 "Unable to allocate memory for Minidump template.\n");
4272 memset(mcp->mb, 0 , sizeof(mcp->mb));
4273 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4274 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4275 mcp->mb[2] = LSW(RQST_TMPLT);
4276 mcp->mb[3] = MSW(RQST_TMPLT);
4277 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
4278 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
4279 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
4280 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
4281 mcp->mb[8] = LSW(ha->md_template_size);
4282 mcp->mb[9] = MSW(ha->md_template_size);
4284 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4285 mcp->tov = MBX_TOV_SECONDS;
4286 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4287 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4288 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4289 rval = qla2x00_mailbox_command(vha, mcp);
4291 if (rval != QLA_SUCCESS) {
4292 ql_dbg(ql_dbg_mbx, vha, 0x1125,
4293 "mailbox command FAILED=0x%x, subcode=%x.\n",
4294 ((mcp->mb[1] << 16) | mcp->mb[0]),
4295 ((mcp->mb[3] << 16) | mcp->mb[2]));
4297 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
4302 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4305 struct qla_hw_data *ha = vha->hw;
4307 mbx_cmd_t *mcp = &mc;
4309 if (!IS_QLA82XX(ha))
4310 return QLA_FUNCTION_FAILED;
4312 ql_dbg(ql_dbg_mbx, vha, 0x1127,
4313 "Entered %s.\n", __func__);
4315 memset(mcp, 0, sizeof(mbx_cmd_t));
4316 mcp->mb[0] = MBC_SET_LED_CONFIG;
4322 mcp->out_mb = MBX_7|MBX_0;
4327 rval = qla2x00_mailbox_command(vha, mcp);
4328 if (rval != QLA_SUCCESS) {
4329 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4330 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4332 ql_dbg(ql_dbg_mbx, vha, 0x1129,
4333 "Done %s.\n", __func__);