1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31 #include <linux/aer.h>
32 #include <linux/slab.h>
33 #include <linux/firmware.h>
35 #include <scsi/scsi.h>
36 #include <scsi/scsi_device.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_transport_fc.h>
43 #include "lpfc_sli4.h"
45 #include "lpfc_disc.h"
46 #include "lpfc_scsi.h"
48 #include "lpfc_logmsg.h"
49 #include "lpfc_crtn.h"
50 #include "lpfc_vport.h"
51 #include "lpfc_version.h"
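/* Allocation order and lock for the driver's data/DIF debug dump buffers */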
54 unsigned long _dump_buf_data_order;
56 unsigned long _dump_buf_dif_order;
57 spinlock_t _dump_buf_lock;
59 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
60 static int lpfc_post_rcv_buf(struct lpfc_hba *);
61 static int lpfc_sli4_queue_create(struct lpfc_hba *);
62 static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
63 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
64 static int lpfc_setup_endian_order(struct lpfc_hba *);
65 static int lpfc_sli4_read_config(struct lpfc_hba *);
66 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
67 static void lpfc_free_sgl_list(struct lpfc_hba *);
68 static int lpfc_init_sgl_list(struct lpfc_hba *);
69 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
70 static void lpfc_free_active_sgl(struct lpfc_hba *);
71 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
72 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
73 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
74 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
75 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
77 static struct scsi_transport_template *lpfc_transport_template = NULL;
78 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
79 static DEFINE_IDR(lpfc_hba_index);
82 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
83 * @phba: pointer to lpfc hba data structure.
85 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
86 * mailbox command. It retrieves the revision information from the HBA and
87 * collects the Vital Product Data (VPD) about the HBA for preparing the
88 * configuration of the HBA.
92 * -ERESTART - requests the SLI layer to reset the HBA and try again.
93 * Any other value - indicates an error.
96 lpfc_config_port_prep(struct lpfc_hba *phba)
98 lpfc_vpd_t *vp = &phba->vpd;
102 char *lpfc_vpd_data = NULL;
104 static char licensed[56] =
105 "key unlock for use with gnu public licensed code only\0";
106 static int init_key = 1;
108 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
110 phba->link_state = LPFC_HBA_ERROR;
115 phba->link_state = LPFC_INIT_MBX_CMDS;
117 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
119 uint32_t *ptext = (uint32_t *) licensed;
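/* Byte-swap the license key string into big-endian words before it is
 * copied into the READ_NVPARM mailbox command below.
 */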
121 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
122 *ptext = cpu_to_be32(*ptext);
126 lpfc_read_nv(phba, pmb);
127 memset((char*)mb->un.varRDnvp.rsvd3, 0,
128 sizeof (mb->un.varRDnvp.rsvd3));
129 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
132 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
134 if (rc != MBX_SUCCESS) {
135 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
136 "0324 Config Port initialization "
137 "error, mbxCmd x%x READ_NVPARM, "
139 mb->mbxCommand, mb->mbxStatus);
140 mempool_free(pmb, phba->mbox_mem_pool);
143 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
145 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
149 phba->sli3_options = 0x0;
151 /* Setup and issue mailbox READ REV command */
152 lpfc_read_rev(phba, pmb);
153 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
154 if (rc != MBX_SUCCESS) {
155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
156 "0439 Adapter failed to init, mbxCmd x%x "
157 "READ_REV, mbxStatus x%x\n",
158 mb->mbxCommand, mb->mbxStatus);
159 mempool_free( pmb, phba->mbox_mem_pool);
165 * The value of rr must be 1 since the driver set the cv field to 1.
166 * This setting requires the FW to set all revision fields.
168 if (mb->un.varRdRev.rr == 0) {
170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
171 "0440 Adapter failed to init, READ_REV has "
172 "missing revision information.\n");
173 mempool_free(pmb, phba->mbox_mem_pool);
177 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
178 mempool_free(pmb, phba->mbox_mem_pool);
182 /* Save information as VPD data */
184 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
185 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
186 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
187 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
188 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
189 vp->rev.biuRev = mb->un.varRdRev.biuRev;
190 vp->rev.smRev = mb->un.varRdRev.smRev;
191 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
192 vp->rev.endecRev = mb->un.varRdRev.endecRev;
193 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
194 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
195 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
196 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
197 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
198 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
200 /* If the sli feature level is less than 9, we must
201 * tear down all RPIs and VPIs on link down if NPIV
204 if (vp->rev.feaLevelHigh < 9)
205 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
207 if (lpfc_is_LC_HBA(phba->pcidev->device))
208 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
209 sizeof (phba->RandomData));
211 /* Get adapter VPD information */
212 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
219 if (rc != MBX_SUCCESS) {
220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
221 "0441 VPD not present on adapter, "
222 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
223 mb->mbxCommand, mb->mbxStatus);
224 mb->un.varDmp.word_cnt = 0;
226 /* dump mem may return a zero when finished or we got a
227 * mailbox error, either way we are done.
229 if (mb->un.varDmp.word_cnt == 0)
231 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
232 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
233 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
234 lpfc_vpd_data + offset,
235 mb->un.varDmp.word_cnt);
236 offset += mb->un.varDmp.word_cnt;
237 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
238 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
240 kfree(lpfc_vpd_data);
242 mempool_free(pmb, phba->mbox_mem_pool);
247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
248 * @phba: pointer to lpfc hba data structure.
249 * @pmboxq: pointer to the driver internal queue element for mailbox command.
251 * This is the completion handler for driver's configuring asynchronous event
252 * mailbox command to the device. If the mailbox command returns successfully,
253 * it will set the internal async event support flag to 1; otherwise, it will
254 * set the internal async event support flag to 0.
257 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
260 phba->temp_sensor_support = 1;
262 phba->temp_sensor_support = 0;
263 mempool_free(pmboxq, phba->mbox_mem_pool);
268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
269 * @phba: pointer to lpfc hba data structure.
270 * @pmboxq: pointer to the driver internal queue element for mailbox command.
272 * This is the completion handler for the dump mailbox command for getting
273 * wake up parameters. When this command completes, the response contains the
274 * Option ROM version of the HBA. This function translates the version number
275 * into a human readable string and stores it in OptionROMVersion.
278 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
281 uint32_t prog_id_word;
283 /* character array used for decoding dist type. */
284 char dist_char[] = "nabx";
286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
287 mempool_free(pmboxq, phba->mbox_mem_pool);
291 prg = (struct prog_id *) &prog_id_word;
293 /* word 7 contains the option rom version */
294 prog_id_word = pmboxq->u.mb.un.varWords[7];
296 /* Decode the Option rom version word to a readable string */
298 dist = dist_char[prg->dist];
300 if ((prg->dist == 3) && (prg->num == 0))
301 sprintf(phba->OptionROMVersion, "%d.%d%d",
302 prg->ver, prg->rev, prg->lev);
304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305 prg->ver, prg->rev, prg->lev,
307 mempool_free(pmboxq, phba->mbox_mem_pool);
312 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
313 * cfg_soft_wwnn, cfg_soft_wwpn
314 * @vport: pointer to lpfc vport data structure.
321 lpfc_update_vport_wwn(struct lpfc_vport *vport)
323 /* If the soft name exists then update it using the service params */
324 if (vport->phba->cfg_soft_wwnn)
325 u64_to_wwn(vport->phba->cfg_soft_wwnn,
326 vport->fc_sparam.nodeName.u.wwn);
327 if (vport->phba->cfg_soft_wwpn)
328 u64_to_wwn(vport->phba->cfg_soft_wwpn,
329 vport->fc_sparam.portName.u.wwn);
332 * If the name is empty or there exists a soft name
333 * then copy the service params name, otherwise use the fc name
335 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
336 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
337 sizeof(struct lpfc_name));
339 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
340 sizeof(struct lpfc_name));
342 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
343 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
344 sizeof(struct lpfc_name));
346 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
347 sizeof(struct lpfc_name));
351 * lpfc_config_port_post - Perform lpfc initialization after config port
352 * @phba: pointer to lpfc hba data structure.
354 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
355 * command call. It performs all internal resource and state setups on the
356 * port: post IOCB buffers, enable appropriate host interrupt attentions,
357 * ELS ring timers, etc.
361 * Any other value - error.
364 lpfc_config_port_post(struct lpfc_hba *phba)
366 struct lpfc_vport *vport = phba->pport;
367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
370 struct lpfc_dmabuf *mp;
371 struct lpfc_sli *psli = &phba->sli;
372 uint32_t status, timeout;
376 spin_lock_irq(&phba->hbalock);
378 * If the Config port completed correctly the HBA is not
379 * overheated any more.
381 if (phba->over_temp_state == HBA_OVER_TEMP)
382 phba->over_temp_state = HBA_NORMAL_TEMP;
383 spin_unlock_irq(&phba->hbalock);
385 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
387 phba->link_state = LPFC_HBA_ERROR;
392 /* Get login parameters for NID. */
393 rc = lpfc_read_sparam(phba, pmb, 0);
395 mempool_free(pmb, phba->mbox_mem_pool);
400 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
402 "0448 Adapter failed init, mbxCmd x%x "
403 "READ_SPARM mbxStatus x%x\n",
404 mb->mbxCommand, mb->mbxStatus);
405 phba->link_state = LPFC_HBA_ERROR;
406 mp = (struct lpfc_dmabuf *) pmb->context1;
407 mempool_free(pmb, phba->mbox_mem_pool);
408 lpfc_mbuf_free(phba, mp->virt, mp->phys);
413 mp = (struct lpfc_dmabuf *) pmb->context1;
415 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
416 lpfc_mbuf_free(phba, mp->virt, mp->phys);
418 pmb->context1 = NULL;
419 lpfc_update_vport_wwn(vport);
421 /* Update the fc_host data structures with new wwn. */
422 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
423 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
424 fc_host_max_npiv_vports(shost) = phba->max_vpi;
426 /* If no serial number in VPD data, use low 6 bytes of WWNN */
427 /* This should be consolidated into parse_vpd ? - mr */
428 if (phba->SerialNumber[0] == 0) {
431 outptr = &vport->fc_nodename.u.s.IEEE[0];
432 for (i = 0; i < 12; i++) {
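/* Each byte of the IEEE address yields two serial-number characters:
 * nibble values 0-9 map to '0'-'9' and 10-15 map to 'a'-'f'.
 */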
434 j = ((status & 0xf0) >> 4);
436 phba->SerialNumber[i] =
437 (char)((uint8_t) 0x30 + (uint8_t) j);
439 phba->SerialNumber[i] =
440 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
444 phba->SerialNumber[i] =
445 (char)((uint8_t) 0x30 + (uint8_t) j);
447 phba->SerialNumber[i] =
448 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
452 lpfc_read_config(phba, pmb);
454 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
456 "0453 Adapter failed to init, mbxCmd x%x "
457 "READ_CONFIG, mbxStatus x%x\n",
458 mb->mbxCommand, mb->mbxStatus);
459 phba->link_state = LPFC_HBA_ERROR;
460 mempool_free( pmb, phba->mbox_mem_pool);
464 /* Check if the port is disabled */
465 lpfc_sli_read_link_ste(phba);
467 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
468 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
469 phba->cfg_hba_queue_depth =
470 (mb->un.varRdConfig.max_xri + 1) -
471 lpfc_sli4_get_els_iocb_cnt(phba);
473 phba->lmt = mb->un.varRdConfig.lmt;
475 /* Get the default values for Model Name and Description */
476 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
478 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
479 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
480 && !(phba->lmt & LMT_1Gb))
481 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
482 && !(phba->lmt & LMT_2Gb))
483 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
484 && !(phba->lmt & LMT_4Gb))
485 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
486 && !(phba->lmt & LMT_8Gb))
487 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
488 && !(phba->lmt & LMT_10Gb))
489 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
490 && !(phba->lmt & LMT_16Gb))) {
491 /* Reset link speed to auto */
492 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
493 "1302 Invalid speed for this board: "
494 "Reset link speed to auto: x%x\n",
495 phba->cfg_link_speed);
496 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
499 phba->link_state = LPFC_LINK_DOWN;
501 /* Only process IOCBs on ELS ring till hba_state is READY */
502 if (psli->ring[psli->extra_ring].cmdringaddr)
503 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
504 if (psli->ring[psli->fcp_ring].cmdringaddr)
505 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
506 if (psli->ring[psli->next_ring].cmdringaddr)
507 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
509 /* Post receive buffers for desired rings */
510 if (phba->sli_rev != 3)
511 lpfc_post_rcv_buf(phba);
514 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
516 if (phba->intr_type == MSIX) {
517 rc = lpfc_config_msi(phba, pmb);
519 mempool_free(pmb, phba->mbox_mem_pool);
522 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
523 if (rc != MBX_SUCCESS) {
524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
525 "0352 Config MSI mailbox command "
526 "failed, mbxCmd x%x, mbxStatus x%x\n",
527 pmb->u.mb.mbxCommand,
528 pmb->u.mb.mbxStatus);
529 mempool_free(pmb, phba->mbox_mem_pool);
534 spin_lock_irq(&phba->hbalock);
535 /* Initialize ERATT handling flag */
536 phba->hba_flag &= ~HBA_ERATT_HANDLED;
538 /* Enable appropriate host interrupts */
539 if (lpfc_readl(phba->HCregaddr, &status)) {
540 spin_unlock_irq(&phba->hbalock);
543 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
544 if (psli->num_rings > 0)
545 status |= HC_R0INT_ENA;
546 if (psli->num_rings > 1)
547 status |= HC_R1INT_ENA;
548 if (psli->num_rings > 2)
549 status |= HC_R2INT_ENA;
550 if (psli->num_rings > 3)
551 status |= HC_R3INT_ENA;
553 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
554 (phba->cfg_poll & DISABLE_FCP_RING_INT))
555 status &= ~(HC_R0INT_ENA);
557 writel(status, phba->HCregaddr);
558 readl(phba->HCregaddr); /* flush */
559 spin_unlock_irq(&phba->hbalock);
561 /* Set up ring-0 (ELS) timer */
562 timeout = phba->fc_ratov * 2;
563 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
564 /* Set up heart beat (HB) timer */
565 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
566 phba->hb_outstanding = 0;
567 phba->last_completion_time = jiffies;
568 /* Set up error attention (ERATT) polling timer */
569 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
571 if (phba->hba_flag & LINK_DISABLED) {
572 lpfc_printf_log(phba,
574 "2598 Adapter Link is disabled.\n");
575 lpfc_down_link(phba, pmb);
576 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
577 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
578 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
579 lpfc_printf_log(phba,
581 "2599 Adapter failed to issue DOWN_LINK"
582 " mbox command rc 0x%x\n", rc);
584 mempool_free(pmb, phba->mbox_mem_pool);
587 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
588 lpfc_init_link(phba, pmb, phba->cfg_topology,
589 phba->cfg_link_speed);
590 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
591 lpfc_set_loopback_flag(phba);
592 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
593 if (rc != MBX_SUCCESS) {
594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
595 "0454 Adapter failed to init, mbxCmd x%x "
596 "INIT_LINK, mbxStatus x%x\n",
597 mb->mbxCommand, mb->mbxStatus);
599 /* Clear all interrupt enable conditions */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
602 /* Clear all pending interrupts */
603 writel(0xffffffff, phba->HAregaddr);
604 readl(phba->HAregaddr); /* flush */
605 phba->link_state = LPFC_HBA_ERROR;
607 mempool_free(pmb, phba->mbox_mem_pool);
611 /* MBOX buffer will be freed in mbox compl */
612 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
614 phba->link_state = LPFC_HBA_ERROR;
618 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
619 pmb->mbox_cmpl = lpfc_config_async_cmpl;
620 pmb->vport = phba->pport;
621 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
623 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
624 lpfc_printf_log(phba,
627 "0456 Adapter failed to issue "
628 "ASYNCEVT_ENABLE mbox status x%x\n",
630 mempool_free(pmb, phba->mbox_mem_pool);
633 /* Get Option rom version */
634 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
636 phba->link_state = LPFC_HBA_ERROR;
640 lpfc_dump_wakeup_param(phba, pmb);
641 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
642 pmb->vport = phba->pport;
643 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
645 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
647 "to get Option ROM version status x%x\n", rc);
648 mempool_free(pmb, phba->mbox_mem_pool);
655 * lpfc_hba_init_link - Initialize the FC link
656 * @phba: pointer to lpfc hba data structure.
657 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
659 * This routine will issue the INIT_LINK mailbox command call.
660 * It is available to other drivers through the lpfc_hba data
661 * structure for use as a delayed link up mechanism with the
662 * module parameter lpfc_suppress_link_up.
666 * Any other value - error
669 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
671 struct lpfc_vport *vport = phba->pport;
676 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
678 phba->link_state = LPFC_HBA_ERROR;
684 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
685 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
686 lpfc_set_loopback_flag(phba);
687 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
688 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
690 "0498 Adapter failed to init, mbxCmd x%x "
691 "INIT_LINK, mbxStatus x%x\n",
692 mb->mbxCommand, mb->mbxStatus);
693 if (phba->sli_rev <= LPFC_SLI_REV3) {
694 /* Clear all interrupt enable conditions */
695 writel(0, phba->HCregaddr);
696 readl(phba->HCregaddr); /* flush */
697 /* Clear all pending interrupts */
698 writel(0xffffffff, phba->HAregaddr);
699 readl(phba->HAregaddr); /* flush */
701 phba->link_state = LPFC_HBA_ERROR;
702 if (rc != MBX_BUSY || flag == MBX_POLL)
703 mempool_free(pmb, phba->mbox_mem_pool);
706 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
707 if (flag == MBX_POLL)
708 mempool_free(pmb, phba->mbox_mem_pool);
714 * lpfc_hba_down_link - this routine downs the FC link
715 * @phba: pointer to lpfc hba data structure.
716 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
718 * This routine will issue the DOWN_LINK mailbox command call.
719 * It is available to other drivers through the lpfc_hba data
720 * structure for use to stop the link.
724 * Any other value - error
727 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
732 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
734 phba->link_state = LPFC_HBA_ERROR;
738 lpfc_printf_log(phba,
740 "0491 Adapter Link is disabled.\n");
741 lpfc_down_link(phba, pmb);
742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
743 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
744 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
745 lpfc_printf_log(phba,
747 "2522 Adapter failed to issue DOWN_LINK"
748 " mbox command rc 0x%x\n", rc);
750 mempool_free(pmb, phba->mbox_mem_pool);
753 if (flag == MBX_POLL)
754 mempool_free(pmb, phba->mbox_mem_pool);
760 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
761 * @phba: pointer to lpfc HBA data structure.
763 * This routine will do LPFC uninitialization before the HBA is reset when
764 * bringing down the SLI Layer.
768 * Any other value - error.
771 lpfc_hba_down_prep(struct lpfc_hba *phba)
773 struct lpfc_vport **vports;
776 if (phba->sli_rev <= LPFC_SLI_REV3) {
777 /* Disable interrupts */
778 writel(0, phba->HCregaddr);
779 readl(phba->HCregaddr); /* flush */
782 if (phba->pport->load_flag & FC_UNLOADING)
783 lpfc_cleanup_discovery_resources(phba->pport);
785 vports = lpfc_create_vport_work_array(phba);
787 for (i = 0; i <= phba->max_vports &&
788 vports[i] != NULL; i++)
789 lpfc_cleanup_discovery_resources(vports[i]);
790 lpfc_destroy_vport_work_array(phba, vports);
796 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
797 * @phba: pointer to lpfc HBA data structure.
799 * This routine will do uninitialization after the HBA is reset when bringing
800 * down the SLI Layer.
804 * Any other value - error.
807 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
809 struct lpfc_sli *psli = &phba->sli;
810 struct lpfc_sli_ring *pring;
811 struct lpfc_dmabuf *mp, *next_mp;
812 LIST_HEAD(completions);
815 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
816 lpfc_sli_hbqbuf_free_all(phba);
818 /* Cleanup preposted buffers on the ELS ring */
819 pring = &psli->ring[LPFC_ELS_RING];
820 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
822 pring->postbufq_cnt--;
823 lpfc_mbuf_free(phba, mp->virt, mp->phys);
828 spin_lock_irq(&phba->hbalock);
829 for (i = 0; i < psli->num_rings; i++) {
830 pring = &psli->ring[i];
832 /* At this point in time the HBA is either reset or DOA. Either
833 * way, nothing should be on txcmplq as it will NEVER complete.
835 list_splice_init(&pring->txcmplq, &completions);
836 pring->txcmplq_cnt = 0;
837 spin_unlock_irq(&phba->hbalock);
839 /* Cancel all the IOCBs from the completions list */
840 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
843 lpfc_sli_abort_iocb_ring(phba, pring);
844 spin_lock_irq(&phba->hbalock);
846 spin_unlock_irq(&phba->hbalock);
852 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
853 * @phba: pointer to lpfc HBA data structure.
855 * This routine will do uninitialization after the HBA is reset when bringing
856 * down the SLI Layer.
860 * Any other value - error.
863 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
865 struct lpfc_scsi_buf *psb, *psb_next;
868 unsigned long iflag = 0;
869 struct lpfc_sglq *sglq_entry = NULL;
871 ret = lpfc_hba_down_post_s3(phba);
874 /* At this point in time the HBA is either reset or DOA. Either
875 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
876 * on the lpfc_sgl_list so that it can either be freed if the
877 * driver is unloading or reposted if the driver is restarting
880 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
882 /* abts_sgl_list_lock required because worker thread uses this
885 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
886 list_for_each_entry(sglq_entry,
887 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
888 sglq_entry->state = SGL_FREED;
890 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
891 &phba->sli4_hba.lpfc_sgl_list);
892 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
893 /* abts_scsi_buf_list_lock required because worker thread uses this
896 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
897 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
899 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
900 spin_unlock_irq(&phba->hbalock);
902 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
904 psb->status = IOSTAT_SUCCESS;
906 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
907 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
908 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
913 * lpfc_hba_down_post - Wrapper func for hba down post routine
914 * @phba: pointer to lpfc HBA data structure.
916 * This routine wraps the actual SLI3 or SLI4 routine for performing
917 * uninitialization after the HBA is reset when bringing down the SLI Layer.
921 * Any other value - error.
924 lpfc_hba_down_post(struct lpfc_hba *phba)
926 return (*phba->lpfc_hba_down_post)(phba);
930 * lpfc_hb_timeout - The HBA-timer timeout handler
931 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
933 * This is the HBA-timer timeout handler registered to the lpfc driver. When
934 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
935 * work-port-events bitmap and the worker thread is notified. This timeout
936 * event will be used by the worker thread to invoke the actual timeout
937 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
938 * be performed in the timeout handler and the HBA timeout event bit shall
939 * be cleared by the worker thread after it has taken the event bitmap out.
942 lpfc_hb_timeout(unsigned long ptr)
944 struct lpfc_hba *phba;
948 phba = (struct lpfc_hba *)ptr;
950 /* Check for heart beat timeout conditions */
951 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
952 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
954 phba->pport->work_port_events |= WORKER_HB_TMO;
955 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
957 /* Tell the worker thread there is work to do */
959 lpfc_worker_wake_up(phba);
964 * lpfc_rrq_timeout - The RRQ-timer timeout handler
965 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
967 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
968 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
969 * work-port-events bitmap and the worker thread is notified. This timeout
970 * event will be used by the worker thread to invoke the actual timeout
971 * handler routine, lpfc_rrq_handler. Any periodical operations will
972 * be performed in the timeout handler and the RRQ timeout event bit shall
973 * be cleared by the worker thread after it has taken the event bitmap out.
976 lpfc_rrq_timeout(unsigned long ptr)
978 struct lpfc_hba *phba;
981 phba = (struct lpfc_hba *)ptr;
982 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
983 phba->hba_flag |= HBA_RRQ_ACTIVE;
984 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
985 lpfc_worker_wake_up(phba);
989 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
990 * @phba: pointer to lpfc hba data structure.
991 * @pmboxq: pointer to the driver internal queue element for mailbox command.
993 * This is the callback function to the lpfc heart-beat mailbox command.
994 * If configured, the lpfc driver issues the heart-beat mailbox command to
995 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
996 * heart-beat mailbox command is issued, the driver shall set up the heart-beat
997 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
998 * heart-beat outstanding state. Once the mailbox command comes back and
999 * no error conditions detected, the heart-beat mailbox command timer is
1000 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1001 * state is cleared for the next heart-beat. If the timer expired with the
1002 * heart-beat outstanding state set, the driver will put the HBA offline.
1005 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1007 unsigned long drvr_flag;
1009 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1010 phba->hb_outstanding = 0;
1011 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1013 /* Check and reset the heart-beat timer if necessary */
1014 mempool_free(pmboxq, phba->mbox_mem_pool);
1015 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1016 !(phba->link_state == LPFC_HBA_ERROR) &&
1017 !(phba->pport->load_flag & FC_UNLOADING))
1018 mod_timer(&phba->hb_tmofunc,
1019 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1024 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1025 * @phba: pointer to lpfc hba data structure.
1027 * This is the actual HBA-timer timeout handler to be invoked by the worker
1028 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1029 * handler performs any periodic operations needed for the device. If such
1030 * periodic event has already been attended to either in the interrupt handler
1031 * or by processing slow-ring or fast-ring events within the HBA-timer
1032 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1033 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1034 * is configured and there is no heart-beat mailbox command outstanding, a
1035 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1036 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1040 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1042 struct lpfc_vport **vports;
1043 LPFC_MBOXQ_t *pmboxq;
1044 struct lpfc_dmabuf *buf_ptr;
1046 struct lpfc_sli *psli = &phba->sli;
1047 LIST_HEAD(completions);
1049 vports = lpfc_create_vport_work_array(phba);
1051 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1052 lpfc_rcv_seq_check_edtov(vports[i]);
1053 lpfc_destroy_vport_work_array(phba, vports);
1055 if ((phba->link_state == LPFC_HBA_ERROR) ||
1056 (phba->pport->load_flag & FC_UNLOADING) ||
1057 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1060 spin_lock_irq(&phba->pport->work_port_lock);
1062 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
1064 spin_unlock_irq(&phba->pport->work_port_lock);
1065 if (!phba->hb_outstanding)
1066 mod_timer(&phba->hb_tmofunc,
1067 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1069 mod_timer(&phba->hb_tmofunc,
1070 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1073 spin_unlock_irq(&phba->pport->work_port_lock);
1075 if (phba->elsbuf_cnt &&
1076 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1077 spin_lock_irq(&phba->hbalock);
1078 list_splice_init(&phba->elsbuf, &completions);
1079 phba->elsbuf_cnt = 0;
1080 phba->elsbuf_prev_cnt = 0;
1081 spin_unlock_irq(&phba->hbalock);
1083 while (!list_empty(&completions)) {
1084 list_remove_head(&completions, buf_ptr,
1085 struct lpfc_dmabuf, list);
1086 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1090 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1092 /* If there is no heart beat outstanding, issue a heartbeat command */
1093 if (phba->cfg_enable_hba_heartbeat) {
1094 if (!phba->hb_outstanding) {
1095 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1096 (list_empty(&psli->mboxq))) {
1097 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1100 mod_timer(&phba->hb_tmofunc,
1102 HZ * LPFC_HB_MBOX_INTERVAL);
1106 lpfc_heart_beat(phba, pmboxq);
1107 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1108 pmboxq->vport = phba->pport;
1109 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1112 if (retval != MBX_BUSY &&
1113 retval != MBX_SUCCESS) {
1114 mempool_free(pmboxq,
1115 phba->mbox_mem_pool);
1116 mod_timer(&phba->hb_tmofunc,
1118 HZ * LPFC_HB_MBOX_INTERVAL);
1121 phba->skipped_hb = 0;
1122 phba->hb_outstanding = 1;
1123 } else if (time_before_eq(phba->last_completion_time,
1124 phba->skipped_hb)) {
1125 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1126 "2857 Last completion time not "
1127 " updated in %d ms\n",
1128 jiffies_to_msecs(jiffies
1129 - phba->last_completion_time));
1131 phba->skipped_hb = jiffies;
1133 mod_timer(&phba->hb_tmofunc,
1134 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1138 * If the heart beat timeout was called with hb_outstanding set
1139 * we need to give the hb mailbox cmd a chance to
1142 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1143 "0459 Adapter heartbeat still out"
1144 "standing:last compl time was %d ms.\n",
1145 jiffies_to_msecs(jiffies
1146 - phba->last_completion_time));
1147 mod_timer(&phba->hb_tmofunc,
1148 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1154 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1155 * @phba: pointer to lpfc hba data structure.
1157 * This routine is called to bring the HBA offline when HBA hardware error
1158 * other than Port Error 6 has been detected.
1161 lpfc_offline_eratt(struct lpfc_hba *phba)
1163 struct lpfc_sli *psli = &phba->sli;
1165 spin_lock_irq(&phba->hbalock);
1166 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1167 spin_unlock_irq(&phba->hbalock);
1168 lpfc_offline_prep(phba);
1171 lpfc_reset_barrier(phba);
1172 spin_lock_irq(&phba->hbalock);
1173 lpfc_sli_brdreset(phba);
1174 spin_unlock_irq(&phba->hbalock);
1175 lpfc_hba_down_post(phba);
1176 lpfc_sli_brdready(phba, HS_MBRDY);
1177 lpfc_unblock_mgmt_io(phba);
1178 phba->link_state = LPFC_HBA_ERROR;
1183 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1184 * @phba: pointer to lpfc hba data structure.
1186 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1187 * other than Port Error 6 has been detected.
1190 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1192 lpfc_offline_prep(phba);
1194 lpfc_sli4_brdreset(phba);
1195 lpfc_hba_down_post(phba);
1196 lpfc_sli4_post_status_check(phba);
1197 lpfc_unblock_mgmt_io(phba);
1198 phba->link_state = LPFC_HBA_ERROR;
1202 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1203 * @phba: pointer to lpfc hba data structure.
1205 * This routine is invoked to handle the deferred HBA hardware error
1206 * conditions. This type of error is indicated by the HBA setting ER1
1207 * and another ER bit in the host status register. The driver will
1208 * wait until the ER1 bit clears before handling the error condition.
1211 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1213 uint32_t old_host_status = phba->work_hs;
1214 struct lpfc_sli_ring *pring;
1215 struct lpfc_sli *psli = &phba->sli;
1217 /* If the pci channel is offline, ignore possible errors,
1218 * since we cannot communicate with the pci card anyway.
1220 if (pci_channel_offline(phba->pcidev)) {
1221 spin_lock_irq(&phba->hbalock);
1222 phba->hba_flag &= ~DEFER_ERATT;
1223 spin_unlock_irq(&phba->hbalock);
1227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1228 "0479 Deferred Adapter Hardware Error "
1229 "Data: x%x x%x x%x\n",
1231 phba->work_status[0], phba->work_status[1]);
1233 spin_lock_irq(&phba->hbalock);
1234 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1235 spin_unlock_irq(&phba->hbalock);
1239 * Firmware stops when it triggers an erratt. That could cause the I/Os
1240 * to be dropped by the firmware. Error the iocbs (I/O) on txcmplq and let the
1241 * SCSI layer retry them after re-establishing link.
1243 pring = &psli->ring[psli->fcp_ring];
1244 lpfc_sli_abort_iocb_ring(phba, pring);
1247 * There was a firmware error. Take the hba offline and then
1248 * attempt to restart it.
1250 lpfc_offline_prep(phba);
1253 /* Wait for the ER1 bit to clear.*/
1254 while (phba->work_hs & HS_FFER1) {
1256 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1257 phba->work_hs = UNPLUG_ERR ;
1260 /* If driver is unloading let the worker thread continue */
1261 if (phba->pport->load_flag & FC_UNLOADING) {
1268 * This is to protect against a race condition in which the
1269 * first write to the host attention register clears the
1270 * host status register.
1272 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1273 phba->work_hs = old_host_status & ~HS_FFER1;
1275 spin_lock_irq(&phba->hbalock);
1276 phba->hba_flag &= ~DEFER_ERATT;
1277 spin_unlock_irq(&phba->hbalock);
1278 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1279 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1283 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1285 struct lpfc_board_event_header board_event;
1286 struct Scsi_Host *shost;
1288 board_event.event_type = FC_REG_BOARD_EVENT;
1289 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1290 shost = lpfc_shost_from_vport(phba->pport);
1291 fc_host_post_vendor_event(shost, fc_get_event_number(),
1292 sizeof(board_event),
1293 (char *) &board_event,
1298 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1299 * @phba: pointer to lpfc hba data structure.
1301 * This routine is invoked to handle the following HBA hardware error
1303 * 1 - HBA error attention interrupt
1304 * 2 - DMA ring index out of range
1305 * 3 - Mailbox command came back as unknown
1308 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1310 struct lpfc_vport *vport = phba->pport;
1311 struct lpfc_sli *psli = &phba->sli;
1312 struct lpfc_sli_ring *pring;
1313 uint32_t event_data;
1314 unsigned long temperature;
1315 struct temp_event temp_event_data;
1316 struct Scsi_Host *shost;
1318 /* If the pci channel is offline, ignore possible errors,
1319 * since we cannot communicate with the pci card anyway.
1321 if (pci_channel_offline(phba->pcidev)) {
1322 spin_lock_irq(&phba->hbalock);
1323 phba->hba_flag &= ~DEFER_ERATT;
1324 spin_unlock_irq(&phba->hbalock);
1328 /* If resets are disabled then leave the HBA alone and return */
1329 if (!phba->cfg_enable_hba_reset)
1332 /* Send an internal error event to mgmt application */
1333 lpfc_board_errevt_to_mgmt(phba);
1335 if (phba->hba_flag & DEFER_ERATT)
1336 lpfc_handle_deferred_eratt(phba);
1338 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1339 if (phba->work_hs & HS_FFER6)
1340 /* Re-establishing Link */
1341 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1342 "1301 Re-establishing Link "
1343 "Data: x%x x%x x%x\n",
1344 phba->work_hs, phba->work_status[0],
1345 phba->work_status[1]);
1346 if (phba->work_hs & HS_FFER8)
1347 /* Device Zeroization */
1348 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1349 "2861 Host Authentication device "
1350 "zeroization Data:x%x x%x x%x\n",
1351 phba->work_hs, phba->work_status[0],
1352 phba->work_status[1]);
1354 spin_lock_irq(&phba->hbalock);
1355 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1356 spin_unlock_irq(&phba->hbalock);
1359 * Firmware stops when it triggers an erratt with HS_FFER6.
1360 * That could cause the I/Os to be dropped by the firmware.
1361 * Error the iocbs (I/O) on txcmplq and let the SCSI layer
1362 * retry them after re-establishing link.
1364 pring = &psli->ring[psli->fcp_ring];
1365 lpfc_sli_abort_iocb_ring(phba, pring);
1368 * There was a firmware error. Take the hba offline and then
1369 * attempt to restart it.
1371 lpfc_offline_prep(phba);
1373 lpfc_sli_brdrestart(phba);
1374 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1375 lpfc_unblock_mgmt_io(phba);
1378 lpfc_unblock_mgmt_io(phba);
1379 } else if (phba->work_hs & HS_CRIT_TEMP) {
1380 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1381 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1382 temp_event_data.event_code = LPFC_CRIT_TEMP;
1383 temp_event_data.data = (uint32_t)temperature;
1385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1386 "0406 Adapter maximum temperature exceeded "
1387 "(%ld), taking this port offline "
1388 "Data: x%x x%x x%x\n",
1389 temperature, phba->work_hs,
1390 phba->work_status[0], phba->work_status[1]);
1392 shost = lpfc_shost_from_vport(phba->pport);
1393 fc_host_post_vendor_event(shost, fc_get_event_number(),
1394 sizeof(temp_event_data),
1395 (char *) &temp_event_data,
1396 SCSI_NL_VID_TYPE_PCI
1397 | PCI_VENDOR_ID_EMULEX);
1399 spin_lock_irq(&phba->hbalock);
1400 phba->over_temp_state = HBA_OVER_TEMP;
1401 spin_unlock_irq(&phba->hbalock);
1402 lpfc_offline_eratt(phba);
1405 /* The if clause above forces this code path when the status
1406 * failure is a value other than FFER6. Do not call the offline
1407 * twice. This is the adapter hardware error path.
1409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1410 "0457 Adapter Hardware Error "
1411 "Data: x%x x%x x%x\n",
1413 phba->work_status[0], phba->work_status[1]);
1415 event_data = FC_REG_DUMP_EVENT;
1416 shost = lpfc_shost_from_vport(vport);
1417 fc_host_post_vendor_event(shost, fc_get_event_number(),
1418 sizeof(event_data), (char *) &event_data,
1419 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1421 lpfc_offline_eratt(phba);
1427 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1428 * @phba: pointer to lpfc hba data structure.
1430 * This routine is invoked to handle the SLI4 HBA hardware error attention
1434 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1436 struct lpfc_vport *vport = phba->pport;
1437 uint32_t event_data;
1438 struct Scsi_Host *shost;
1440 struct lpfc_register portstat_reg;
1442 /* If the pci channel is offline, ignore possible errors, since
1443 * we cannot communicate with the pci card anyway.
1445 if (pci_channel_offline(phba->pcidev))
1447 /* If resets are disabled then leave the HBA alone and return */
1448 if (!phba->cfg_enable_hba_reset)
1451 /* Send an internal error event to mgmt application */
1452 lpfc_board_errevt_to_mgmt(phba);
1454 /* For now, the actual action for SLI4 device handling is not
1455 * specified yet; just treat it as an adapter hardware failure
1457 event_data = FC_REG_DUMP_EVENT;
1458 shost = lpfc_shost_from_vport(vport);
1459 fc_host_post_vendor_event(shost, fc_get_event_number(),
1460 sizeof(event_data), (char *) &event_data,
1461 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1463 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1465 case LPFC_SLI_INTF_IF_TYPE_0:
1466 lpfc_sli4_offline_eratt(phba);
1468 case LPFC_SLI_INTF_IF_TYPE_2:
1469 portstat_reg.word0 =
1470 readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
1472 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1473 /* TODO: Register for Overtemp async events. */
1474 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1475 "2889 Port Overtemperature event, "
1477 spin_lock_irq(&phba->hbalock);
1478 phba->over_temp_state = HBA_OVER_TEMP;
1479 spin_unlock_irq(&phba->hbalock);
1480 lpfc_sli4_offline_eratt(phba);
1483 if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
1485 * TODO: Attempt port recovery via a port reset.
1486 * When fully implemented, the driver should
1487 * attempt to recover the port here and return.
1488 * For now, log an error and take the port offline.
1490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1491 "2887 Port Error: Attempting "
1494 lpfc_sli4_offline_eratt(phba);
1496 case LPFC_SLI_INTF_IF_TYPE_1:
1503 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1504 * @phba: pointer to lpfc HBA data structure.
1506 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1507 * routine from the API jump table function pointer from the lpfc_hba struct.
1511 * Any other value - error.
1514 lpfc_handle_eratt(struct lpfc_hba *phba)
1516 (*phba->lpfc_handle_eratt)(phba);
1520 * lpfc_handle_latt - The HBA link event handler
1521 * @phba: pointer to lpfc hba data structure.
1523 * This routine is invoked from the worker thread to handle a HBA host
1524 * attention link event.
1527 lpfc_handle_latt(struct lpfc_hba *phba)
1529 struct lpfc_vport *vport = phba->pport;
1530 struct lpfc_sli *psli = &phba->sli;
1532 volatile uint32_t control;
1533 struct lpfc_dmabuf *mp;
1536 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1539 goto lpfc_handle_latt_err_exit;
1542 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1545 goto lpfc_handle_latt_free_pmb;
1548 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1551 goto lpfc_handle_latt_free_mp;
1554 /* Cleanup any outstanding ELS commands */
1555 lpfc_els_flush_all_cmd(phba);
1557 psli->slistat.link_event++;
1558 lpfc_read_topology(phba, pmb, mp);
1559 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1561 /* Block ELS IOCBs until we have processed this mbox command */
1562 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1563 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1564 if (rc == MBX_NOT_FINISHED) {
1566 goto lpfc_handle_latt_free_mbuf;
1569 /* Clear Link Attention in HA REG */
1570 spin_lock_irq(&phba->hbalock);
1571 writel(HA_LATT, phba->HAregaddr);
1572 readl(phba->HAregaddr); /* flush */
1573 spin_unlock_irq(&phba->hbalock);
1577 lpfc_handle_latt_free_mbuf:
1578 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1579 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1580 lpfc_handle_latt_free_mp:
1582 lpfc_handle_latt_free_pmb:
1583 mempool_free(pmb, phba->mbox_mem_pool);
1584 lpfc_handle_latt_err_exit:
1585 /* Enable Link attention interrupts */
1586 spin_lock_irq(&phba->hbalock);
1587 psli->sli_flag |= LPFC_PROCESS_LA;
1588 control = readl(phba->HCregaddr);
1589 control |= HC_LAINT_ENA;
1590 writel(control, phba->HCregaddr);
1591 readl(phba->HCregaddr); /* flush */
1593 /* Clear Link Attention in HA REG */
1594 writel(HA_LATT, phba->HAregaddr);
1595 readl(phba->HAregaddr); /* flush */
1596 spin_unlock_irq(&phba->hbalock);
1597 lpfc_linkdown(phba);
1598 phba->link_state = LPFC_HBA_ERROR;
1600 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1601 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1607 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1608 * @phba: pointer to lpfc hba data structure.
1609 * @vpd: pointer to the vital product data.
1610 * @len: length of the vital product data in bytes.
1612 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1613 * an array of characters. In this routine, the ModelName, ProgramType, and
1614 * ModelDesc, etc. fields of the phba data structure will be populated.
1617 * 0 - pointer to the VPD passed in is NULL
1621 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1623 uint8_t lenlo, lenhi;
1633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1634 "0455 Vital Product Data: x%x x%x x%x x%x\n",
1635 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1637 while (!finished && (index < (len - 4))) {
1638 switch (vpd[index]) {
1646 i = ((((unsigned short)lenhi) << 8) + lenlo);
1655 Length = ((((unsigned short)lenhi) << 8) + lenlo);
1656 if (Length > len - index)
1657 Length = len - index;
1658 while (Length > 0) {
1659 /* Look for Serial Number */
1660 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1667 phba->SerialNumber[j++] = vpd[index++];
1671 phba->SerialNumber[j] = 0;
1674 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1675 phba->vpd_flag |= VPD_MODEL_DESC;
1682 phba->ModelDesc[j++] = vpd[index++];
1686 phba->ModelDesc[j] = 0;
1689 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1690 phba->vpd_flag |= VPD_MODEL_NAME;
1697 phba->ModelName[j++] = vpd[index++];
1701 phba->ModelName[j] = 0;
1704 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1705 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1712 phba->ProgramType[j++] = vpd[index++];
1716 phba->ProgramType[j] = 0;
1719 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1720 phba->vpd_flag |= VPD_PORT;
1727 phba->Port[j++] = vpd[index++];
1757 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1758 * @phba: pointer to lpfc hba data structure.
1759 * @mdp: pointer to the data structure to hold the derived model name.
1760 * @descp: pointer to the data structure to hold the derived description.
1762 * This routine retrieves HBA's description based on its registered PCI device
1763 * ID. The @descp passed into this function points to an array of 256 chars. It
1764 * shall be returned with the model name, maximum speed, and the host bus type.
1765 * The @mdp passed into this function points to an array of 80 chars. When the
1766 * function returns, the @mdp will be filled with the model name.
1769 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1772 uint16_t dev_id = phba->pcidev->device;
1775 int oneConnect = 0; /* default is not a oneConnect */
1780 } m = {"<Unknown>", "", ""};
1782 if (mdp && mdp[0] != '\0'
1783 && descp && descp[0] != '\0')
1786 if (phba->lmt & LMT_16Gb)
1788 else if (phba->lmt & LMT_10Gb)
1790 else if (phba->lmt & LMT_8Gb)
1792 else if (phba->lmt & LMT_4Gb)
1794 else if (phba->lmt & LMT_2Gb)
1802 case PCI_DEVICE_ID_FIREFLY:
1803 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1805 case PCI_DEVICE_ID_SUPERFLY:
1806 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1807 m = (typeof(m)){"LP7000", "PCI",
1808 "Fibre Channel Adapter"};
1810 m = (typeof(m)){"LP7000E", "PCI",
1811 "Fibre Channel Adapter"};
1813 case PCI_DEVICE_ID_DRAGONFLY:
1814 m = (typeof(m)){"LP8000", "PCI",
1815 "Fibre Channel Adapter"};
1817 case PCI_DEVICE_ID_CENTAUR:
1818 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1819 m = (typeof(m)){"LP9002", "PCI",
1820 "Fibre Channel Adapter"};
1822 m = (typeof(m)){"LP9000", "PCI",
1823 "Fibre Channel Adapter"};
1825 case PCI_DEVICE_ID_RFLY:
1826 m = (typeof(m)){"LP952", "PCI",
1827 "Fibre Channel Adapter"};
1829 case PCI_DEVICE_ID_PEGASUS:
1830 m = (typeof(m)){"LP9802", "PCI-X",
1831 "Fibre Channel Adapter"};
1833 case PCI_DEVICE_ID_THOR:
1834 m = (typeof(m)){"LP10000", "PCI-X",
1835 "Fibre Channel Adapter"};
1837 case PCI_DEVICE_ID_VIPER:
1838 m = (typeof(m)){"LPX1000", "PCI-X",
1839 "Fibre Channel Adapter"};
1841 case PCI_DEVICE_ID_PFLY:
1842 m = (typeof(m)){"LP982", "PCI-X",
1843 "Fibre Channel Adapter"};
1845 case PCI_DEVICE_ID_TFLY:
1846 m = (typeof(m)){"LP1050", "PCI-X",
1847 "Fibre Channel Adapter"};
1849 case PCI_DEVICE_ID_HELIOS:
1850 m = (typeof(m)){"LP11000", "PCI-X2",
1851 "Fibre Channel Adapter"};
1853 case PCI_DEVICE_ID_HELIOS_SCSP:
1854 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1855 "Fibre Channel Adapter"};
1857 case PCI_DEVICE_ID_HELIOS_DCSP:
1858 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1859 "Fibre Channel Adapter"};
1861 case PCI_DEVICE_ID_NEPTUNE:
1862 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1864 case PCI_DEVICE_ID_NEPTUNE_SCSP:
1865 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1867 case PCI_DEVICE_ID_NEPTUNE_DCSP:
1868 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1870 case PCI_DEVICE_ID_BMID:
1871 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1873 case PCI_DEVICE_ID_BSMB:
1874 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1876 case PCI_DEVICE_ID_ZEPHYR:
1877 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1879 case PCI_DEVICE_ID_ZEPHYR_SCSP:
1880 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1882 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1883 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1886 case PCI_DEVICE_ID_ZMID:
1887 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1889 case PCI_DEVICE_ID_ZSMB:
1890 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1892 case PCI_DEVICE_ID_LP101:
1893 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1895 case PCI_DEVICE_ID_LP10000S:
1896 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1898 case PCI_DEVICE_ID_LP11000S:
1899 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1901 case PCI_DEVICE_ID_LPE11000S:
1902 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1904 case PCI_DEVICE_ID_SAT:
1905 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1907 case PCI_DEVICE_ID_SAT_MID:
1908 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1910 case PCI_DEVICE_ID_SAT_SMB:
1911 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1913 case PCI_DEVICE_ID_SAT_DCSP:
1914 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1916 case PCI_DEVICE_ID_SAT_SCSP:
1917 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1919 case PCI_DEVICE_ID_SAT_S:
1920 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1922 case PCI_DEVICE_ID_HORNET:
1923 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1926 case PCI_DEVICE_ID_PROTEUS_VF:
1927 m = (typeof(m)){"LPev12000", "PCIe IOV",
1928 "Fibre Channel Adapter"};
1930 case PCI_DEVICE_ID_PROTEUS_PF:
1931 m = (typeof(m)){"LPev12000", "PCIe IOV",
1932 "Fibre Channel Adapter"};
1934 case PCI_DEVICE_ID_PROTEUS_S:
1935 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1936 "Fibre Channel Adapter"};
1938 case PCI_DEVICE_ID_TIGERSHARK:
1940 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1942 case PCI_DEVICE_ID_TOMCAT:
1944 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1946 case PCI_DEVICE_ID_FALCON:
1947 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1948 "EmulexSecure Fibre"};
1950 case PCI_DEVICE_ID_BALIUS:
1951 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1952 "Fibre Channel Adapter"};
1954 case PCI_DEVICE_ID_LANCER_FC:
1955 case PCI_DEVICE_ID_LANCER_FC_VF:
1956 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
1958 case PCI_DEVICE_ID_LANCER_FCOE:
1959 case PCI_DEVICE_ID_LANCER_FCOE_VF:
1961 m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
1964 m = (typeof(m)){"Unknown", "", ""};
1968 if (mdp && mdp[0] == '\0')
1969 snprintf(mdp, 79,"%s", m.name);
1971 * oneConnect hbas require special processing; they are all initiators
1972 * and we put the port number on the end
1974 if (descp && descp[0] == '\0') {
1976 snprintf(descp, 255,
1977 "Emulex OneConnect %s, %s Initiator, Port %s",
1981 snprintf(descp, 255,
1982 "Emulex %s %d%s %s %s",
1983 m.name, max_speed, (GE) ? "GE" : "Gb",
1989 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
1990 * @phba: pointer to lpfc hba data structure.
1991 * @pring: pointer to an IOCB ring.
1992 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1994 * This routine posts a given number of IOCBs with the associated DMA buffer
1995 * descriptors specified by the cnt argument to the given IOCB ring.
1998 * The number of IOCBs NOT able to be posted to the IOCB ring.
2001 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2004 struct lpfc_iocbq *iocb;
2005 struct lpfc_dmabuf *mp1, *mp2;
2007 cnt += pring->missbufcnt;
2009 /* While there are buffers to post */
2011 /* Allocate buffer for command iocb */
2012 iocb = lpfc_sli_get_iocbq(phba);
2014 pring->missbufcnt = cnt;
2019 /* 2 buffers can be posted per command */
2020 /* Allocate buffer to post */
2021 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2023 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2024 if (!mp1 || !mp1->virt) {
2026 lpfc_sli_release_iocbq(phba, iocb);
2027 pring->missbufcnt = cnt;
2031 INIT_LIST_HEAD(&mp1->list);
2032 /* Allocate buffer to post */
2034 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2036 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2038 if (!mp2 || !mp2->virt) {
2040 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2042 lpfc_sli_release_iocbq(phba, iocb);
2043 pring->missbufcnt = cnt;
2047 INIT_LIST_HEAD(&mp2->list);
2052 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2053 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2054 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2055 icmd->ulpBdeCount = 1;
2058 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2059 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2060 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2062 icmd->ulpBdeCount = 2;
2065 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2068 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2070 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2074 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2078 lpfc_sli_release_iocbq(phba, iocb);
2079 pring->missbufcnt = cnt;
2082 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2084 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2086 pring->missbufcnt = 0;
2091 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2092 * @phba: pointer to lpfc hba data structure.
2094 * This routine posts initial receive IOCB buffers to the ELS ring. The
2095 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2099 * 0 - success (currently always success)
2102 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2104 struct lpfc_sli *psli = &phba->sli;
2106 /* Ring 0, ELS / CT buffers */
2107 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2108 /* Ring 2 - FCP no buffers needed */
2113 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
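/* S(N,V): rotate the 32-bit value V left by N bits; used by the SHA-1 style hash below */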
2116 * lpfc_sha_init - Set up initial array of hash table entries
2117 * @HashResultPointer: pointer to an array as hash table.
2119 * This routine sets up the initial values in the array of hash table entries
2123 lpfc_sha_init(uint32_t * HashResultPointer)
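/* Standard SHA-1 initial hash values H0..H4 */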
2125 HashResultPointer[0] = 0x67452301;
2126 HashResultPointer[1] = 0xEFCDAB89;
2127 HashResultPointer[2] = 0x98BADCFE;
2128 HashResultPointer[3] = 0x10325476;
2129 HashResultPointer[4] = 0xC3D2E1F0;
2133 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2134 * @HashResultPointer: pointer to an initial/result hash table.
2135 * @HashWorkingPointer: pointer to an working hash table.
2137 * This routine iterates an initial hash table pointed to by @HashResultPointer
2138 * with the values from the working hash table pointed to by @HashWorkingPointer.
2139 * The results are put back into the initial hash table, returned through
2140 * the @HashResultPointer as the result hash table.
2143 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2147 uint32_t A, B, C, D, E;
2150 HashWorkingPointer[t] =
2152 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2154 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2155 } while (++t <= 79);
2157 A = HashResultPointer[0];
2158 B = HashResultPointer[1];
2159 C = HashResultPointer[2];
2160 D = HashResultPointer[3];
2161 E = HashResultPointer[4];
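/* 80 rounds of the SHA-1 compression function; each group of 20 rounds uses its own mixing function and additive constant */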
2165 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2166 } else if (t < 40) {
2167 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2168 } else if (t < 60) {
2169 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2171 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2173 TEMP += S(5, A) + E + HashWorkingPointer[t];
2179 } while (++t <= 79);
2181 HashResultPointer[0] += A;
2182 HashResultPointer[1] += B;
2183 HashResultPointer[2] += C;
2184 HashResultPointer[3] += D;
2185 HashResultPointer[4] += E;
2190 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2191 * @RandomChallenge: pointer to the entry of host challenge random number array.
2192 * @HashWorking: pointer to the entry of the working hash array.
2194 * This routine calculates the working hash array referred by @HashWorking
2195 * from the challenge random numbers associated with the host, referred by
2196 * @RandomChallenge. The result is put into the entry of the working hash
2197 * array and returned by reference through @HashWorking.
2200 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2202 *HashWorking = (*RandomChallenge ^ *HashWorking);
2206 * lpfc_hba_init - Perform special handling for LC HBA initialization
2207 * @phba: pointer to lpfc hba data structure.
2208 * @hbainit: pointer to an array of unsigned 32-bit integers.
2210 * This routine performs the special handling for LC HBA initialization.
2213 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2216 uint32_t *HashWorking;
2217 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2219 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2223 HashWorking[0] = HashWorking[78] = *pwwnn++;
2224 HashWorking[1] = HashWorking[79] = *pwwnn;
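/* Seed words 0/1 and 78/79 of the working array with the two 32-bit words of the adapter WWNN */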
2226 for (t = 0; t < 7; t++)
2227 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2229 lpfc_sha_init(hbainit);
2230 lpfc_sha_iterate(hbainit, HashWorking);
2235 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2236 * @vport: pointer to a virtual N_Port data structure.
2238 * This routine performs the necessary cleanups before deleting the @vport.
2239 * It invokes the discovery state machine to perform necessary state
2240 * transitions and to release the ndlps associated with the @vport. Note,
2241 * the physical port is treated as @vport 0.
2244 lpfc_cleanup(struct lpfc_vport *vport)
2246 struct lpfc_hba *phba = vport->phba;
2247 struct lpfc_nodelist *ndlp, *next_ndlp;
2250 if (phba->link_state > LPFC_LINK_DOWN)
2251 lpfc_port_link_failure(vport);
2253 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2254 if (!NLP_CHK_NODE_ACT(ndlp)) {
2255 ndlp = lpfc_enable_node(vport, ndlp,
2256 NLP_STE_UNUSED_NODE);
2259 spin_lock_irq(&phba->ndlp_lock);
2260 NLP_SET_FREE_REQ(ndlp);
2261 spin_unlock_irq(&phba->ndlp_lock);
2262 /* Trigger the release of the ndlp memory */
2266 spin_lock_irq(&phba->ndlp_lock);
2267 if (NLP_CHK_FREE_REQ(ndlp)) {
2268 /* The ndlp should not be in memory free mode already */
2269 spin_unlock_irq(&phba->ndlp_lock);
2272 /* Indicate request for freeing ndlp memory */
2273 NLP_SET_FREE_REQ(ndlp);
2274 spin_unlock_irq(&phba->ndlp_lock);
2276 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2277 ndlp->nlp_DID == Fabric_DID) {
2278 /* Just free up ndlp with Fabric_DID for vports */
2283 if (ndlp->nlp_type & NLP_FABRIC)
2284 lpfc_disc_state_machine(vport, ndlp, NULL,
2285 NLP_EVT_DEVICE_RECOVERY);
2287 lpfc_disc_state_machine(vport, ndlp, NULL,
2292 /* At this point, ALL ndlp's should be gone
2293 * because of the previous NLP_EVT_DEVICE_RM.
2294 * Let's wait for this to happen, if needed.
2296 while (!list_empty(&vport->fc_nodes)) {
2298 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2299 "0233 Nodelist not empty\n");
2300 list_for_each_entry_safe(ndlp, next_ndlp,
2301 &vport->fc_nodes, nlp_listp) {
2302 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2304 "0282 did:x%x ndlp:x%p "
2305 "usgmap:x%x refcnt:%d\n",
2306 ndlp->nlp_DID, (void *)ndlp,
2309 &ndlp->kref.refcount));
2314 /* Wait for any activity on ndlps to settle */
2317 lpfc_cleanup_vports_rrqs(vport, NULL);
2321 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2322 * @vport: pointer to a virtual N_Port data structure.
2324 * This routine stops all the timers associated with a @vport. This function
2325 * is invoked before disabling or deleting a @vport. Note that the physical
2326 * port is treated as @vport 0.
2329 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2331 del_timer_sync(&vport->els_tmofunc);
2332 del_timer_sync(&vport->fc_fdmitmo);
2333 del_timer_sync(&vport->delayed_disc_tmo);
2334 lpfc_can_disctmo(vport);
2339 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2340 * @phba: pointer to lpfc hba data structure.
2342 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2343 * caller of this routine should already hold the host lock.
2346 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2348 /* Clear pending FCF rediscovery wait flag */
2349 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2351 /* Now, try to stop the timer */
2352 del_timer(&phba->fcf.redisc_wait);
2356 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2357 * @phba: pointer to lpfc hba data structure.
2359 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2360 * checks whether the FCF rediscovery wait timer is pending with the host
2361 * lock held before proceeding with disabling the timer and clearing the
2362 * wait timer pending flag.
2365 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2367 spin_lock_irq(&phba->hbalock);
2368 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2369 /* FCF rediscovery timer already fired or stopped */
2370 spin_unlock_irq(&phba->hbalock);
2373 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2374 /* Clear failover in progress flags */
2375 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2376 spin_unlock_irq(&phba->hbalock);
2380 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2381 * @phba: pointer to lpfc hba data structure.
2383 * This routine stops all the timers associated with an HBA. This function is
2384 * invoked before either putting an HBA offline or unloading the driver.
2387 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2389 lpfc_stop_vport_timers(phba->pport);
2390 del_timer_sync(&phba->sli.mbox_tmo);
2391 del_timer_sync(&phba->fabric_block_timer);
2392 del_timer_sync(&phba->eratt_poll);
2393 del_timer_sync(&phba->hb_tmofunc);
2394 if (phba->sli_rev == LPFC_SLI_REV4) {
2395 del_timer_sync(&phba->rrq_tmr);
2396 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2398 phba->hb_outstanding = 0;
2400 switch (phba->pci_dev_grp) {
2401 case LPFC_PCI_DEV_LP:
2402 /* Stop any LightPulse device specific driver timers */
2403 del_timer_sync(&phba->fcp_poll_timer);
2405 case LPFC_PCI_DEV_OC:
2406 /* Stop any OneConnect device specific driver timers */
2407 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2411 "0297 Invalid device group (x%x)\n",
2419 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2420 * @phba: pointer to lpfc hba data structure.
2422 * This routine marks an HBA's management interface as blocked. Once the HBA's
2423 * management interface is marked as blocked, all user space access to
2424 * the HBA, whether through the sysfs interface or the libdfc interface, is
2425 * blocked. The HBA is set to block the management interface when the
2426 * driver prepares the HBA interface for online or offline.
2429 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2431 unsigned long iflag;
2432 uint8_t actcmd = MBX_HEARTBEAT;
2433 unsigned long timeout;
2436 spin_lock_irqsave(&phba->hbalock, iflag);
2437 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2438 if (phba->sli.mbox_active)
2439 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2440 spin_unlock_irqrestore(&phba->hbalock, iflag);
2441 /* Determine how long we might wait for the active mailbox
2442 * command to be gracefully completed by firmware.
2444 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2446 /* Wait for the outstanding mailbox command to complete */
2447 while (phba->sli.mbox_active) {
2448 /* Check active mailbox complete status every 2ms */
2450 if (time_after(jiffies, timeout)) {
2451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2452 "2813 Mgmt IO is Blocked %x "
2453 "- mbox cmd %x still active\n",
2454 phba->sli.sli_flag, actcmd);
2461 * lpfc_online - Initialize and bring an HBA online
2462 * @phba: pointer to lpfc hba data structure.
2464 * This routine initializes the HBA and brings it online. During this
2465 * process, the management interface is blocked to prevent user space access
2466 * to the HBA from interfering with driver initialization.
2473 lpfc_online(struct lpfc_hba *phba)
2475 struct lpfc_vport *vport;
2476 struct lpfc_vport **vports;
2481 vport = phba->pport;
2483 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2486 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2487 "0458 Bring Adapter online\n");
2489 lpfc_block_mgmt_io(phba);
2491 if (!lpfc_sli_queue_setup(phba)) {
2492 lpfc_unblock_mgmt_io(phba);
2496 if (phba->sli_rev == LPFC_SLI_REV4) {
2497 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2498 lpfc_unblock_mgmt_io(phba);
2502 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2503 lpfc_unblock_mgmt_io(phba);
2508 vports = lpfc_create_vport_work_array(phba);
2510 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2511 struct Scsi_Host *shost;
2512 shost = lpfc_shost_from_vport(vports[i]);
2513 spin_lock_irq(shost->host_lock);
2514 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2515 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2516 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2517 if (phba->sli_rev == LPFC_SLI_REV4)
2518 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2519 spin_unlock_irq(shost->host_lock);
2521 lpfc_destroy_vport_work_array(phba, vports);
2523 lpfc_unblock_mgmt_io(phba);
2528 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2529 * @phba: pointer to lpfc hba data structure.
2531 * This routine marks an HBA's management interface as not blocked. Once the
2532 * HBA's management interface is marked as not blocked, all user space
2533 * access to the HBA, whether through the sysfs interface or the libdfc
2534 * interface, is allowed. The HBA is set to block the management interface
2535 * when the driver prepares the HBA interface for online or offline and then
2536 * set to unblock the management interface afterwards.
2539 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2541 unsigned long iflag;
2543 spin_lock_irqsave(&phba->hbalock, iflag);
2544 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2545 spin_unlock_irqrestore(&phba->hbalock, iflag);
2549 * lpfc_offline_prep - Prepare an HBA to be brought offline
2550 * @phba: pointer to lpfc hba data structure.
2552 * This routine is invoked to prepare an HBA to be brought offline. It issues
2553 * an unreg_login to all the nodes on all vports and flushes the mailbox
2554 * queue to make it ready to be brought offline.
2557 lpfc_offline_prep(struct lpfc_hba * phba)
2559 struct lpfc_vport *vport = phba->pport;
2560 struct lpfc_nodelist *ndlp, *next_ndlp;
2561 struct lpfc_vport **vports;
2562 struct Scsi_Host *shost;
2565 if (vport->fc_flag & FC_OFFLINE_MODE)
2568 lpfc_block_mgmt_io(phba);
2570 lpfc_linkdown(phba);
2572 /* Issue an unreg_login to all nodes on all vports */
2573 vports = lpfc_create_vport_work_array(phba);
2574 if (vports != NULL) {
2575 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2576 if (vports[i]->load_flag & FC_UNLOADING)
2578 shost = lpfc_shost_from_vport(vports[i]);
2579 spin_lock_irq(shost->host_lock);
2580 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2581 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2582 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2583 spin_unlock_irq(shost->host_lock);
2585 shost = lpfc_shost_from_vport(vports[i]);
2586 list_for_each_entry_safe(ndlp, next_ndlp,
2587 &vports[i]->fc_nodes,
2589 if (!NLP_CHK_NODE_ACT(ndlp))
2591 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2593 if (ndlp->nlp_type & NLP_FABRIC) {
2594 lpfc_disc_state_machine(vports[i], ndlp,
2595 NULL, NLP_EVT_DEVICE_RECOVERY);
2596 lpfc_disc_state_machine(vports[i], ndlp,
2597 NULL, NLP_EVT_DEVICE_RM);
2599 spin_lock_irq(shost->host_lock);
2600 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2601 spin_unlock_irq(shost->host_lock);
2602 lpfc_unreg_rpi(vports[i], ndlp);
2606 lpfc_destroy_vport_work_array(phba, vports);
2608 lpfc_sli_mbox_sys_shutdown(phba);
2612 * lpfc_offline - Bring an HBA offline
2613 * @phba: pointer to lpfc hba data structure.
2615 * This routine actually brings an HBA offline. It stops all the timers
2616 * associated with the HBA, brings down the SLI layer, and eventually
2617 * marks the HBA as in offline state for the upper layer protocol.
2620 lpfc_offline(struct lpfc_hba *phba)
2622 struct Scsi_Host *shost;
2623 struct lpfc_vport **vports;
2626 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2629 /* stop port and all timers associated with this hba */
2630 lpfc_stop_port(phba);
2631 vports = lpfc_create_vport_work_array(phba);
2633 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2634 lpfc_stop_vport_timers(vports[i]);
2635 lpfc_destroy_vport_work_array(phba, vports);
2636 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2637 "0460 Bring Adapter offline\n");
2638 /* Bring down the SLI Layer and cleanup. The HBA is offline
2640 lpfc_sli_hba_down(phba);
2641 spin_lock_irq(&phba->hbalock);
2643 spin_unlock_irq(&phba->hbalock);
2644 vports = lpfc_create_vport_work_array(phba);
2646 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2647 shost = lpfc_shost_from_vport(vports[i]);
2648 spin_lock_irq(shost->host_lock);
2649 vports[i]->work_port_events = 0;
2650 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2651 spin_unlock_irq(shost->host_lock);
2653 lpfc_destroy_vport_work_array(phba, vports);
2657 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2658 * @phba: pointer to lpfc hba data structure.
2660 * This routine is to free all the SCSI buffers and IOCBs from the driver
2661 * list back to the kernel. It is called from lpfc_pci_remove_one to free
2662 * the internal resources before the device is removed from the system.
2665 * 0 - successful (for now, it always returns 0)
2668 lpfc_scsi_free(struct lpfc_hba *phba)
2670 struct lpfc_scsi_buf *sb, *sb_next;
2671 struct lpfc_iocbq *io, *io_next;
2673 spin_lock_irq(&phba->hbalock);
2674 /* Release all the lpfc_scsi_bufs maintained by this host. */
2675 spin_lock(&phba->scsi_buf_list_lock);
2676 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2677 list_del(&sb->list);
2678 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2681 phba->total_scsi_bufs--;
2683 spin_unlock(&phba->scsi_buf_list_lock);
2685 /* Release all the lpfc_iocbq entries maintained by this host. */
2686 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2687 list_del(&io->list);
2689 phba->total_iocbq_bufs--;
2692 spin_unlock_irq(&phba->hbalock);
2697 * lpfc_create_port - Create an FC port
2698 * @phba: pointer to lpfc hba data structure.
2699 * @instance: a unique integer ID to this FC port.
2700 * @dev: pointer to the device data structure.
2702 * This routine creates an FC port for the upper layer protocol. The FC port
2703 * can be created on top of either a physical port or a virtual port provided
2704 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2705 * and associates the FC port created before adding the shost into the SCSI
2709 * @vport - pointer to the virtual N_Port data structure.
2710 * NULL - port create failed.
2713 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2715 struct lpfc_vport *vport;
2716 struct Scsi_Host *shost;
2719 if (dev != &phba->pcidev->dev)
2720 shost = scsi_host_alloc(&lpfc_vport_template,
2721 sizeof(struct lpfc_vport));
2723 shost = scsi_host_alloc(&lpfc_template,
2724 sizeof(struct lpfc_vport));
2728 vport = (struct lpfc_vport *) shost->hostdata;
2730 vport->load_flag |= FC_LOADING;
2731 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2732 vport->fc_rscn_flush = 0;
2734 lpfc_get_vport_cfgparam(vport);
2735 shost->unique_id = instance;
2736 shost->max_id = LPFC_MAX_TARGET;
2737 shost->max_lun = vport->cfg_max_luns;
2738 shost->this_id = -1;
2739 shost->max_cmd_len = 16;
2740 if (phba->sli_rev == LPFC_SLI_REV4) {
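/* SLI4 only: bound DMA segments by the supported SGE length and size the S/G table from the configured segment count */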
2741 shost->dma_boundary =
2742 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2743 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2747 * Set initial can_queue value since 0 is no longer supported and
2748 * scsi_add_host will fail. This will be adjusted later based on the
2749 * max xri value determined in hba setup.
2751 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2752 if (dev != &phba->pcidev->dev) {
2753 shost->transportt = lpfc_vport_transport_template;
2754 vport->port_type = LPFC_NPIV_PORT;
2756 shost->transportt = lpfc_transport_template;
2757 vport->port_type = LPFC_PHYSICAL_PORT;
2760 /* Initialize all internally managed lists. */
2761 INIT_LIST_HEAD(&vport->fc_nodes);
2762 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2763 spin_lock_init(&vport->work_port_lock);
2765 init_timer(&vport->fc_disctmo);
2766 vport->fc_disctmo.function = lpfc_disc_timeout;
2767 vport->fc_disctmo.data = (unsigned long)vport;
2769 init_timer(&vport->fc_fdmitmo);
2770 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2771 vport->fc_fdmitmo.data = (unsigned long)vport;
2773 init_timer(&vport->els_tmofunc);
2774 vport->els_tmofunc.function = lpfc_els_timeout;
2775 vport->els_tmofunc.data = (unsigned long)vport;
2777 init_timer(&vport->delayed_disc_tmo);
2778 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
2779 vport->delayed_disc_tmo.data = (unsigned long)vport;
2781 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2785 spin_lock_irq(&phba->hbalock);
2786 list_add_tail(&vport->listentry, &phba->port_list);
2787 spin_unlock_irq(&phba->hbalock);
2791 scsi_host_put(shost);
2797 * destroy_port - destroy an FC port
2798 * @vport: pointer to an lpfc virtual N_Port data structure.
2800 * This routine destroys an FC port from the upper layer protocol. All the
2801 * resources associated with the port are released.
2804 destroy_port(struct lpfc_vport *vport)
2806 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2807 struct lpfc_hba *phba = vport->phba;
2809 lpfc_debugfs_terminate(vport);
2810 fc_remove_host(shost);
2811 scsi_remove_host(shost);
2813 spin_lock_irq(&phba->hbalock);
2814 list_del_init(&vport->listentry);
2815 spin_unlock_irq(&phba->hbalock);
2817 lpfc_cleanup(vport);
2822 * lpfc_get_instance - Get a unique integer ID
2824 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2825 * uses the kernel idr facility to perform the task.
2828 * instance - a unique integer ID allocated as the new instance.
2829 * -1 - lpfc get instance failed.
2832 lpfc_get_instance(void)
2836 /* Assign an unused number */
2837 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2839 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2845 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2846 * @shost: pointer to SCSI host data structure.
2847 * @time: elapsed time of the scan in jiffies.
2849 * This routine is called by the SCSI layer with a SCSI host to determine
2850 * whether the host scan is finished.
2852 * Note: there is no scan_start function as adapter initialization will have
2853 * asynchronously kicked off the link initialization.
2856 * 0 - SCSI host scan is not over yet.
2857 * 1 - SCSI host scan is over.
2859 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2861 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2862 struct lpfc_hba *phba = vport->phba;
2865 spin_lock_irq(shost->host_lock);
2867 if (vport->load_flag & FC_UNLOADING) {
2871 if (time >= 30 * HZ) {
2872 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2873 "0461 Scanning longer than 30 "
2874 "seconds. Continuing initialization\n");
2878 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2879 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2880 "0465 Link down longer than 15 "
2881 "seconds. Continuing initialization\n");
2886 if (vport->port_state != LPFC_VPORT_READY)
2888 if (vport->num_disc_nodes || vport->fc_prli_sent)
2890 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2892 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2898 spin_unlock_irq(shost->host_lock);
2903 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2904 * @shost: pointer to SCSI host data structure.
2906 * This routine initializes a given SCSI host's attributes on an FC port. The
2907 * SCSI host can be either on top of a physical port or a virtual port.
2909 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2911 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2912 struct lpfc_hba *phba = vport->phba;
2914 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2917 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2918 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2919 fc_host_supported_classes(shost) = FC_COS_CLASS3;
2921 memset(fc_host_supported_fc4s(shost), 0,
2922 sizeof(fc_host_supported_fc4s(shost)));
2923 fc_host_supported_fc4s(shost)[2] = 1;
2924 fc_host_supported_fc4s(shost)[7] = 1;
2926 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2927 sizeof fc_host_symbolic_name(shost));
2929 fc_host_supported_speeds(shost) = 0;
2930 if (phba->lmt & LMT_16Gb)
2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
2932 if (phba->lmt & LMT_10Gb)
2933 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2934 if (phba->lmt & LMT_8Gb)
2935 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2936 if (phba->lmt & LMT_4Gb)
2937 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2938 if (phba->lmt & LMT_2Gb)
2939 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2940 if (phba->lmt & LMT_1Gb)
2941 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2943 fc_host_maxframe_size(shost) =
2944 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2945 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
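/* The max receive data field size is a 12-bit value built from the MSB nibble and LSB byte of the common service parameters */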
2947 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2949 /* This value is also unchanging */
2950 memset(fc_host_active_fc4s(shost), 0,
2951 sizeof(fc_host_active_fc4s(shost)));
2952 fc_host_active_fc4s(shost)[2] = 1;
2953 fc_host_active_fc4s(shost)[7] = 1;
2955 fc_host_max_npiv_vports(shost) = phba->max_vpi;
2956 spin_lock_irq(shost->host_lock);
2957 vport->load_flag &= ~FC_LOADING;
2958 spin_unlock_irq(shost->host_lock);
2962 * lpfc_stop_port_s3 - Stop SLI3 device port
2963 * @phba: pointer to lpfc hba data structure.
2965 * This routine is invoked to stop an SLI3 device port; it stops the device
2966 * from generating interrupts and stops the device driver's timers for the
2970 lpfc_stop_port_s3(struct lpfc_hba *phba)
2972 /* Clear all interrupt enable conditions */
2973 writel(0, phba->HCregaddr);
2974 readl(phba->HCregaddr); /* flush */
2975 /* Clear all pending interrupts */
2976 writel(0xffffffff, phba->HAregaddr);
2977 readl(phba->HAregaddr); /* flush */
2979 /* Reset some HBA SLI setup states */
2980 lpfc_stop_hba_timers(phba);
2981 phba->pport->work_port_events = 0;
2985 * lpfc_stop_port_s4 - Stop SLI4 device port
2986 * @phba: pointer to lpfc hba data structure.
2988 * This routine is invoked to stop an SLI4 device port; it stops the device
2989 * from generating interrupts and stops the device driver's timers for the
2993 lpfc_stop_port_s4(struct lpfc_hba *phba)
2995 /* Reset some HBA SLI4 setup states */
2996 lpfc_stop_hba_timers(phba);
2997 phba->pport->work_port_events = 0;
2998 phba->sli4_hba.intr_enable = 0;
3002 * lpfc_stop_port - Wrapper function for stopping hba port
3003 * @phba: Pointer to HBA context object.
3005 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
3006 * the API jump table function pointer from the lpfc_hba struct.
3009 lpfc_stop_port(struct lpfc_hba *phba)
3011 phba->lpfc_stop_port(phba);
3015 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3016 * @phba: Pointer to hba for which this call is being executed.
3018 * This routine starts the timer waiting for the FCF rediscovery to complete.
3021 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3023 unsigned long fcf_redisc_wait_tmo =
3024 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3025 /* Start fcf rediscovery wait period timer */
3026 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3027 spin_lock_irq(&phba->hbalock);
3028 /* Allow action to new fcf asynchronous event */
3029 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3030 /* Mark the FCF rediscovery pending state */
3031 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3032 spin_unlock_irq(&phba->hbalock);
3036 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3037 * @ptr: Map to lpfc_hba data structure pointer.
3039 * This routine is invoked when the wait for FCF table rediscovery has
3040 * timed out. If new FCF record(s) have been discovered during the
3041 * wait period, a new FCF event shall be added to the FCoE async event
3042 * list, and then the worker thread shall be woken up for processing from the
3043 * worker thread context.
3046 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3048 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3050 /* Don't send FCF rediscovery event if timer cancelled */
3051 spin_lock_irq(&phba->hbalock);
3052 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3053 spin_unlock_irq(&phba->hbalock);
3056 /* Clear FCF rediscovery timer pending flag */
3057 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3058 /* FCF rediscovery event to worker thread */
3059 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3060 spin_unlock_irq(&phba->hbalock);
3061 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3062 "2776 FCF rediscover quiescent timer expired\n");
3063 /* wake up worker thread */
3064 lpfc_worker_wake_up(phba);
3068 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3069 * @phba: pointer to lpfc hba data structure.
3070 * @acqe_link: pointer to the async link completion queue entry.
3072 * This routine is to parse the SLI4 link-attention link fault code and
3073 * translate it into the base driver's read link attention mailbox command
3076 * Return: Link-attention status in terms of base driver's coding.
3079 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3080 struct lpfc_acqe_link *acqe_link)
3082 uint16_t latt_fault;
3084 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3085 case LPFC_ASYNC_LINK_FAULT_NONE:
3086 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3087 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3092 "0398 Invalid link fault code: x%x\n",
3093 bf_get(lpfc_acqe_link_fault, acqe_link));
3094 latt_fault = MBXERR_ERROR;
3101 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3102 * @phba: pointer to lpfc hba data structure.
3103 * @acqe_link: pointer to the async link completion queue entry.
3105 * This routine is to parse the SLI4 link attention type and translate it
3106 * into the base driver's link attention type coding.
3108 * Return: Link attention type in terms of base driver's coding.
3111 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3112 struct lpfc_acqe_link *acqe_link)
3116 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3117 case LPFC_ASYNC_LINK_STATUS_DOWN:
3118 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3119 att_type = LPFC_ATT_LINK_DOWN;
3121 case LPFC_ASYNC_LINK_STATUS_UP:
3122 /* Ignore physical link up events - wait for logical link up */
3123 att_type = LPFC_ATT_RESERVED;
3125 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3126 att_type = LPFC_ATT_LINK_UP;
3129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3130 "0399 Invalid link attention type: x%x\n",
3131 bf_get(lpfc_acqe_link_status, acqe_link));
3132 att_type = LPFC_ATT_RESERVED;
3139 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3140 * @phba: pointer to lpfc hba data structure.
3141 * @acqe_link: pointer to the async link completion queue entry.
3143 * This routine is to parse the SLI4 link-attention link speed and translate
3144 * it into the base driver's link-attention link speed coding.
3146 * Return: Link-attention link speed in terms of base driver's coding.
3149 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3150 struct lpfc_acqe_link *acqe_link)
3154 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3155 case LPFC_ASYNC_LINK_SPEED_ZERO:
3156 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3157 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3158 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3160 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3161 link_speed = LPFC_LINK_SPEED_1GHZ;
3163 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3164 link_speed = LPFC_LINK_SPEED_10GHZ;
3167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3168 "0483 Invalid link-attention link speed: x%x\n",
3169 bf_get(lpfc_acqe_link_speed, acqe_link));
3170 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3177 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3178 * @phba: pointer to lpfc hba data structure.
3179 * @acqe_link: pointer to the async link completion queue entry.
3181 * This routine is to handle the SLI4 asynchronous FCoE link event.
3184 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3185 struct lpfc_acqe_link *acqe_link)
3187 struct lpfc_dmabuf *mp;
3190 struct lpfc_mbx_read_top *la;
3194 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3195 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3197 phba->fcoe_eventtag = acqe_link->event_tag;
3198 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3201 "0395 The mboxq allocation failed\n");
3204 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3206 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3207 "0396 The lpfc_dmabuf allocation failed\n");
3210 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3212 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3213 "0397 The mbuf allocation failed\n");
3214 goto out_free_dmabuf;
3217 /* Cleanup any outstanding ELS commands */
3218 lpfc_els_flush_all_cmd(phba);
3220 /* Block ELS IOCBs until we have done process link event */
3221 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3223 /* Update link event statistics */
3224 phba->sli.slistat.link_event++;
3226 /* Create lpfc_handle_latt mailbox command from link ACQE */
3227 lpfc_read_topology(phba, pmb, mp);
3228 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3229 pmb->vport = phba->pport;
3231 /* Keep the link status for extra SLI4 state machine reference */
3232 phba->sli4_hba.link_state.speed =
3233 bf_get(lpfc_acqe_link_speed, acqe_link);
3234 phba->sli4_hba.link_state.duplex =
3235 bf_get(lpfc_acqe_link_duplex, acqe_link);
3236 phba->sli4_hba.link_state.status =
3237 bf_get(lpfc_acqe_link_status, acqe_link);
3238 phba->sli4_hba.link_state.type =
3239 bf_get(lpfc_acqe_link_type, acqe_link);
3240 phba->sli4_hba.link_state.number =
3241 bf_get(lpfc_acqe_link_number, acqe_link);
3242 phba->sli4_hba.link_state.fault =
3243 bf_get(lpfc_acqe_link_fault, acqe_link);
3244 phba->sli4_hba.link_state.logical_speed =
3245 bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3246 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3247 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3248 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3249 "Logical speed:%dMbps Fault:%d\n",
3250 phba->sli4_hba.link_state.speed,
3251 phba->sli4_hba.link_state.topology,
3252 phba->sli4_hba.link_state.status,
3253 phba->sli4_hba.link_state.type,
3254 phba->sli4_hba.link_state.number,
3255 phba->sli4_hba.link_state.logical_speed * 10,
3256 phba->sli4_hba.link_state.fault);
3258 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3259 * topology info. Note: Optional for non FC-AL ports.
3261 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3262 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3263 if (rc == MBX_NOT_FINISHED)
3264 goto out_free_dmabuf;
3268 * For FCoE Mode: fill in all the topology information we need and call
3269 * the READ_TOPOLOGY completion routine to continue without actually
3270 * sending the READ_TOPOLOGY mailbox command to the port.
3272 /* Parse and translate status field */
3274 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3276 /* Parse and translate link attention fields */
3277 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3278 la->eventTag = acqe_link->event_tag;
3279 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3280 bf_set(lpfc_mbx_read_top_link_spd, la,
3281 lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3283 /* Fake the following irrelevant fields */
3284 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3285 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3286 bf_set(lpfc_mbx_read_top_il, la, 0);
3287 bf_set(lpfc_mbx_read_top_pb, la, 0);
3288 bf_set(lpfc_mbx_read_top_fa, la, 0);
3289 bf_set(lpfc_mbx_read_top_mm, la, 0);
3291 /* Invoke the lpfc_handle_latt mailbox command callback function */
3292 lpfc_mbx_cmpl_read_topology(phba, pmb);
3299 mempool_free(pmb, phba->mbox_mem_pool);
3303 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3304 * @phba: pointer to lpfc hba data structure.
3305 * @acqe_fc: pointer to the async fc completion queue entry.
3307 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3308 * that the event was received and then issue a read_topology mailbox command so
3309 * that the rest of the driver will treat it the same as SLI3.
3312 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3314 struct lpfc_dmabuf *mp;
3318 if (bf_get(lpfc_trailer_type, acqe_fc) !=
3319 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3320 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3321 "2895 Non FC link Event detected.(%d)\n",
3322 bf_get(lpfc_trailer_type, acqe_fc));
3325 /* Keep the link status for extra SLI4 state machine reference */
3326 phba->sli4_hba.link_state.speed =
3327 bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3328 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3329 phba->sli4_hba.link_state.topology =
3330 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3331 phba->sli4_hba.link_state.status =
3332 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3333 phba->sli4_hba.link_state.type =
3334 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3335 phba->sli4_hba.link_state.number =
3336 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3337 phba->sli4_hba.link_state.fault =
3338 bf_get(lpfc_acqe_link_fault, acqe_fc);
3339 phba->sli4_hba.link_state.logical_speed =
3340 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3341 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3342 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3343 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3344 "%dMbps Fault:%d\n",
3345 phba->sli4_hba.link_state.speed,
3346 phba->sli4_hba.link_state.topology,
3347 phba->sli4_hba.link_state.status,
3348 phba->sli4_hba.link_state.type,
3349 phba->sli4_hba.link_state.number,
3350 phba->sli4_hba.link_state.logical_speed * 10,
3351 phba->sli4_hba.link_state.fault);
3352 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3354 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3355 "2897 The mboxq allocation failed\n");
3358 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3360 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3361 "2898 The lpfc_dmabuf allocation failed\n");
3364 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3366 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3367 "2899 The mbuf allocation failed\n");
3368 goto out_free_dmabuf;
3371 /* Cleanup any outstanding ELS commands */
3372 lpfc_els_flush_all_cmd(phba);
3374 /* Block ELS IOCBs until we have done process link event */
3375 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3377 /* Update link event statistics */
3378 phba->sli.slistat.link_event++;
3380 /* Create lpfc_handle_latt mailbox command from link ACQE */
3381 lpfc_read_topology(phba, pmb, mp);
3382 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3383 pmb->vport = phba->pport;
3385 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3386 if (rc == MBX_NOT_FINISHED)
3387 goto out_free_dmabuf;
3393 mempool_free(pmb, phba->mbox_mem_pool);
3397 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3398 * @phba: pointer to lpfc hba data structure.
3399 * @acqe_sli: pointer to the async SLI completion queue entry.
3401 * This routine is to handle the SLI4 asynchronous SLI events.
3404 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3406 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3407 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3408 "x%08x SLI Event Type:%d",
3409 acqe_sli->event_data1, acqe_sli->event_data2,
3410 bf_get(lpfc_trailer_type, acqe_sli));
3415 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3416 * @vport: pointer to vport data structure.
3418 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3419 * response to a CVL event.
3421 * Return the pointer to the ndlp with the vport if successful, otherwise
3424 static struct lpfc_nodelist *
3425 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3427 struct lpfc_nodelist *ndlp;
3428 struct Scsi_Host *shost;
3429 struct lpfc_hba *phba;
3436 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3438 /* Cannot find existing Fabric ndlp, so allocate a new one */
3439 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3442 lpfc_nlp_init(vport, ndlp, Fabric_DID);
3443 /* Set the node type */
3444 ndlp->nlp_type |= NLP_FABRIC;
3445 /* Put ndlp onto node list */
3446 lpfc_enqueue_node(vport, ndlp);
3447 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3448 /* re-setup ndlp without removing from node list */
3449 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3453 if ((phba->pport->port_state < LPFC_FLOGI) &&
3454 (phba->pport->port_state != LPFC_VPORT_FAILED))
3456 /* If virtual link is not yet instantiated ignore CVL */
3457 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3458 && (vport->port_state != LPFC_VPORT_FAILED))
3460 shost = lpfc_shost_from_vport(vport);
3463 lpfc_linkdown_port(vport);
3464 lpfc_cleanup_pending_mbox(vport);
3465 spin_lock_irq(shost->host_lock);
3466 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3467 spin_unlock_irq(shost->host_lock);
3473 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3474 * @phba: pointer to lpfc hba data structure.
3476 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3477 * response to a FCF dead event.
3480 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3482 struct lpfc_vport **vports;
3485 vports = lpfc_create_vport_work_array(phba);
3487 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3488 lpfc_sli4_perform_vport_cvl(vports[i]);
3489 lpfc_destroy_vport_work_array(phba, vports);
3493 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3494 * @phba: pointer to lpfc hba data structure.
3495 * @acqe_fip: pointer to the async FCoE completion queue entry.
3497 * This routine is to handle the SLI4 asynchronous FCoE event.
3500 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3501 struct lpfc_acqe_fip *acqe_fip)
3503 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3505 struct lpfc_vport *vport;
3506 struct lpfc_nodelist *ndlp;
3507 struct Scsi_Host *shost;
3508 int active_vlink_present;
3509 struct lpfc_vport **vports;
3512 phba->fc_eventTag = acqe_fip->event_tag;
3513 phba->fcoe_eventtag = acqe_fip->event_tag;
3514 switch (event_type) {
3515 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3516 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3517 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3518 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3520 "2546 New FCF event, evt_tag:x%x, "
3522 acqe_fip->event_tag,
3525 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3527 "2788 FCF param modified event, "
3528 "evt_tag:x%x, index:x%x\n",
3529 acqe_fip->event_tag,
3531 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3533 * During period of FCF discovery, read the FCF
3534 * table record indexed by the event to update
3535 * FCF roundrobin failover eligible FCF bmask.
3537 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3539 "2779 Read FCF (x%x) for updating "
3540 "roundrobin FCF failover bmask\n",
3542 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3545 /* If the FCF discovery is in progress, do nothing. */
3546 spin_lock_irq(&phba->hbalock);
3547 if (phba->hba_flag & FCF_TS_INPROG) {
3548 spin_unlock_irq(&phba->hbalock);
3551 /* If fast FCF failover rescan event is pending, do nothing */
3552 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3553 spin_unlock_irq(&phba->hbalock);
3557 /* If the FCF has been in discovered state, do nothing. */
3558 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3559 spin_unlock_irq(&phba->hbalock);
3562 spin_unlock_irq(&phba->hbalock);
3564 /* Otherwise, scan the entire FCF table and re-discover SAN */
3565 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3566 "2770 Start FCF table scan per async FCF "
3567 "event, evt_tag:x%x, index:x%x\n",
3568 acqe_fip->event_tag, acqe_fip->index);
3569 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3570 LPFC_FCOE_FCF_GET_FIRST);
3572 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3573 "2547 Issue FCF scan read FCF mailbox "
3574 "command failed (x%x)\n", rc);
3577 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3578 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3579 "2548 FCF Table full count 0x%x tag 0x%x\n",
3580 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3581 acqe_fip->event_tag);
3584 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3585 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3586 "2549 FCF (x%x) disconnected from network, "
3587 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3589 * If we are in the middle of FCF failover process, clear
3590 * the corresponding FCF bit in the roundrobin bitmap.
3592 spin_lock_irq(&phba->hbalock);
3593 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3594 spin_unlock_irq(&phba->hbalock);
3595 /* Update FLOGI FCF failover eligible FCF bmask */
3596 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3599 spin_unlock_irq(&phba->hbalock);
3601 /* If the event is not for currently used fcf do nothing */
3602 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3606 * Otherwise, request the port to rediscover the entire FCF
3607 * table for a fast recovery from the case that the current FCF
3608 * is no longer valid, as we are not already in the middle of
3609 * the FCF failover process.
3611 spin_lock_irq(&phba->hbalock);
3612 /* Mark the fast failover process in progress */
3613 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3614 spin_unlock_irq(&phba->hbalock);
3616 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3617 "2771 Start FCF fast failover process due to "
3618 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3619 "\n", acqe_fip->event_tag, acqe_fip->index);
3620 rc = lpfc_sli4_redisc_fcf_table(phba);
3622 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3624 "2772 Issue FCF rediscover mabilbox "
3625 "command failed, fail through to FCF "
3627 spin_lock_irq(&phba->hbalock);
3628 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3629 spin_unlock_irq(&phba->hbalock);
3631 * Last resort will fail over by treating this
3632 * as a link down to FCF registration.
3634 lpfc_sli4_fcf_dead_failthrough(phba);
3636 /* Reset FCF roundrobin bmask for new discovery */
3637 memset(phba->fcf.fcf_rr_bmask, 0,
3638 sizeof(*phba->fcf.fcf_rr_bmask));
3640 * Handling fast FCF failover to a DEAD FCF event is
3641 * considered equivalent to receiving CVL to all vports.
3643 lpfc_sli4_perform_all_vport_cvl(phba);
3646 case LPFC_FIP_EVENT_TYPE_CVL:
3647 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3648 "2718 Clear Virtual Link Received for VPI 0x%x"
3649 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3651 vport = lpfc_find_vport_by_vpid(phba,
3652 acqe_fip->index - phba->vpi_base);
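/* The CVL event carries an absolute VPI; convert it to a driver-relative vpid to locate the vport */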
3653 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3656 active_vlink_present = 0;
3658 vports = lpfc_create_vport_work_array(phba);
3660 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3662 if ((!(vports[i]->fc_flag &
3663 FC_VPORT_CVL_RCVD)) &&
3664 (vports[i]->port_state > LPFC_FDISC)) {
3665 active_vlink_present = 1;
3669 lpfc_destroy_vport_work_array(phba, vports);
3672 if (active_vlink_present) {
3674 * If there are other active VLinks present,
3675 * re-instantiate the Vlink using FDISC.
3677 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3678 shost = lpfc_shost_from_vport(vport);
3679 spin_lock_irq(shost->host_lock);
3680 ndlp->nlp_flag |= NLP_DELAY_TMO;
3681 spin_unlock_irq(shost->host_lock);
3682 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3683 vport->port_state = LPFC_FDISC;
3686 * Otherwise, we request the port to rediscover
3687 * the entire FCF table for a fast recovery
3688 * from the possible case that the current FCF
3689 * is no longer valid, if we are not already
3690 * in the FCF failover process.
3692 spin_lock_irq(&phba->hbalock);
3693 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3694 spin_unlock_irq(&phba->hbalock);
3697 /* Mark the fast failover process in progress */
3698 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3699 spin_unlock_irq(&phba->hbalock);
3700 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3702 "2773 Start FCF failover per CVL, "
3703 "evt_tag:x%x\n", acqe_fip->event_tag);
3704 rc = lpfc_sli4_redisc_fcf_table(phba);
3706 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3708 "2774 Issue FCF rediscover "
3709 "mabilbox command failed, "
3710 "through to CVL event\n");
3711 spin_lock_irq(&phba->hbalock);
3712 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3713 spin_unlock_irq(&phba->hbalock);
3715 * Last resort will be retry on the
3716 * current registered FCF entry.
3718 lpfc_retry_pport_discovery(phba);
3721 * Reset FCF roundrobin bmask for new
3724 memset(phba->fcf.fcf_rr_bmask, 0,
3725 sizeof(*phba->fcf.fcf_rr_bmask));
3729 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3730 "0288 Unknown FCoE event type 0x%x event tag "
3731 "0x%x\n", event_type, acqe_fip->event_tag);
3737 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3738 * @phba: pointer to lpfc hba data structure.
3739 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
3741 * This routine is to handle the SLI4 asynchronous dcbx event.
3744 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3745 struct lpfc_acqe_dcbx *acqe_dcbx)
3747 phba->fc_eventTag = acqe_dcbx->event_tag;
3748 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3749 "0290 The SLI4 DCBX asynchronous event is not "
3754 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3755 * @phba: pointer to lpfc hba data structure.
3756 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3758 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3759 * is an asynchronous notification of a logical link speed change. The Port
3760 * reports the logical link speed in units of 10Mbps.
3763 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3764 struct lpfc_acqe_grp5 *acqe_grp5)
3766 uint16_t prev_ll_spd;
3768 phba->fc_eventTag = acqe_grp5->event_tag;
3769 phba->fcoe_eventtag = acqe_grp5->event_tag;
3770 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3771 phba->sli4_hba.link_state.logical_speed =
3772 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
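/* The logical link speed in the ACQE is in units of 10 Mbps; the log below converts it to Mbps */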
3773 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3774 "2789 GRP5 Async Event: Updating logical link speed "
3775 "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3776 (phba->sli4_hba.link_state.logical_speed*10));
3780 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3781 * @phba: pointer to lpfc hba data structure.
3783 * This routine is invoked by the worker thread to process all the pending
3784 * SLI4 asynchronous events.
3786 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3788 struct lpfc_cq_event *cq_event;
3790 /* First, declare the async event has been handled */
3791 spin_lock_irq(&phba->hbalock);
3792 phba->hba_flag &= ~ASYNC_EVENT;
3793 spin_unlock_irq(&phba->hbalock);
3794 /* Now, handle all the async events */
3795 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3796 /* Get the first event from the head of the event queue */
3797 spin_lock_irq(&phba->hbalock);
3798 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3799 cq_event, struct lpfc_cq_event, list);
3800 spin_unlock_irq(&phba->hbalock);
3801 /* Process the asynchronous event */
3802 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3803 case LPFC_TRAILER_CODE_LINK:
3804 lpfc_sli4_async_link_evt(phba,
3805 &cq_event->cqe.acqe_link);
3807 case LPFC_TRAILER_CODE_FCOE:
3808 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3810 case LPFC_TRAILER_CODE_DCBX:
3811 lpfc_sli4_async_dcbx_evt(phba,
3812 &cq_event->cqe.acqe_dcbx);
3814 case LPFC_TRAILER_CODE_GRP5:
3815 lpfc_sli4_async_grp5_evt(phba,
3816 &cq_event->cqe.acqe_grp5);
3818 case LPFC_TRAILER_CODE_FC:
3819 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3821 case LPFC_TRAILER_CODE_SLI:
3822 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3825 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3826 "1804 Invalid asynchrous event code: "
3827 "x%x\n", bf_get(lpfc_trailer_code,
3828 &cq_event->cqe.mcqe_cmpl));
3831 /* Free the completion event processed to the free pool */
3832 lpfc_sli4_cq_event_release(phba, cq_event);
3837 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3838 * @phba: pointer to lpfc hba data structure.
3840 * This routine is invoked by the worker thread to process FCF table
3841 * rediscovery pending completion event.
3843 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3847 spin_lock_irq(&phba->hbalock);
3848 /* Clear FCF rediscovery timeout event */
3849 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3850 /* Clear driver fast failover FCF record flag */
3851 phba->fcf.failover_rec.flag = 0;
3852 /* Set state for FCF fast failover */
3853 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3854 spin_unlock_irq(&phba->hbalock);
3856 /* Scan FCF table from the first entry to re-discover SAN */
3857 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3858 "2777 Start post-quiescent FCF table scan\n");
3859 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3861 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3862 "2747 Issue FCF scan read FCF mailbox "
3863 "command failed 0x%x\n", rc);
3867 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3868 * @phba: pointer to lpfc hba data structure.
3869 * @dev_grp: The HBA PCI-Device group number.
3871 * This routine is invoked to set up the per HBA PCI-Device group function
3872 * API jump table entries.
3874 * Return: 0 if success, otherwise -ENODEV
3877 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3881 /* Set up lpfc PCI-device group */
3882 phba->pci_dev_grp = dev_grp;
3884 /* The LPFC_PCI_DEV_OC uses SLI4 */
3885 if (dev_grp == LPFC_PCI_DEV_OC)
3886 phba->sli_rev = LPFC_SLI_REV4;
3888 /* Set up device INIT API function jump table */
3889 rc = lpfc_init_api_table_setup(phba, dev_grp);
3892 /* Set up SCSI API function jump table */
3893 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3896 /* Set up SLI API function jump table */
3897 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3900 /* Set up MBOX API function jump table */
3901 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3909 * lpfc_log_intr_mode - Log the active interrupt mode
3910 * @phba: pointer to lpfc hba data structure.
3911 * @intr_mode: active interrupt mode adopted.
3913 * This routine is invoked to log the currently used active interrupt mode
3916 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3918 switch (intr_mode) {
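/* The interrupt modes handled below are INTx, MSI, and MSI-X, in that order */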
3920 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3921 "0470 Enable INTx interrupt mode.\n");
3924 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3925 "0481 Enabled MSI interrupt mode.\n");
3928 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3929 "0480 Enabled MSI-X interrupt mode.\n");
3932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3933 "0482 Illegal interrupt mode.\n");
3940 * lpfc_enable_pci_dev - Enable a generic PCI device.
3941 * @phba: pointer to lpfc hba data structure.
3943 * This routine is invoked to enable the PCI device that is common to all
3948 * other values - error
3951 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3953 struct pci_dev *pdev;
3956 /* Obtain PCI device reference */
3960 pdev = phba->pcidev;
3961 /* Select PCI BARs */
3962 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3963 /* Enable PCI device */
3964 if (pci_enable_device_mem(pdev))
3966 /* Request PCI resource for the device */
3967 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3968 goto out_disable_device;
3969 /* Set up device as PCI master and save state for EEH */
3970 pci_set_master(pdev);
3971 pci_try_set_mwi(pdev);
3972 pci_save_state(pdev);
3974 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3975 if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
3976 pdev->needs_freset = 1;
3981 pci_disable_device(pdev);
3987 * lpfc_disable_pci_dev - Disable a generic PCI device.
3988 * @phba: pointer to lpfc hba data structure.
3990 * This routine is invoked to disable the PCI device that is common to all
3994 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3996 struct pci_dev *pdev;
3999 /* Obtain PCI device reference */
4003 pdev = phba->pcidev;
4004 /* Select PCI BARs */
4005 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4006 /* Release PCI resource and disable PCI device */
4007 pci_release_selected_regions(pdev, bars);
4008 pci_disable_device(pdev);
4009 /* Null out PCI private reference to driver */
4010 pci_set_drvdata(pdev, NULL);
4016 * lpfc_reset_hba - Reset a hba
4017 * @phba: pointer to lpfc hba data structure.
4019 * This routine is invoked to reset a hba device. It brings the HBA
4020 * offline, performs a board restart, and then brings the board back
4021 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4022 * outstanding mailbox commands.
4025 lpfc_reset_hba(struct lpfc_hba *phba)
4027 /* If resets are disabled then set error state and return. */
4028 if (!phba->cfg_enable_hba_reset) {
4029 phba->link_state = LPFC_HBA_ERROR;
4032 lpfc_offline_prep(phba);
4034 lpfc_sli_brdrestart(phba);
4036 lpfc_unblock_mgmt_io(phba);
4040 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4041 * @phba: pointer to lpfc hba data structure.
4042 * @nr_vfn: number of virtual functions to be enabled.
4044 * This function enables the PCI SR-IOV virtual functions to a physical
4045 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4046 * enable the number of virtual functions to the physical function. As
4047 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4048 * API call is not considered an error condition for most devices.
4051 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4053 struct pci_dev *pdev = phba->pcidev;
4056 rc = pci_enable_sriov(pdev, nr_vfn);
4058 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4059 "2806 Failed to enable sriov on this device "
4060 "with vfn number nr_vf:%d, rc:%d\n",
4063 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4064 "2807 Successful enable sriov on this device "
4065 "with vfn number nr_vf:%d\n", nr_vfn);
4070 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4071 * @phba: pointer to lpfc hba data structure.
4073 * This routine is invoked to set up the driver internal resources specific to
4074 * support the SLI-3 HBA device it is attached to.
4078 * other values - error
4081 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4083 struct lpfc_sli *psli;
4087 * Initialize timers used by driver
4090 /* Heartbeat timer */
4091 init_timer(&phba->hb_tmofunc);
4092 phba->hb_tmofunc.function = lpfc_hb_timeout;
4093 phba->hb_tmofunc.data = (unsigned long)phba;
4096 /* MBOX heartbeat timer */
4097 init_timer(&psli->mbox_tmo);
4098 psli->mbox_tmo.function = lpfc_mbox_timeout;
4099 psli->mbox_tmo.data = (unsigned long) phba;
4100 /* FCP polling mode timer */
4101 init_timer(&phba->fcp_poll_timer);
4102 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4103 phba->fcp_poll_timer.data = (unsigned long) phba;
4104 /* Fabric block timer */
4105 init_timer(&phba->fabric_block_timer);
4106 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4107 phba->fabric_block_timer.data = (unsigned long) phba;
4108 /* EA polling mode timer */
4109 init_timer(&phba->eratt_poll);
4110 phba->eratt_poll.function = lpfc_poll_eratt;
4111 phba->eratt_poll.data = (unsigned long) phba;
4113 /* Host attention work mask setup */
4114 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4115 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4117 /* Get all the module params for configuring this host */
4118 lpfc_get_cfgparam(phba);
4119 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4120 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4121 /* check for menlo minimum sg count */
4122 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4123 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4127 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4128 * used to create the sg_dma_buf_pool must be dynamically calculated.
4129 * 2 segments are added since the IOCB needs a command and response bde.
4131 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4132 sizeof(struct fcp_rsp) +
4133 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
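/* Worked example (illustrative, not taken from this source): with a typical
 * cfg_sg_seg_cnt of 64, the pool buffer holds the FCP command, the FCP
 * response, and 66 ulp_bde64 entries (64 data BDEs plus the command and
 * response BDEs); the exact byte count depends on the structure sizes in
 * this build. */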
4135 if (phba->cfg_enable_bg) {
4136 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4137 phba->cfg_sg_dma_buf_size +=
4138 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
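/* When BlockGuard is enabled the same DMA buffer must also carry one BDE per
 * protection (DIF) segment, hence the extra cfg_prot_sg_seg_cnt entries
 * added above. */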
4141 /* Also reinitialize the host templates with new values. */
4142 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4143 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4145 phba->max_vpi = LPFC_MAX_VPI;
4146 /* This will be set to correct value after config_port mbox */
4147 phba->max_vports = 0;
4150 * Initialize the SLI Layer to run with lpfc HBAs.
4152 lpfc_sli_setup(phba);
4153 lpfc_sli_queue_setup(phba);
4155 /* Allocate device driver memory */
4156 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4160 * Enable sr-iov virtual functions if supported and configured
4161 * through the module parameter.
4163 if (phba->cfg_sriov_nr_virtfn > 0) {
4164 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4165 phba->cfg_sriov_nr_virtfn);
4167 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4168 "2808 Requested number of SR-IOV "
4169 "virtual functions (%d) is not "
4171 phba->cfg_sriov_nr_virtfn);
4172 phba->cfg_sriov_nr_virtfn = 0;
4180 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4181 * @phba: pointer to lpfc hba data structure.
4183 * This routine is invoked to unset the driver internal resources set up
4184 * specific for supporting the SLI-3 HBA device it is attached to.
4187 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4189 /* Free device driver memory allocated */
4190 lpfc_mem_free_all(phba);
4196 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4197 * @phba: pointer to lpfc hba data structure.
4199 * This routine is invoked to set up the driver internal resources specific to
4200 * support the SLI-4 HBA device it is attached to.
4204 * other values - error
4207 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4209 struct lpfc_sli *psli;
4210 LPFC_MBOXQ_t *mboxq;
4211 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4212 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4213 struct lpfc_mqe *mqe;
4214 int longs, sli_family;
4216 /* Before proceeding, wait for POST done and device ready */
4217 rc = lpfc_sli4_post_status_check(phba);
4222 * Initialize timers used by driver
4225 /* Heartbeat timer */
4226 init_timer(&phba->hb_tmofunc);
4227 phba->hb_tmofunc.function = lpfc_hb_timeout;
4228 phba->hb_tmofunc.data = (unsigned long)phba;
4229 init_timer(&phba->rrq_tmr);
4230 phba->rrq_tmr.function = lpfc_rrq_timeout;
4231 phba->rrq_tmr.data = (unsigned long)phba;
4234 /* MBOX heartbeat timer */
4235 init_timer(&psli->mbox_tmo);
4236 psli->mbox_tmo.function = lpfc_mbox_timeout;
4237 psli->mbox_tmo.data = (unsigned long) phba;
4238 /* Fabric block timer */
4239 init_timer(&phba->fabric_block_timer);
4240 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4241 phba->fabric_block_timer.data = (unsigned long) phba;
4242 /* EA polling mode timer */
4243 init_timer(&phba->eratt_poll);
4244 phba->eratt_poll.function = lpfc_poll_eratt;
4245 phba->eratt_poll.data = (unsigned long) phba;
4246 /* FCF rediscover timer */
4247 init_timer(&phba->fcf.redisc_wait);
4248 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4249 phba->fcf.redisc_wait.data = (unsigned long)phba;
4252 * Control structure for handling external multi-buffer mailbox
4253 * command pass-through.
4255 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4256 sizeof(struct lpfc_mbox_ext_buf_ctx));
4257 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4260 * We need to do a READ_CONFIG mailbox command here before
4261 * calling lpfc_get_cfgparam. For VFs this will report the
4262 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4263 * All of the resources allocated
4264 * for this Port are tied to these values.
4266 /* Get all the module params for configuring this host */
4267 lpfc_get_cfgparam(phba);
4268 phba->max_vpi = LPFC_MAX_VPI;
4269 /* This will be set to correct value after the read_config mbox */
4270 phba->max_vports = 0;
4272 /* Program the default value of vlan_id and fc_map */
4273 phba->valid_vlan = 0;
4274 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4275 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4276 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
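/* LPFC_FCOE_FCF_MAP0/1/2 hold the default FC-MAP bytes (assumed here to be
 * the standard FCoE 0x0E:FC:00 prefix); discovery may later replace them
 * with the value advertised by the fabric. */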
4279 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4280 * used to create the sg_dma_buf_pool must be dynamically calculated.
4281 * 2 segments are added since the IOCB needs a command and response bde.
4282 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4283 * sgl sizes that are a power of 2 are used.
4285 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4286 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4288 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4289 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4290 switch (sli_family) {
4291 case LPFC_SLI_INTF_FAMILY_BE2:
4292 case LPFC_SLI_INTF_FAMILY_BE3:
4293 /* There is a single hint for BE - 2 pages per BPL. */
4294 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4295 LPFC_SLI_INTF_SLI_HINT1_1)
4296 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4298 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4299 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4303 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4304 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4305 dma_buf_size = dma_buf_size << 1)
4307 if (dma_buf_size == max_buf_size)
4308 phba->cfg_sg_seg_cnt = (dma_buf_size -
4309 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4310 (2 * sizeof(struct sli4_sge))) /
4311 sizeof(struct sli4_sge);
4312 phba->cfg_sg_dma_buf_size = dma_buf_size;
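/* The loop above doubles dma_buf_size until it covers buf_size (or hits the
 * family-specific cap), keeping it a power of two so a SCSI SGL never
 * straddles a 4K boundary; if the cap is reached, cfg_sg_seg_cnt is trimmed
 * back to the number of sli4_sge entries that still fit. */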
4314 /* Initialize buffer queue management fields */
4315 hbq_count = lpfc_sli_hbq_count();
4316 for (i = 0; i < hbq_count; ++i)
4317 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4318 INIT_LIST_HEAD(&phba->rb_pend_list);
4319 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4320 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4323 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4325 /* Initialize the Abort scsi buffer list used by driver */
4326 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4327 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4328 /* This abort list used by worker thread */
4329 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4332 * Initialize driver internal slow-path work queues
4335 /* Driver internal slow-path CQ Event pool */
4336 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4337 /* Response IOCB work queue list */
4338 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4339 /* Asynchronous event CQ Event work queue list */
4340 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4341 /* Fast-path XRI aborted CQ Event work queue list */
4342 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4343 /* Slow-path XRI aborted CQ Event work queue list */
4344 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4345 /* Receive queue CQ Event work queue list */
4346 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4348 /* Initialize extent block lists. */
4349 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4350 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4351 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4352 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4354 /* Initialize the driver internal SLI layer lists. */
4355 lpfc_sli_setup(phba);
4356 lpfc_sli_queue_setup(phba);
4358 /* Allocate device driver memory */
4359 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4363 /* IF Type 2 ports get initialized now. */
4364 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4365 LPFC_SLI_INTF_IF_TYPE_2) {
4366 rc = lpfc_pci_function_reset(phba);
4371 /* Create the bootstrap mailbox command */
4372 rc = lpfc_create_bootstrap_mbox(phba);
4376 /* Set up the host's endian order with the device. */
4377 rc = lpfc_setup_endian_order(phba);
4379 goto out_free_bsmbx;
4381 /* Set up the hba's configuration parameters. */
4382 rc = lpfc_sli4_read_config(phba);
4384 goto out_free_bsmbx;
4386 /* IF Type 0 ports get initialized now. */
4387 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4388 LPFC_SLI_INTF_IF_TYPE_0) {
4389 rc = lpfc_pci_function_reset(phba);
4391 goto out_free_bsmbx;
4394 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4398 goto out_free_bsmbx;
4401 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4402 lpfc_supported_pages(mboxq);
4403 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4405 mqe = &mboxq->u.mqe;
4406 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4407 LPFC_MAX_SUPPORTED_PAGES);
4408 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4409 switch (pn_page[i]) {
4410 case LPFC_SLI4_PARAMETERS:
4411 phba->sli4_hba.pc_sli4_params.supported = 1;
4417 /* Read the port's SLI4 Parameters capabilities if supported. */
4418 if (phba->sli4_hba.pc_sli4_params.supported)
4419 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4421 mempool_free(mboxq, phba->mbox_mem_pool);
4423 goto out_free_bsmbx;
4427 * Get sli4 parameters that override parameters from Port capabilities.
4428 * If this call fails, it isn't critical unless the SLI4 parameters come
4431 rc = lpfc_get_sli4_parameters(phba, mboxq);
4433 if (phba->sli4_hba.extents_in_use &&
4434 phba->sli4_hba.rpi_hdrs_in_use) {
4435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4436 "2999 Unsupported SLI4 Parameters "
4437 "Extents and RPI headers enabled.\n");
4438 goto out_free_bsmbx;
4441 mempool_free(mboxq, phba->mbox_mem_pool);
4442 /* Create all the SLI4 queues */
4443 rc = lpfc_sli4_queue_create(phba);
4445 goto out_free_bsmbx;
4447 /* Create driver internal CQE event pool */
4448 rc = lpfc_sli4_cq_event_pool_create(phba);
4450 goto out_destroy_queue;
4452 /* Initialize and populate the iocb list per host */
4453 rc = lpfc_init_sgl_list(phba);
4455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4456 "1400 Failed to initialize sgl list.\n");
4457 goto out_destroy_cq_event_pool;
4459 rc = lpfc_init_active_sgl_array(phba);
4461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4462 "1430 Failed to initialize sgl list.\n");
4463 goto out_free_sgl_list;
4465 rc = lpfc_sli4_init_rpi_hdrs(phba);
4467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4468 "1432 Failed to initialize rpi headers.\n");
4469 goto out_free_active_sgl;
4472 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4473 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
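/* Classic round-up division: the FCF table index range is rounded up to a
 * whole number of unsigned longs so the round-robin bitmap covers every
 * possible FCF index. */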
4474 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4476 if (!phba->fcf.fcf_rr_bmask) {
4477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4478 "2759 Failed allocate memory for FCF round "
4479 "robin failover bmask\n");
4481 goto out_remove_rpi_hdrs;
4484 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4485 phba->cfg_fcp_eq_count), GFP_KERNEL);
4486 if (!phba->sli4_hba.fcp_eq_hdl) {
4487 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4488 "2572 Failed allocate memory for fast-path "
4489 "per-EQ handle array\n");
4491 goto out_free_fcf_rr_bmask;
4494 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4495 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4496 if (!phba->sli4_hba.msix_entries) {
4497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4498 "2573 Failed allocate memory for msi-x "
4499 "interrupt vector entries\n");
4501 goto out_free_fcp_eq_hdl;
4505 * Enable sr-iov virtual functions if supported and configured
4506 * through the module parameter.
4508 if (phba->cfg_sriov_nr_virtfn > 0) {
4509 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4510 phba->cfg_sriov_nr_virtfn);
4512 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4513 "3020 Requested number of SR-IOV "
4514 "virtual functions (%d) is not "
4516 phba->cfg_sriov_nr_virtfn);
4517 phba->cfg_sriov_nr_virtfn = 0;
4523 out_free_fcp_eq_hdl:
4524 kfree(phba->sli4_hba.fcp_eq_hdl);
4525 out_free_fcf_rr_bmask:
4526 kfree(phba->fcf.fcf_rr_bmask);
4527 out_remove_rpi_hdrs:
4528 lpfc_sli4_remove_rpi_hdrs(phba);
4529 out_free_active_sgl:
4530 lpfc_free_active_sgl(phba);
4532 lpfc_free_sgl_list(phba);
4533 out_destroy_cq_event_pool:
4534 lpfc_sli4_cq_event_pool_destroy(phba);
4536 lpfc_sli4_queue_destroy(phba);
4538 lpfc_destroy_bootstrap_mbox(phba);
4540 lpfc_mem_free(phba);
4545 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4546 * @phba: pointer to lpfc hba data structure.
4548 * This routine is invoked to unset the driver internal resources set up
4549 * specific for supporting the SLI-4 HBA device it is attached to.
4552 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4554 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4556 /* Free memory allocated for msi-x interrupt vector entries */
4557 kfree(phba->sli4_hba.msix_entries);
4559 /* Free memory allocated for fast-path work queue handles */
4560 kfree(phba->sli4_hba.fcp_eq_hdl);
4562 /* Free the allocated rpi headers. */
4563 lpfc_sli4_remove_rpi_hdrs(phba);
4564 lpfc_sli4_remove_rpis(phba);
4566 /* Free eligible FCF index bmask */
4567 kfree(phba->fcf.fcf_rr_bmask);
4569 /* Free the ELS sgl list */
4570 lpfc_free_active_sgl(phba);
4571 lpfc_free_sgl_list(phba);
4573 /* Free the SCSI sgl management array */
4574 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4576 /* Free the SLI4 queues */
4577 lpfc_sli4_queue_destroy(phba);
4579 /* Free the completion queue EQ event pool */
4580 lpfc_sli4_cq_event_release_all(phba);
4581 lpfc_sli4_cq_event_pool_destroy(phba);
4583 /* Release resource identifiers. */
4584 lpfc_sli4_dealloc_resource_identifiers(phba);
4586 /* Free the bsmbx region. */
4587 lpfc_destroy_bootstrap_mbox(phba);
4589 /* Free the SLI Layer memory with SLI4 HBAs */
4590 lpfc_mem_free_all(phba);
4592 /* Free the current connect table */
4593 list_for_each_entry_safe(conn_entry, next_conn_entry,
4594 &phba->fcf_conn_rec_list, list) {
4595 list_del_init(&conn_entry->list);
4603 * lpfc_init_api_table_setup - Set up init api function jump table
4604 * @phba: The hba struct for which this call is being executed.
4605 * @dev_grp: The HBA PCI-Device group number.
4607 * This routine sets up the device INIT interface API function jump table
4610 * Returns: 0 - success, -ENODEV - failure.
4613 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4615 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4616 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4617 phba->lpfc_selective_reset = lpfc_selective_reset;
4619 case LPFC_PCI_DEV_LP:
4620 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4621 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4622 phba->lpfc_stop_port = lpfc_stop_port_s3;
4624 case LPFC_PCI_DEV_OC:
4625 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4626 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4627 phba->lpfc_stop_port = lpfc_stop_port_s4;
4630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4631 "1431 Invalid HBA PCI-device group: 0x%x\n",
4640 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4641 * @phba: pointer to lpfc hba data structure.
4643 * This routine is invoked to set up the driver internal resources before the
4644 * device specific resource setup to support the HBA device it is attached to.
4648 * other values - error
4651 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4654 * Driver resources common to all SLI revisions
4656 atomic_set(&phba->fast_event_count, 0);
4657 spin_lock_init(&phba->hbalock);
4659 /* Initialize ndlp management spinlock */
4660 spin_lock_init(&phba->ndlp_lock);
4662 INIT_LIST_HEAD(&phba->port_list);
4663 INIT_LIST_HEAD(&phba->work_list);
4664 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4666 /* Initialize the wait queue head for the kernel thread */
4667 init_waitqueue_head(&phba->work_waitq);
4669 /* Initialize the scsi buffer list used by driver for scsi IO */
4670 spin_lock_init(&phba->scsi_buf_list_lock);
4671 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4673 /* Initialize the fabric iocb list */
4674 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4676 /* Initialize list to save ELS buffers */
4677 INIT_LIST_HEAD(&phba->elsbuf);
4679 /* Initialize FCF connection rec list */
4680 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4686 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4687 * @phba: pointer to lpfc hba data structure.
4689 * This routine is invoked to set up the driver internal resources after the
4690 * device specific resource setup to support the HBA device it is attached to.
4694 * other values - error
4697 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4701 /* Startup the kernel thread for this host adapter. */
4702 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4703 "lpfc_worker_%d", phba->brd_no);
4704 if (IS_ERR(phba->worker_thread)) {
4705 error = PTR_ERR(phba->worker_thread);
4713 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4714 * @phba: pointer to lpfc hba data structure.
4716 * This routine is invoked to unset the driver internal resources set up after
4717 * the device specific resource setup for supporting the HBA device it
4721 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4723 /* Stop kernel worker thread */
4724 kthread_stop(phba->worker_thread);
4728 * lpfc_free_iocb_list - Free iocb list.
4729 * @phba: pointer to lpfc hba data structure.
4731 * This routine is invoked to free the driver's IOCB list and memory.
4734 lpfc_free_iocb_list(struct lpfc_hba *phba)
4736 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4738 spin_lock_irq(&phba->hbalock);
4739 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4740 &phba->lpfc_iocb_list, list) {
4741 list_del(&iocbq_entry->list);
4743 phba->total_iocbq_bufs--;
4745 spin_unlock_irq(&phba->hbalock);
4751 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4752 * @phba: pointer to lpfc hba data structure.
4754 * This routine is invoked to allocate and initialize the driver's IOCB
4755 * list and set up the IOCB tag array accordingly.
4759 * other values - error
4762 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4764 struct lpfc_iocbq *iocbq_entry = NULL;
4768 /* Initialize and populate the iocb list per host. */
4769 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4770 for (i = 0; i < iocb_count; i++) {
4771 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4772 if (iocbq_entry == NULL) {
4773 printk(KERN_ERR "%s: only allocated %d iocbs of "
4774 "expected %d count. Unloading driver.\n",
4775 __func__, i, LPFC_IOCB_LIST_CNT);
4776 goto out_free_iocbq;
4779 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4782 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4783 "Unloading driver.\n", __func__);
4784 goto out_free_iocbq;
4786 iocbq_entry->sli4_lxritag = NO_XRI;
4787 iocbq_entry->sli4_xritag = NO_XRI;
4789 spin_lock_irq(&phba->hbalock);
4790 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4791 phba->total_iocbq_bufs++;
4792 spin_unlock_irq(&phba->hbalock);
4798 lpfc_free_iocb_list(phba);
4804 * lpfc_free_sgl_list - Free sgl list.
4805 * @phba: pointer to lpfc hba data structure.
4807 * This routine is invoked to free the driver's sgl list and memory.
4810 lpfc_free_sgl_list(struct lpfc_hba *phba)
4812 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4813 LIST_HEAD(sglq_list);
4815 spin_lock_irq(&phba->hbalock);
4816 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4817 spin_unlock_irq(&phba->hbalock);
4819 list_for_each_entry_safe(sglq_entry, sglq_next,
4821 list_del(&sglq_entry->list);
4822 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4824 phba->sli4_hba.total_sglq_bufs--;
4826 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4830 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4831 * @phba: pointer to lpfc hba data structure.
4833 * This routine is invoked to allocate the driver's active sgl memory.
4834 * This array will hold the sglq_entry's for active IOs.
4837 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4840 size = sizeof(struct lpfc_sglq *);
4841 size *= phba->sli4_hba.max_cfg_param.max_xri;
4843 phba->sli4_hba.lpfc_sglq_active_list =
4844 kzalloc(size, GFP_KERNEL);
4845 if (!phba->sli4_hba.lpfc_sglq_active_list)
4851 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4852 * @phba: pointer to lpfc hba data structure.
4854 * This routine is invoked to walk through the array of active sglq entries
4855 * and free all of the resources.
4856 * This is just a place holder for now.
4859 lpfc_free_active_sgl(struct lpfc_hba *phba)
4861 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4865 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4866 * @phba: pointer to lpfc hba data structure.
4868 * This routine is invoked to allocate and initialize the driver's sgl
4869 * list and set up the sgl xritag tag array accordingly.
4873 * other values - error
4876 lpfc_init_sgl_list(struct lpfc_hba *phba)
4878 struct lpfc_sglq *sglq_entry = NULL;
4882 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4883 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4884 "2400 ELS XRI count %d.\n",
4886 /* Initialize and populate the sglq list per host/VF. */
4887 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4888 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4890 /* Sanity check on XRI management */
4891 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4892 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4893 "2562 No room left for SCSI XRI allocation: "
4894 "max_xri=%d, els_xri=%d\n",
4895 phba->sli4_hba.max_cfg_param.max_xri,
4900 /* Allocate memory for the ELS XRI management array */
4901 phba->sli4_hba.lpfc_els_sgl_array =
4902 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4905 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4906 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4907 "2401 Failed to allocate memory for ELS "
4908 "XRI management array of size %d.\n",
4913 /* Keep the SCSI XRI into the XRI management array */
4914 phba->sli4_hba.scsi_xri_max =
4915 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4916 phba->sli4_hba.scsi_xri_cnt = 0;
4917 phba->sli4_hba.lpfc_scsi_psb_array =
4918 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4919 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4921 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4922 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4923 "2563 Failed to allocate memory for SCSI "
4924 "XRI management array of size %d.\n",
4925 phba->sli4_hba.scsi_xri_max);
4926 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4930 for (i = 0; i < els_xri_cnt; i++) {
4931 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4932 if (sglq_entry == NULL) {
4933 printk(KERN_ERR "%s: only allocated %d sgls of "
4934 "expected %d count. Unloading driver.\n",
4935 __func__, i, els_xri_cnt);
4939 sglq_entry->buff_type = GEN_BUFF_TYPE;
4940 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4941 if (sglq_entry->virt == NULL) {
4943 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4944 "Unloading driver.\n", __func__);
4947 sglq_entry->sgl = sglq_entry->virt;
4948 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4950 /* The list order is used by later block SGL registration */
4951 spin_lock_irq(&phba->hbalock);
4952 sglq_entry->state = SGL_FREED;
4953 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4954 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4955 phba->sli4_hba.total_sglq_bufs++;
4956 spin_unlock_irq(&phba->hbalock);
4961 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4962 lpfc_free_sgl_list(phba);
4967 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4968 * @phba: pointer to lpfc hba data structure.
4970 * This routine is invoked to post rpi header templates to the
4971 * port for those SLI4 ports that do not support extents. This routine
4972 * posts a PAGE_SIZE memory region to the port to hold up to
4973 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
4974 * and should be called only when interrupts are disabled.
4978 * -ERROR - otherwise.
4981 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4984 struct lpfc_rpi_hdr *rpi_hdr;
4986 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4988 * If the SLI4 port supports extents, posting the rpi header isn't
4989 * required. Set the expected maximum count and let the actual value
4990 * get set when extents are fully allocated.
4992 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4993 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
4996 if (phba->sli4_hba.extents_in_use)
4999 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5001 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5002 "0391 Error during rpi post operation\n");
5003 lpfc_sli4_remove_rpis(phba);
5011 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5012 * @phba: pointer to lpfc hba data structure.
5014 * This routine is invoked to allocate a single 4KB memory region to
5015 * support rpis and stores them in the phba. This single region
5016 * provides support for up to 64 rpis. The region is used globally
5020 * A valid rpi hdr on success.
5021 * A NULL pointer on any failure.
5023 struct lpfc_rpi_hdr *
5024 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5026 uint16_t rpi_limit, curr_rpi_range;
5027 struct lpfc_dmabuf *dmabuf;
5028 struct lpfc_rpi_hdr *rpi_hdr;
5032 * If the SLI4 port supports extents, posting the rpi header isn't
5033 * required. Set the expected maximum count and let the actual value
5034 * get set when extents are fully allocated.
5036 if (!phba->sli4_hba.rpi_hdrs_in_use)
5038 if (phba->sli4_hba.extents_in_use)
5041 /* The limit on the logical index is just the max_rpi count. */
5042 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5043 phba->sli4_hba.max_cfg_param.max_rpi - 1;
5045 spin_lock_irq(&phba->hbalock);
5047 * Establish the starting RPI in this header block. The starting
5048 * rpi is normalized to a zero base because the physical rpi is
5051 curr_rpi_range = phba->sli4_hba.next_rpi -
5052 phba->sli4_hba.max_cfg_param.rpi_base;
5053 spin_unlock_irq(&phba->hbalock);
5056 * The port has a limited number of rpis. The increment here
5057 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5058 * and to allow the full max_rpi range per port.
5060 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5061 rpi_count = rpi_limit - curr_rpi_range;
5063 rpi_count = LPFC_RPI_HDR_COUNT;
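/* Post a full LPFC_RPI_HDR_COUNT block of rpis unless fewer than that remain
 * before rpi_limit, in which case only the remainder is posted. */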
5068 * First allocate the protocol header region for the port. The
5069 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5071 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5075 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5076 LPFC_HDR_TEMPLATE_SIZE,
5079 if (!dmabuf->virt) {
5081 goto err_free_dmabuf;
5084 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5085 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5087 goto err_free_coherent;
5090 /* Save the rpi header data for cleanup later. */
5091 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5093 goto err_free_coherent;
5095 rpi_hdr->dmabuf = dmabuf;
5096 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5097 rpi_hdr->page_count = 1;
5098 spin_lock_irq(&phba->hbalock);
5100 /* The rpi_hdr stores the logical index only. */
5101 rpi_hdr->start_rpi = curr_rpi_range;
5102 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5105 * The next_rpi stores the next logical modulo-64 rpi value used
5106 * to post physical rpis in subsequent rpi postings.
5108 phba->sli4_hba.next_rpi += rpi_count;
5109 spin_unlock_irq(&phba->hbalock);
5113 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5114 dmabuf->virt, dmabuf->phys);
5121 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
5122 * @phba: pointer to lpfc hba data structure.
5124 * This routine is invoked to remove all memory resources allocated
5125 * to support rpis for SLI4 ports not supporting extents. This routine
5126 * presumes the caller has released all rpis consumed by fabric or port
5127 * logins and is prepared to have the header pages removed.
5130 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5132 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5134 if (!phba->sli4_hba.rpi_hdrs_in_use)
5137 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5138 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5139 list_del(&rpi_hdr->list);
5140 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5141 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5142 kfree(rpi_hdr->dmabuf);
5146 /* There are no rpis available to the port now. */
5147 phba->sli4_hba.next_rpi = 0;
5151 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5152 * @pdev: pointer to pci device data structure.
5154 * This routine is invoked to allocate the driver hba data structure for an
5155 * HBA device. If the allocation is successful, the phba reference to the
5156 * PCI device data structure is set.
5159 * pointer to @phba - successful
5162 static struct lpfc_hba *
5163 lpfc_hba_alloc(struct pci_dev *pdev)
5165 struct lpfc_hba *phba;
5167 /* Allocate memory for HBA structure */
5168 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5170 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5174 /* Set reference to PCI device in HBA structure */
5175 phba->pcidev = pdev;
5177 /* Assign an unused board number */
5178 phba->brd_no = lpfc_get_instance();
5179 if (phba->brd_no < 0) {
5184 spin_lock_init(&phba->ct_ev_lock);
5185 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5191 * lpfc_hba_free - Free driver hba data structure with a device.
5192 * @phba: pointer to lpfc hba data structure.
5194 * This routine is invoked to free the driver hba data structure with an
5198 lpfc_hba_free(struct lpfc_hba *phba)
5200 /* Release the driver assigned board number */
5201 idr_remove(&lpfc_hba_index, phba->brd_no);
5208 * lpfc_create_shost - Create hba physical port with associated scsi host.
5209 * @phba: pointer to lpfc hba data structure.
5211 * This routine is invoked to create HBA physical port and associate a SCSI
5216 * other values - error
5219 lpfc_create_shost(struct lpfc_hba *phba)
5221 struct lpfc_vport *vport;
5222 struct Scsi_Host *shost;
5224 /* Initialize HBA FC structure */
5225 phba->fc_edtov = FF_DEF_EDTOV;
5226 phba->fc_ratov = FF_DEF_RATOV;
5227 phba->fc_altov = FF_DEF_ALTOV;
5228 phba->fc_arbtov = FF_DEF_ARBTOV;
5230 atomic_set(&phba->sdev_cnt, 0);
5231 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5235 shost = lpfc_shost_from_vport(vport);
5236 phba->pport = vport;
5237 lpfc_debugfs_initialize(vport);
5238 /* Put reference to SCSI host to driver's device private data */
5239 pci_set_drvdata(phba->pcidev, shost);
5245 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5246 * @phba: pointer to lpfc hba data structure.
5248 * This routine is invoked to destroy HBA physical port and the associated
5252 lpfc_destroy_shost(struct lpfc_hba *phba)
5254 struct lpfc_vport *vport = phba->pport;
5256 /* Destroy physical port that associated with the SCSI host */
5257 destroy_port(vport);
5263 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5264 * @phba: pointer to lpfc hba data structure.
5265 * @shost: the shost to be used to detect Block guard settings.
5267 * This routine sets up the local Block guard protocol settings for @shost.
5268 * This routine also allocates memory for debugging bg buffers.
5271 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5274 if (lpfc_prot_mask && lpfc_prot_guard) {
5275 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5276 "1478 Registering BlockGuard with the "
5278 scsi_host_set_prot(shost, lpfc_prot_mask);
5279 scsi_host_set_guard(shost, lpfc_prot_guard);
5281 if (!_dump_buf_data) {
5283 spin_lock_init(&_dump_buf_lock);
5285 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5286 if (_dump_buf_data) {
5287 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5288 "9043 BLKGRD: allocated %d pages for "
5289 "_dump_buf_data at 0x%p\n",
5290 (1 << pagecnt), _dump_buf_data);
5291 _dump_buf_data_order = pagecnt;
5292 memset(_dump_buf_data, 0,
5293 ((1 << PAGE_SHIFT) << pagecnt));
5298 if (!_dump_buf_data_order)
5299 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5300 "9044 BLKGRD: ERROR unable to allocate "
5301 "memory for hexdump\n");
5303 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5304 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5305 "\n", _dump_buf_data);
5306 if (!_dump_buf_dif) {
5309 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5310 if (_dump_buf_dif) {
5311 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5312 "9046 BLKGRD: allocated %d pages for "
5313 "_dump_buf_dif at 0x%p\n",
5314 (1 << pagecnt), _dump_buf_dif);
5315 _dump_buf_dif_order = pagecnt;
5316 memset(_dump_buf_dif, 0,
5317 ((1 << PAGE_SHIFT) << pagecnt));
5322 if (!_dump_buf_dif_order)
5323 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5324 "9047 BLKGRD: ERROR unable to allocate "
5325 "memory for hexdump\n");
5327 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5328 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5333 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5334 * @phba: pointer to lpfc hba data structure.
5336 * This routine is invoked to perform all the necessary post initialization
5337 * setup for the device.
5340 lpfc_post_init_setup(struct lpfc_hba *phba)
5342 struct Scsi_Host *shost;
5343 struct lpfc_adapter_event_header adapter_event;
5345 /* Get the default values for Model Name and Description */
5346 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5349 * hba setup may have changed the hba_queue_depth so we need to
5350 * adjust the value of can_queue.
5352 shost = pci_get_drvdata(phba->pcidev);
5353 shost->can_queue = phba->cfg_hba_queue_depth - 10;
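/* Keep can_queue a little below the configured HBA queue depth so a few
 * command slots remain for driver-internal (non-midlayer) traffic; the
 * margin of 10 is a driver heuristic, not a hardware requirement. */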
5354 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5355 lpfc_setup_bg(phba, shost);
5357 lpfc_host_attrib_init(shost);
5359 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5360 spin_lock_irq(shost->host_lock);
5361 lpfc_poll_start_timer(phba);
5362 spin_unlock_irq(shost->host_lock);
5365 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5366 "0428 Perform SCSI scan\n");
5367 /* Send board arrival event to upper layer */
5368 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5369 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5370 fc_host_post_vendor_event(shost, fc_get_event_number(),
5371 sizeof(adapter_event),
5372 (char *) &adapter_event,
5378 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5379 * @phba: pointer to lpfc hba data structure.
5381 * This routine is invoked to set up the PCI device memory space for device
5382 * with SLI-3 interface spec.
5386 * other values - error
5389 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5391 struct pci_dev *pdev;
5392 unsigned long bar0map_len, bar2map_len;
5395 int error = -ENODEV;
5397 /* Obtain PCI device reference */
5401 pdev = phba->pcidev;
5403 /* Set the device DMA mask size */
5404 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5405 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5406 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5407 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5412 /* Get the bus address of Bar0 and Bar2 and the number of bytes
5413 * required by each mapping.
5415 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5416 bar0map_len = pci_resource_len(pdev, 0);
5418 phba->pci_bar2_map = pci_resource_start(pdev, 2);
5419 bar2map_len = pci_resource_len(pdev, 2);
5421 /* Map HBA SLIM to a kernel virtual address. */
5422 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5423 if (!phba->slim_memmap_p) {
5424 dev_printk(KERN_ERR, &pdev->dev,
5425 "ioremap failed for SLIM memory.\n");
5429 /* Map HBA Control Registers to a kernel virtual address. */
5430 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5431 if (!phba->ctrl_regs_memmap_p) {
5432 dev_printk(KERN_ERR, &pdev->dev,
5433 "ioremap failed for HBA control registers.\n");
5434 goto out_iounmap_slim;
5437 /* Allocate memory for SLI-2 structures */
5438 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5442 if (!phba->slim2p.virt)
5445 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5446 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5447 phba->mbox_ext = (phba->slim2p.virt +
5448 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5449 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5450 phba->IOCBs = (phba->slim2p.virt +
5451 offsetof(struct lpfc_sli2_slim, IOCBs));
5453 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5454 lpfc_sli_hbq_size(),
5455 &phba->hbqslimp.phys,
5457 if (!phba->hbqslimp.virt)
5460 hbq_count = lpfc_sli_hbq_count();
5461 ptr = phba->hbqslimp.virt;
5462 for (i = 0; i < hbq_count; ++i) {
5463 phba->hbqs[i].hbq_virt = ptr;
5464 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5465 ptr += (lpfc_hbq_defs[i]->entry_count *
5466 sizeof(struct lpfc_hbq_entry));
5468 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5469 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5471 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5473 INIT_LIST_HEAD(&phba->rb_pend_list);
5475 phba->MBslimaddr = phba->slim_memmap_p;
5476 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5477 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5478 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5479 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5484 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5485 phba->slim2p.virt, phba->slim2p.phys);
5487 iounmap(phba->ctrl_regs_memmap_p);
5489 iounmap(phba->slim_memmap_p);
5495 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5496 * @phba: pointer to lpfc hba data structure.
5498 * This routine is invoked to unset the PCI device memory space for device
5499 * with SLI-3 interface spec.
5502 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5504 struct pci_dev *pdev;
5506 /* Obtain PCI device reference */
5510 pdev = phba->pcidev;
5512 /* Free coherent DMA memory allocated */
5513 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5514 phba->hbqslimp.virt, phba->hbqslimp.phys);
5515 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5516 phba->slim2p.virt, phba->slim2p.phys);
5518 /* I/O memory unmap */
5519 iounmap(phba->ctrl_regs_memmap_p);
5520 iounmap(phba->slim_memmap_p);
5526 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5527 * @phba: pointer to lpfc hba data structure.
5529 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5530 * done and check status.
5532 * Return 0 if successful, otherwise -ENODEV.
5535 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5537 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5538 struct lpfc_register reg_data;
5539 int i, port_error = 0;
5542 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5543 memset(&reg_data, 0, sizeof(reg_data));
5544 if (!phba->sli4_hba.PSMPHRregaddr)
5547 /* Wait up to 30 seconds for the SLI Port POST done and ready */
5548 for (i = 0; i < 3000; i++) {
5549 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5550 &portsmphr_reg.word0) ||
5551 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5552 /* Port has a fatal POST error, break out */
5553 port_error = -ENODEV;
5556 if (LPFC_POST_STAGE_PORT_READY ==
5557 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5563 * If there was a port error during POST, then don't proceed with
5564 * other register reads as the data may not be valid. Just exit.
5567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5568 "1408 Port Failed POST - portsmphr=0x%x, "
5569 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5570 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5571 portsmphr_reg.word0,
5572 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5573 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5574 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5575 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5576 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5577 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5578 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5579 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5581 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5582 "2534 Device Info: SLIFamily=0x%x, "
5583 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5584 "SLIHint_2=0x%x, FT=0x%x\n",
5585 bf_get(lpfc_sli_intf_sli_family,
5586 &phba->sli4_hba.sli_intf),
5587 bf_get(lpfc_sli_intf_slirev,
5588 &phba->sli4_hba.sli_intf),
5589 bf_get(lpfc_sli_intf_if_type,
5590 &phba->sli4_hba.sli_intf),
5591 bf_get(lpfc_sli_intf_sli_hint1,
5592 &phba->sli4_hba.sli_intf),
5593 bf_get(lpfc_sli_intf_sli_hint2,
5594 &phba->sli4_hba.sli_intf),
5595 bf_get(lpfc_sli_intf_func_type,
5596 &phba->sli4_hba.sli_intf));
5598 * Check for other Port errors during the initialization
5599 * process. Fail the load if the port did not come up
5602 if_type = bf_get(lpfc_sli_intf_if_type,
5603 &phba->sli4_hba.sli_intf);
5605 case LPFC_SLI_INTF_IF_TYPE_0:
5606 phba->sli4_hba.ue_mask_lo =
5607 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5608 phba->sli4_hba.ue_mask_hi =
5609 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5611 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5613 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5614 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5615 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5617 "1422 Unrecoverable Error "
5618 "Detected during POST "
5619 "uerr_lo_reg=0x%x, "
5620 "uerr_hi_reg=0x%x, "
5621 "ue_mask_lo_reg=0x%x, "
5622 "ue_mask_hi_reg=0x%x\n",
5625 phba->sli4_hba.ue_mask_lo,
5626 phba->sli4_hba.ue_mask_hi);
5627 port_error = -ENODEV;
5630 case LPFC_SLI_INTF_IF_TYPE_2:
5631 /* Final checks. The port status should be clean. */
5632 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5634 (bf_get(lpfc_sliport_status_err, &reg_data) &&
5635 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5636 phba->work_status[0] =
5637 readl(phba->sli4_hba.u.if_type2.
5639 phba->work_status[1] =
5640 readl(phba->sli4_hba.u.if_type2.
5642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5643 "2888 Port Error Detected "
5645 "port status reg 0x%x, "
5646 "port_smphr reg 0x%x, "
5647 "error 1=0x%x, error 2=0x%x\n",
5649 portsmphr_reg.word0,
5650 phba->work_status[0],
5651 phba->work_status[1]);
5652 port_error = -ENODEV;
5655 case LPFC_SLI_INTF_IF_TYPE_1:
5664 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5665 * @phba: pointer to lpfc hba data structure.
5666 * @if_type: The SLI4 interface type getting configured.
5668 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5672 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5675 case LPFC_SLI_INTF_IF_TYPE_0:
5676 phba->sli4_hba.u.if_type0.UERRLOregaddr =
5677 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5678 phba->sli4_hba.u.if_type0.UERRHIregaddr =
5679 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5680 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5681 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5682 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5683 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5684 phba->sli4_hba.SLIINTFregaddr =
5685 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5687 case LPFC_SLI_INTF_IF_TYPE_2:
5688 phba->sli4_hba.u.if_type2.ERR1regaddr =
5689 phba->sli4_hba.conf_regs_memmap_p +
5690 LPFC_CTL_PORT_ER1_OFFSET;
5691 phba->sli4_hba.u.if_type2.ERR2regaddr =
5692 phba->sli4_hba.conf_regs_memmap_p +
5693 LPFC_CTL_PORT_ER2_OFFSET;
5694 phba->sli4_hba.u.if_type2.CTRLregaddr =
5695 phba->sli4_hba.conf_regs_memmap_p +
5696 LPFC_CTL_PORT_CTL_OFFSET;
5697 phba->sli4_hba.u.if_type2.STATUSregaddr =
5698 phba->sli4_hba.conf_regs_memmap_p +
5699 LPFC_CTL_PORT_STA_OFFSET;
5700 phba->sli4_hba.SLIINTFregaddr =
5701 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5702 phba->sli4_hba.PSMPHRregaddr =
5703 phba->sli4_hba.conf_regs_memmap_p +
5704 LPFC_CTL_PORT_SEM_OFFSET;
5705 phba->sli4_hba.RQDBregaddr =
5706 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5707 phba->sli4_hba.WQDBregaddr =
5708 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5709 phba->sli4_hba.EQCQDBregaddr =
5710 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5711 phba->sli4_hba.MQDBregaddr =
5712 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5713 phba->sli4_hba.BMBXregaddr =
5714 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5716 case LPFC_SLI_INTF_IF_TYPE_1:
5718 dev_printk(KERN_ERR, &phba->pcidev->dev,
5719 "FATAL - unsupported SLI4 interface type - %d\n",
5726 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5727 * @phba: pointer to lpfc hba data structure.
5729 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5733 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5735 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5736 LPFC_SLIPORT_IF0_SMPHR;
5737 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5739 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5741 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5746 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5747 * @phba: pointer to lpfc hba data structure.
5748 * @vf: virtual function number
5750 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5751 * based on the given virtual function number, @vf.
5753 * Return 0 if successful, otherwise -ENODEV.
5756 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5758 if (vf > LPFC_VIR_FUNC_MAX)
5761 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5762 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5763 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5764 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5765 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5766 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5767 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5768 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5769 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5770 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
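/* Each virtual function owns its own LPFC_VFR_PAGE_SIZE doorbell page in
 * BAR2; the RQ/WQ/EQCQ/MQ doorbells and the bootstrap mailbox register sit
 * at fixed offsets inside that page. */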
5775 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5776 * @phba: pointer to lpfc hba data structure.
5778 * This routine is invoked to create the bootstrap mailbox
5779 * region consistent with the SLI-4 interface spec. This
5780 * routine allocates all memory necessary to communicate
5781 * mailbox commands to the port and sets up all alignment
5782 * needs. No locks are expected to be held when calling
5787 * -ENOMEM - could not allocate memory.
5790 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5793 struct lpfc_dmabuf *dmabuf;
5794 struct dma_address *dma_address;
5798 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5803 * The bootstrap mailbox region consists of 2 parts
5804 * plus an alignment restriction of 16 bytes.
5806 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5807 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5811 if (!dmabuf->virt) {
5815 memset(dmabuf->virt, 0, bmbx_size);
5818 * Initialize the bootstrap mailbox pointers now so that the register
5819 * operations are simple later. The mailbox dma address is required
5820 * to be 16-byte aligned. Also align the virtual memory as each
5821 * mailbox is copied into the bmbx mailbox region before issuing the
5822 * command to the port.
5824 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5825 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5827 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5828 LPFC_ALIGN_16_BYTE);
5829 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5830 LPFC_ALIGN_16_BYTE);
5833 * Set the high and low physical addresses now. The SLI4 alignment
5834 * requirement is 16 bytes and the mailbox is posted to the port
5835 * as two 30-bit addresses. The other data is a bit marking whether
5836 * the 30-bit address is the high or low address.
5837 * Upcast bmbx aphys to 64bits so shift instruction compiles
5838 * clean on 32 bit machines.
5840 dma_address = &phba->sli4_hba.bmbx.dma_address;
5841 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5842 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5843 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5844 LPFC_BMBX_BIT1_ADDR_HI);
5846 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5847 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5848 LPFC_BMBX_BIT1_ADDR_LO);
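/* addr_hi carries physical address bits 63:34 and addr_lo bits 33:4; each is
 * shifted left by two with the BIT1 marker distinguishing the high and low
 * halves, matching the two 30-bit address format described above. */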
5853 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5854 * @phba: pointer to lpfc hba data structure.
5856 * This routine is invoked to teardown the bootstrap mailbox
5857 * region and release all host resources. This routine requires
5858 * the caller to ensure all mailbox commands have been recovered, that no
5859 * additional mailbox commands are sent, and that interrupts are disabled
5860 * before calling this routine.
5864 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5866 dma_free_coherent(&phba->pcidev->dev,
5867 phba->sli4_hba.bmbx.bmbx_size,
5868 phba->sli4_hba.bmbx.dmabuf->virt,
5869 phba->sli4_hba.bmbx.dmabuf->phys);
5871 kfree(phba->sli4_hba.bmbx.dmabuf);
5872 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5876 * lpfc_sli4_read_config - Get the config parameters.
5877 * @phba: pointer to lpfc hba data structure.
5879 * This routine is invoked to read the configuration parameters from the HBA.
5880 * The configuration parameters are used to set the base and maximum values
5881 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
5882 * allocation for the port.
5886 * -ENOMEM - No available memory
5887 * -EIO - The mailbox failed to complete successfully.
5890 lpfc_sli4_read_config(struct lpfc_hba *phba)
5893 struct lpfc_mbx_read_config *rd_config;
5894 union lpfc_sli4_cfg_shdr *shdr;
5895 uint32_t shdr_status, shdr_add_status;
5896 struct lpfc_mbx_get_func_cfg *get_func_cfg;
5897 struct lpfc_rsrc_desc_fcfcoe *desc;
5898 uint32_t desc_count;
5899 int length, i, rc = 0;
5901 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5903 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5904 "2011 Unable to allocate memory for issuing "
5905 "SLI_CONFIG_SPECIAL mailbox command\n");
5909 lpfc_read_config(phba, pmb);
5911 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5912 if (rc != MBX_SUCCESS) {
5913 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5914 "2012 Mailbox failed , mbxCmd x%x "
5915 "READ_CONFIG, mbxStatus x%x\n",
5916 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5917 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5920 rd_config = &pmb->u.mqe.un.rd_config;
5921 phba->sli4_hba.extents_in_use =
5922 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
5923 phba->sli4_hba.max_cfg_param.max_xri =
5924 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5925 phba->sli4_hba.max_cfg_param.xri_base =
5926 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5927 phba->sli4_hba.max_cfg_param.max_vpi =
5928 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5929 phba->sli4_hba.max_cfg_param.vpi_base =
5930 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5931 phba->sli4_hba.max_cfg_param.max_rpi =
5932 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5933 phba->sli4_hba.max_cfg_param.rpi_base =
5934 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5935 phba->sli4_hba.max_cfg_param.max_vfi =
5936 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5937 phba->sli4_hba.max_cfg_param.vfi_base =
5938 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5939 phba->sli4_hba.max_cfg_param.max_fcfi =
5940 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5941 phba->sli4_hba.max_cfg_param.max_eq =
5942 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5943 phba->sli4_hba.max_cfg_param.max_rq =
5944 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5945 phba->sli4_hba.max_cfg_param.max_wq =
5946 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5947 phba->sli4_hba.max_cfg_param.max_cq =
5948 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5949 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5950 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5951 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5952 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5953 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5954 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5955 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5956 phba->max_vports = phba->max_vpi;
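/* The physical port consumes one VPI itself, so the number of additional
 * vports that can be created is max_vpi - 1 (or zero if no VPIs were
 * granted). */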
5957 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5958 "2003 cfg params Extents? %d "
5964 phba->sli4_hba.extents_in_use,
5965 phba->sli4_hba.max_cfg_param.xri_base,
5966 phba->sli4_hba.max_cfg_param.max_xri,
5967 phba->sli4_hba.max_cfg_param.vpi_base,
5968 phba->sli4_hba.max_cfg_param.max_vpi,
5969 phba->sli4_hba.max_cfg_param.vfi_base,
5970 phba->sli4_hba.max_cfg_param.max_vfi,
5971 phba->sli4_hba.max_cfg_param.rpi_base,
5972 phba->sli4_hba.max_cfg_param.max_rpi,
5973 phba->sli4_hba.max_cfg_param.max_fcfi);
5979 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5980 if (phba->cfg_hba_queue_depth >
5981 (phba->sli4_hba.max_cfg_param.max_xri -
5982 lpfc_sli4_get_els_iocb_cnt(phba)))
5983 phba->cfg_hba_queue_depth =
5984 phba->sli4_hba.max_cfg_param.max_xri -
5985 lpfc_sli4_get_els_iocb_cnt(phba);
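/* Every outstanding SCSI command consumes an XRI, so the HBA queue depth can
 * never exceed the XRIs left over after the ELS IOCBs are set aside. */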
5987 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5988 LPFC_SLI_INTF_IF_TYPE_2)
5991 /* get the pf# and vf# for SLI4 if_type 2 port */
5992 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
5993 sizeof(struct lpfc_sli4_cfg_mhdr));
5994 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
5995 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
5996 length, LPFC_SLI4_MBX_EMBED);
5998 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5999 shdr = (union lpfc_sli4_cfg_shdr *)
6000 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6001 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6002 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6003 if (rc || shdr_status || shdr_add_status) {
6004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6005 "3026 Mailbox failed , mbxCmd x%x "
6006 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6007 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6008 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6013 /* search for fc_fcoe resrouce descriptor */
6014 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6015 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6017 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6018 desc = (struct lpfc_rsrc_desc_fcfcoe *)
6019 &get_func_cfg->func_cfg.desc[i];
6020 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6021 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6022 phba->sli4_hba.iov.pf_number =
6023 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}
6030 if (i < LPFC_RSRC_DESC_MAX_NUM)
6031 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6032 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6033 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);
read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
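/*
 * Illustrative sketch of the bf_get()/bf_set() accessor pattern used above
 * to parse READ_CONFIG. The shift/mask constants come from lpfc_hw4.h (each
 * bitfield defines name##_SHIFT, name##_MASK and name##_WORD); treat the
 * exact macro bodies below as an assumption-laden sketch, not the
 * authoritative definitions:
 *
 *	#define bf_get(name, ptr) \
 *		((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
 *	#define bf_set(name, ptr, value) \
 *		((ptr)->name##_WORD = ((((value) & name##_MASK) << \
 *			name##_SHIFT) | ((ptr)->name##_WORD & \
 *			~(name##_MASK << name##_SHIFT))))
 *
 * For example, bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) extracts the
 * XRI count field from the READ_CONFIG response word that carries it.
 */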
6049 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
6050 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to set up the port-side endian order when
6053 * the port if_type is 0. This routine has no function for other
6058 * -ENOMEM - No available memory
6059 * -EIO - The mailbox failed to complete successfully.
6062 lpfc_setup_endian_order(struct lpfc_hba *phba)
6064 LPFC_MBOXQ_t *mboxq;
6065 uint32_t if_type, rc = 0;
6066 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6067 HOST_ENDIAN_HIGH_WORD1};
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			rc = -ENOMEM;
			break;
		}
6083 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
6084 * two words to contain special data values and no other data.
6086 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6087 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6088 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6089 if (rc != MBX_SUCCESS) {
6090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6091 "0493 SLI_CONFIG_SPECIAL mailbox "
6092 "failed with status x%x\n",
6096 mempool_free(mboxq, phba->mbox_mem_pool);
6098 case LPFC_SLI_INTF_IF_TYPE_2:
6099 case LPFC_SLI_INTF_IF_TYPE_1:
6107 * lpfc_sli4_queue_create - Create all the SLI4 queues
6108 * @phba: pointer to lpfc hba data structure.
6110 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6111 * operation. For each SLI4 queue type, the parameters such as queue entry
6112 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as a placeholder.
6117 * -ENOMEM - No available memory
6118 * -EIO - The mailbox failed to complete successfully.
6121 lpfc_sli4_queue_create(struct lpfc_hba *phba)
6123 struct lpfc_queue *qdesc;
6124 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6125 int cfg_fcp_wq_count;
6126 int cfg_fcp_eq_count;
 * Sanity check for configured queue parameters against the run-time
6133 /* Sanity check on FCP fast-path WQ parameters */
6134 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
6135 if (cfg_fcp_wq_count >
6136 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
6137 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
6139 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
6140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6141 "2581 Not enough WQs (%d) from "
6142 "the pci function for supporting "
6144 phba->sli4_hba.max_cfg_param.max_wq,
6145 phba->cfg_fcp_wq_count);
6148 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6149 "2582 Not enough WQs (%d) from the pci "
6150 "function for supporting the requested "
6151 "FCP WQs (%d), the actual FCP WQs can "
6152 "be supported: %d\n",
6153 phba->sli4_hba.max_cfg_param.max_wq,
6154 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
6156 /* The actual number of FCP work queues adopted */
6157 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
6159 /* Sanity check on FCP fast-path EQ parameters */
6160 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6161 if (cfg_fcp_eq_count >
6162 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6163 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6165 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6167 "2574 Not enough EQs (%d) from the "
6168 "pci function for supporting FCP "
6170 phba->sli4_hba.max_cfg_param.max_eq,
6171 phba->cfg_fcp_eq_count);
6174 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6175 "2575 Not enough EQs (%d) from the pci "
6176 "function for supporting the requested "
6177 "FCP EQs (%d), the actual FCP EQs can "
6178 "be supported: %d\n",
6179 phba->sli4_hba.max_cfg_param.max_eq,
6180 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
6182 /* It does not make sense to have more EQs than WQs */
6183 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6184 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6185 "2593 The FCP EQ count(%d) cannot be greater "
6186 "than the FCP WQ count(%d), limiting the "
6187 "FCP EQ count to %d\n", cfg_fcp_eq_count,
6188 phba->cfg_fcp_wq_count,
6189 phba->cfg_fcp_wq_count);
6190 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6192 /* The actual number of FCP event queues adopted */
6193 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
6194 /* The overall number of event queues used */
6195 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
6198 * Create Event Queues (EQs)
6201 /* Get EQ depth from module parameter, fake the default for now */
6202 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6203 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6205 /* Create slow path event queue */
6206 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6207 phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;
6215 /* Create fast-path FCP Event Queue(s) */
6216 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6217 phba->cfg_fcp_eq_count), GFP_KERNEL);
6218 if (!phba->sli4_hba.fp_eq) {
6219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6220 "2576 Failed allocate memory for fast-path "
6221 "EQ record array\n");
6222 goto out_free_sp_eq;
6224 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6225 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6226 phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
6232 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6236 * Create Complete Queues (CQs)
6239 /* Get CQ depth from module parameter, fake the default for now */
6240 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6241 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6243 /* Create slow-path Mailbox Command Complete Queue */
6244 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6245 phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
6251 phba->sli4_hba.mbx_cq = qdesc;
6253 /* Create slow-path ELS Complete Queue */
6254 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6255 phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
6261 phba->sli4_hba.els_cq = qdesc;
6264 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
6265 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6266 phba->cfg_fcp_eq_count), GFP_KERNEL);
6267 if (!phba->sli4_hba.fcp_cq) {
6268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6269 "2577 Failed allocate memory for fast-path "
6270 "CQ record array\n");
6271 goto out_free_els_cq;
6273 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6274 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6275 phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
6282 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6285 /* Create Mailbox Command Queue */
6286 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6287 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6289 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6290 phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
6296 phba->sli4_hba.mbx_wq = qdesc;
6299 * Create all the Work Queues (WQs)
6301 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6302 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6304 /* Create slow-path ELS Work Queue */
6305 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6306 phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
6312 phba->sli4_hba.els_wq = qdesc;
6314 /* Create fast-path FCP Work Queue(s) */
6315 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6316 phba->cfg_fcp_wq_count), GFP_KERNEL);
6317 if (!phba->sli4_hba.fcp_wq) {
6318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6319 "2578 Failed allocate memory for fast-path "
6320 "WQ record array\n");
6321 goto out_free_els_wq;
6323 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6324 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6325 phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
6332 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6336 * Create Receive Queue (RQ)
6338 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6339 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6341 /* Create Receive Queue for header */
6342 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6343 phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
6349 phba->sli4_hba.hdr_rq = qdesc;
6351 /* Create Receive Queue for data */
6352 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6353 phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
6359 phba->sli4_hba.dat_rq = qdesc;
6364 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6365 phba->sli4_hba.hdr_rq = NULL;
6367 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6368 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6369 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6371 kfree(phba->sli4_hba.fcp_wq);
6373 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6374 phba->sli4_hba.els_wq = NULL;
6376 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6377 phba->sli4_hba.mbx_wq = NULL;
6379 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6380 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6381 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6383 kfree(phba->sli4_hba.fcp_cq);
6385 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6386 phba->sli4_hba.els_cq = NULL;
6388 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6389 phba->sli4_hba.mbx_cq = NULL;
6391 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6392 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6393 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6395 kfree(phba->sli4_hba.fp_eq);
6397 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6398 phba->sli4_hba.sp_eq = NULL;
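/*
 * Usage sketch (illustrative ordering, not a call path copied verbatim
 * from this driver): the queue routines pair up as create -> setup around
 * port bring-up, and unset -> destroy around tear-down.
 *
 *	if (lpfc_sli4_queue_create(phba))	(allocate queue memory)
 *		return -ENOMEM;
 *	if (lpfc_sli4_queue_setup(phba)) {	(post queues to the port)
 *		lpfc_sli4_queue_destroy(phba);
 *		return -EIO;
 *	}
 *	...
 *	lpfc_sli4_queue_unset(phba);		(un-post from the port)
 *	lpfc_sli4_queue_destroy(phba);		(free queue memory)
 */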
6404 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6405 * @phba: pointer to lpfc hba data structure.
6407 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
6412 * -ENOMEM - No available memory
6413 * -EIO - The mailbox failed to complete successfully.
6416 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6420 /* Release mailbox command work queue */
6421 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6422 phba->sli4_hba.mbx_wq = NULL;
6424 /* Release ELS work queue */
6425 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6426 phba->sli4_hba.els_wq = NULL;
6428 /* Release FCP work queue */
6429 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6430 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6431 kfree(phba->sli4_hba.fcp_wq);
6432 phba->sli4_hba.fcp_wq = NULL;
6434 /* Release unsolicited receive queue */
6435 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6436 phba->sli4_hba.hdr_rq = NULL;
6437 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6438 phba->sli4_hba.dat_rq = NULL;
6440 /* Release ELS complete queue */
6441 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6442 phba->sli4_hba.els_cq = NULL;
6444 /* Release mailbox command complete queue */
6445 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6446 phba->sli4_hba.mbx_cq = NULL;
6448 /* Release FCP response complete queue */
	fcp_qidx = 0;
	do
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
	while (++fcp_qidx < phba->cfg_fcp_eq_count);
6453 kfree(phba->sli4_hba.fcp_cq);
6454 phba->sli4_hba.fcp_cq = NULL;
6456 /* Release fast-path event queue */
6457 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6458 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6459 kfree(phba->sli4_hba.fp_eq);
6460 phba->sli4_hba.fp_eq = NULL;
6462 /* Release slow-path event queue */
6463 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6464 phba->sli4_hba.sp_eq = NULL;
6470 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6471 * @phba: pointer to lpfc hba data structure.
6473 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6478 * -ENOMEM - No available memory
6479 * -EIO - The mailbox failed to complete successfully.
6482 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6485 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6486 int fcp_cq_index = 0;
6489 * Set up Event Queues (EQs)
6492 /* Set up slow-path event queue */
6493 if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		return -ENOMEM;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    phba->cfg_fcp_imax);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		return rc;
	}
6506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6507 "2583 Slow-path EQ setup: queue-id=%d\n",
6508 phba->sli4_hba.sp_eq->queue_id);
6510 /* Set up fast-path event queue */
6511 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6512 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6514 "0522 Fast-path EQ (%d) not "
6515 "allocated\n", fcp_eqidx);
6516 goto out_destroy_fp_eq;
6518 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6519 phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
6526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6527 "2584 Fast-path EQ setup: "
6528 "queue[%d]-id=%d\n", fcp_eqidx,
6529 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6533 * Set up Complete Queues (CQs)
6536 /* Set up slow-path MBOX Complete Queue as the first CQ */
6537 if (!phba->sli4_hba.mbx_cq) {
6538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6539 "0528 Mailbox CQ not allocated\n");
6540 goto out_destroy_fp_eq;
6542 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6543 LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fp_eq;
	}
6550 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6551 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6552 phba->sli4_hba.mbx_cq->queue_id,
6553 phba->sli4_hba.sp_eq->queue_id);
6555 /* Set up slow-path ELS Complete Queue */
6556 if (!phba->sli4_hba.els_cq) {
6557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6558 "0530 ELS CQ not allocated\n");
6559 goto out_destroy_mbx_cq;
6561 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6562 LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
6569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6570 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6571 phba->sli4_hba.els_cq->queue_id,
6572 phba->sli4_hba.sp_eq->queue_id);
6574 /* Set up fast-path FCP Response Complete Queue */
	fcp_cqidx = 0;
	do {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6579 "0526 Fast-path FCP CQ (%d) not "
6580 "allocated\n", fcp_cqidx);
6581 goto out_destroy_fcp_cq;
6583 if (phba->cfg_fcp_eq_count)
6584 rc = lpfc_cq_create(phba,
6585 phba->sli4_hba.fcp_cq[fcp_cqidx],
6586 phba->sli4_hba.fp_eq[fcp_cqidx],
6587 LPFC_WCQ, LPFC_FCP);
		else
			rc = lpfc_cq_create(phba,
6590 phba->sli4_hba.fcp_cq[fcp_cqidx],
6591 phba->sli4_hba.sp_eq,
6592 LPFC_WCQ, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0527 Failed setup of fast-path FCP "
					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
			goto out_destroy_fcp_cq;
		}
6599 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6600 "2588 FCP CQ setup: cq[%d]-id=%d, "
6601 "parent %seq[%d]-id=%d\n",
6603 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6604 (phba->cfg_fcp_eq_count) ? "" : "sp_",
6606 (phba->cfg_fcp_eq_count) ?
6607 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6608 phba->sli4_hba.sp_eq->queue_id);
6609 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6612 * Set up all the Work Queues (WQs)
6615 /* Set up Mailbox Command Queue */
6616 if (!phba->sli4_hba.mbx_wq) {
6617 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6618 "0538 Slow-path MQ not allocated\n");
6619 goto out_destroy_fcp_cq;
6621 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6622 phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_cq;
	}
6629 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6630 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6631 phba->sli4_hba.mbx_wq->queue_id,
6632 phba->sli4_hba.mbx_cq->queue_id);
6634 /* Set up slow-path ELS Work Queue */
6635 if (!phba->sli4_hba.els_wq) {
6636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6637 "0536 Slow-path ELS WQ not allocated\n");
6638 goto out_destroy_mbx_wq;
6640 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6641 phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_wq;
	}
6648 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6649 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6650 phba->sli4_hba.els_wq->queue_id,
6651 phba->sli4_hba.els_cq->queue_id);
6653 /* Set up fast-path FCP Work Queue */
6654 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6655 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6657 "0534 Fast-path FCP WQ (%d) not "
6658 "allocated\n", fcp_wqidx);
6659 goto out_destroy_fcp_wq;
6661 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6662 phba->sli4_hba.fcp_cq[fcp_cq_index],
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
			goto out_destroy_fcp_wq;
		}
6670 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6671 "2591 FCP WQ setup: wq[%d]-id=%d, "
6672 "parent cq[%d]-id=%d\n",
6674 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6676 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6677 /* Round robin FCP Work Queue's Completion Queue assignment */
6678 if (phba->cfg_fcp_eq_count)
6679 fcp_cq_index = ((fcp_cq_index + 1) %
6680 phba->cfg_fcp_eq_count);
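		/*
		 * Worked example of the round-robin assignment above: with
		 * cfg_fcp_eq_count = 2 and cfg_fcp_wq_count = 4, the WQ to
		 * CQ binding comes out as WQ0->CQ0, WQ1->CQ1, WQ2->CQ0,
		 * WQ3->CQ1, i.e. fcp_cq_index cycles 0, 1, 0, 1 as
		 * fcp_wqidx advances.
		 */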
6684 * Create Receive Queue (RQ)
6686 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6688 "0540 Receive Queue not allocated\n");
6689 goto out_destroy_fcp_wq;
6691 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6692 phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}
6699 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6700 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6701 "parent cq-id=%d\n",
6702 phba->sli4_hba.hdr_rq->queue_id,
6703 phba->sli4_hba.dat_rq->queue_id,
6704 phba->sli4_hba.els_cq->queue_id);
6708 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6709 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6710 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6712 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6714 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6715 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6716 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6718 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6720 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6721 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6722 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
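/*
 * Compressed view of the SLI4 queue parenting set up above (every CQ is
 * created against a parent EQ, every WQ/MQ/RQ against a parent CQ, so
 * tear-down must run in reverse order):
 *
 *	sp_eq ----> mbx_cq ----> mbx_wq (MQ)
 *	      \---> els_cq ----> els_wq, hdr_rq/dat_rq
 *	fp_eq[i] -> fcp_cq[i] -> fcp_wq[j] (round robin over the FCP CQs)
 */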
6728 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6729 * @phba: pointer to lpfc hba data structure.
6731 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6736 * -ENOMEM - No available memory
6737 * -EIO - The mailbox failed to complete successfully.
6740 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6744 /* Unset mailbox command work queue */
6745 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6746 /* Unset ELS work queue */
6747 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6748 /* Unset unsolicited receive queue */
6749 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6750 /* Unset FCP work queue */
6751 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6752 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6753 /* Unset mailbox command complete queue */
6754 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6755 /* Unset ELS complete queue */
6756 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6757 /* Unset FCP response complete queue */
6758 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6759 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6760 /* Unset fast-path event queue */
6761 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6762 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6763 /* Unset slow-path event queue */
6764 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6768 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6769 * @phba: pointer to lpfc hba data structure.
6771 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * (CQE). For now, this pool is used by the interrupt service routine to queue
6774 * the following HBA completion queue events for the worker thread to process:
6775 * - Mailbox asynchronous events
6776 * - Receive queue completion unsolicited events
6777 * Later, this can be used for all the slow-path events.
6781 * -ENOMEM - No available memory
6784 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6786 struct lpfc_cq_event *cq_event;
6789 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6790 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6792 goto out_pool_create_fail;
6793 list_add_tail(&cq_event->list,
6794 &phba->sli4_hba.sp_cqe_event_pool);
6798 out_pool_create_fail:
6799 lpfc_sli4_cq_event_pool_destroy(phba);
6804 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6805 * @phba: pointer to lpfc hba data structure.
6807 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
6809 * cleanup routine to free all the outstanding completion-queue events
6810 * allocated from this pool back into the pool before invoking this routine
6811 * to destroy the pool.
6814 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6816 struct lpfc_cq_event *cq_event, *next_cq_event;
6818 list_for_each_entry_safe(cq_event, next_cq_event,
6819 &phba->sli4_hba.sp_cqe_event_pool, list) {
6820 list_del(&cq_event->list);
6826 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6827 * @phba: pointer to lpfc hba data structure.
 * This routine is the lock-free version of the API invoked to allocate a
6830 * completion-queue event from the free pool.
6832 * Return: Pointer to the newly allocated completion-queue event if successful
6835 struct lpfc_cq_event *
6836 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6838 struct lpfc_cq_event *cq_event = NULL;
6840 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6841 struct lpfc_cq_event, list);
6846 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6847 * @phba: pointer to lpfc hba data structure.
6849 * This routine is the lock version of the API invoked to allocate a
6850 * completion-queue event from the free pool.
6852 * Return: Pointer to the newly allocated completion-queue event if successful
6855 struct lpfc_cq_event *
6856 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6858 struct lpfc_cq_event *cq_event;
6859 unsigned long iflags;
6861 spin_lock_irqsave(&phba->hbalock, iflags);
6862 cq_event = __lpfc_sli4_cq_event_alloc(phba);
6863 spin_unlock_irqrestore(&phba->hbalock, iflags);
6868 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6869 * @phba: pointer to lpfc hba data structure.
6870 * @cq_event: pointer to the completion queue event to be freed.
 * This routine is the lock-free version of the API invoked to release a
6873 * completion-queue event back into the free pool.
6876 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6877 struct lpfc_cq_event *cq_event)
6879 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6883 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6884 * @phba: pointer to lpfc hba data structure.
6885 * @cq_event: pointer to the completion queue event to be freed.
6887 * This routine is the lock version of the API invoked to release a
6888 * completion-queue event back into the free pool.
6891 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6892 struct lpfc_cq_event *cq_event)
6894 unsigned long iflags;
6895 spin_lock_irqsave(&phba->hbalock, iflags);
6896 __lpfc_sli4_cq_event_release(phba, cq_event);
6897 spin_unlock_irqrestore(&phba->hbalock, iflags);
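/*
 * Usage sketch for this pool (illustrative; the surrounding producer and
 * consumer logic is assumed, not copied from this driver): interrupt
 * context allocates an event, fills in its payload, and queues it for the
 * worker thread, which releases it back to the pool when done.
 *
 *	struct lpfc_cq_event *evt = lpfc_sli4_cq_event_alloc(phba);
 *	if (!evt)
 *		return;			(pool exhausted)
 *	... copy the CQE payload into the event ...
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&evt->list, &phba->sli4_hba.sp_asynce_work_queue);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * and later, from the worker thread:
 *
 *	lpfc_sli4_cq_event_release(phba, evt);
 */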
6901 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6902 * @phba: pointer to lpfc hba data structure.
 * This routine frees all the pending completion-queue events back into
 * the free pool for device reset.
6908 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6911 struct lpfc_cq_event *cqe;
6912 unsigned long iflags;
6914 /* Retrieve all the pending WCQEs from pending WCQE lists */
6915 spin_lock_irqsave(&phba->hbalock, iflags);
6916 /* Pending FCP XRI abort events */
6917 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6919 /* Pending ELS XRI abort events */
6920 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
	/* Pending async events */
6923 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6925 spin_unlock_irqrestore(&phba->hbalock, iflags);
6927 while (!list_empty(&cqelist)) {
6928 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6929 lpfc_sli4_cq_event_release(phba, cqe);
6934 * lpfc_pci_function_reset - Reset pci function.
6935 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function that originated this request.
6942 * -ENOMEM - No available memory
6943 * -EIO - The mailbox failed to complete successfully.
6946 lpfc_pci_function_reset(struct lpfc_hba *phba)
6948 LPFC_MBOXQ_t *mboxq;
6949 uint32_t rc = 0, if_type;
6950 uint32_t shdr_status, shdr_add_status;
6951 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
6952 union lpfc_sli4_cfg_shdr *shdr;
6953 struct lpfc_register reg_data;
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}
6968 /* Setup PCI function reset mailbox-ioctl command */
6969 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6970 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6971 LPFC_SLI4_MBX_EMBED);
6972 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6973 shdr = (union lpfc_sli4_cfg_shdr *)
6974 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6975 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6976 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6978 if (rc != MBX_TIMEOUT)
6979 mempool_free(mboxq, phba->mbox_mem_pool);
6980 if (shdr_status || shdr_add_status || rc) {
6981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6982 "0495 SLI_FUNCTION_RESET mailbox "
6983 "failed with status x%x add_status x%x,"
6984 " mbx status x%x\n",
6985 shdr_status, shdr_add_status, rc);
6989 case LPFC_SLI_INTF_IF_TYPE_2:
6990 for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
6997 LPFC_SLIPORT_INIT_PORT);
6998 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7002 * Poll the Port Status Register and wait for RDY for
7003 * up to 10 seconds. If the port doesn't respond, treat
7004 * it as an error. If the port responds with RN, start
7007 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7008 if (lpfc_readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr, &reg_data.word0)) {
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
7023 * If the port responds to the init request with
7024 * reset needed, delay for a bit and restart the loop.
7032 /* Detect any port errors. */
7033 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7039 (rdy_chk >= 1000)) {
7040 phba->work_status[0] = readl(
7041 phba->sli4_hba.u.if_type2.ERR1regaddr);
7042 phba->work_status[1] = readl(
7043 phba->sli4_hba.u.if_type2.ERR2regaddr);
7044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7045 "2890 Port Error Detected "
7046 "during Port Reset: "
7047 "port status reg 0x%x, "
7048 "error 1=0x%x, error 2=0x%x\n",
7050 phba->work_status[0],
7051 phba->work_status[1]);
7056 * Terminate the outer loop provided the Port indicated
7057 * ready within 10 seconds.
7062 /* delay driver action following IF_TYPE_2 function reset */
7065 case LPFC_SLI_INTF_IF_TYPE_1:
7070 /* Catch the not-ready port failure after a port reset. */
7071 if (num_resets >= MAX_IF_TYPE_2_RESETS)
7078 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
7079 * @phba: pointer to lpfc hba data structure.
7080 * @cnt: number of nop mailbox commands to send.
 * This routine is invoked to send @cnt NOP mailbox commands and wait for
 * each command to complete.
 *
 * Return: the number of NOP mailbox commands completed.
7088 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7090 LPFC_MBOXQ_t *mboxq;
7091 int length, cmdsent;
7094 uint32_t shdr_status, shdr_add_status;
7095 union lpfc_sli4_cfg_shdr *shdr;
	if (cnt == 0) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2518 Requested to send 0 NOP mailbox cmd\n");
		return cnt;
	}
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2519 Unable to allocate memory for issuing "
				"NOP mailbox command\n");
		return 0;
	}
7111 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7112 length = (sizeof(struct lpfc_mbx_nop) -
7113 sizeof(struct lpfc_sli4_cfg_mhdr));
7114 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7115 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7117 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
7118 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7119 if (!phba->sli4_hba.intr_enable)
7120 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		else
			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
		if (rc == MBX_TIMEOUT)
			break;
7125 /* Check return status */
7126 shdr = (union lpfc_sli4_cfg_shdr *)
7127 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7128 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7129 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7131 if (shdr_status || shdr_add_status || rc) {
7132 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7133 "2520 NOP mailbox command failed "
7134 "status x%x add_status x%x mbx "
7135 "status x%x\n", shdr_status,
7136 shdr_add_status, rc);
7141 if (rc != MBX_TIMEOUT)
7142 mempool_free(mboxq, phba->mbox_mem_pool);
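/*
 * Caller-side sketch (illustrative): since this routine returns how many
 * NOPs actually completed, a caller can compare against the requested
 * count to judge the health of the mailbox path. LPFC_NOP_CNT below is a
 * hypothetical stand-in for whatever count the caller picks:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, LPFC_NOP_CNT) != LPFC_NOP_CNT)
 *		... treat the mailbox path as unhealthy ...
 */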
7148 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
7149 * @phba: pointer to lpfc hba data structure.
7151 * This routine is invoked to set up the PCI device memory space for device
7152 * with SLI-4 interface spec.
7156 * other values - error
7159 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7161 struct pci_dev *pdev;
7162 unsigned long bar0map_len, bar1map_len, bar2map_len;
7163 int error = -ENODEV;
	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;
7172 /* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
7182 * The BARs and register set definitions and offset locations are
7183 * dependent on the if_type.
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 fallback for SLI4 devices. */
7191 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7192 LPFC_SLI_INTF_VALID) {
7193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7194 "2894 SLI_INTF reg contents invalid "
7195 "sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}
7200 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7202 * Get the bus address of SLI4 device Bar regions and the
7203 * number of bytes required by each mapping. The mapping of the
 * particular PCI BAR regions is dependent on the type of
7207 if (pci_resource_start(pdev, 0)) {
7208 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7209 bar0map_len = pci_resource_len(pdev, 0);
7212 * Map SLI4 PCI Config Space Register base to a kernel virtual
7215 phba->sli4_hba.conf_regs_memmap_p =
7216 ioremap(phba->pci_bar0_map, bar0map_len);
7217 if (!phba->sli4_hba.conf_regs_memmap_p) {
7218 dev_printk(KERN_ERR, &pdev->dev,
7219 "ioremap failed for SLI4 PCI config "
7223 /* Set up BAR0 PCI config space register memory map */
7224 lpfc_sli4_bar0_register_memmap(phba, if_type);
7226 phba->pci_bar0_map = pci_resource_start(pdev, 1);
7227 bar0map_len = pci_resource_len(pdev, 1);
7228 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7229 dev_printk(KERN_ERR, &pdev->dev,
7230 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7233 phba->sli4_hba.conf_regs_memmap_p =
7234 ioremap(phba->pci_bar0_map, bar0map_len);
7235 if (!phba->sli4_hba.conf_regs_memmap_p) {
7236 dev_printk(KERN_ERR, &pdev->dev,
7237 "ioremap failed for SLI4 PCI config "
7241 lpfc_sli4_bar0_register_memmap(phba, if_type);
7244 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7245 (pci_resource_start(pdev, 2))) {
7247 * Map SLI4 if type 0 HBA Control Register base to a kernel
7248 * virtual address and setup the registers.
7250 phba->pci_bar1_map = pci_resource_start(pdev, 2);
7251 bar1map_len = pci_resource_len(pdev, 2);
7252 phba->sli4_hba.ctrl_regs_memmap_p =
7253 ioremap(phba->pci_bar1_map, bar1map_len);
7254 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7255 dev_printk(KERN_ERR, &pdev->dev,
7256 "ioremap failed for SLI4 HBA control registers.\n");
7257 goto out_iounmap_conf;
7259 lpfc_sli4_bar1_register_memmap(phba);
7262 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7263 (pci_resource_start(pdev, 4))) {
7265 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7266 * virtual address and setup the registers.
7268 phba->pci_bar2_map = pci_resource_start(pdev, 4);
7269 bar2map_len = pci_resource_len(pdev, 4);
7270 phba->sli4_hba.drbl_regs_memmap_p =
7271 ioremap(phba->pci_bar2_map, bar2map_len);
7272 if (!phba->sli4_hba.drbl_regs_memmap_p) {
7273 dev_printk(KERN_ERR, &pdev->dev,
7274 "ioremap failed for SLI4 HBA doorbell registers.\n");
7275 goto out_iounmap_ctrl;
7277 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7279 goto out_iounmap_all;
7285 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7287 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7289 iounmap(phba->sli4_hba.conf_regs_memmap_p);
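/*
 * Summary of the mappings above, derived from the code in this routine:
 *
 *	pci resource 0 -> pci_bar0_map -> conf_regs_memmap_p (all if_types;
 *		falls back to resource 1 when resource 0 is absent, which
 *		is fatal for if_type 2 ports)
 *	pci resource 2 -> pci_bar1_map -> ctrl_regs_memmap_p (if_type 0 only)
 *	pci resource 4 -> pci_bar2_map -> drbl_regs_memmap_p (if_type 0 only)
 */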
7295 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7296 * @phba: pointer to lpfc hba data structure.
7298 * This routine is invoked to unset the PCI device memory space for device
7299 * with SLI-4 interface spec.
7302 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7304 struct pci_dev *pdev;
7306 /* Obtain PCI device reference */
7310 pdev = phba->pcidev;
7312 /* Free coherent DMA memory allocated */
7314 /* Unmap I/O memory space */
7315 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7316 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7317 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7323 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7324 * @phba: pointer to lpfc hba data structure.
7326 * This routine is invoked to enable the MSI-X interrupt vectors to device
7327 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7328 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7329 * invoked, enables either all or nothing, depending on the current
7330 * availability of PCI vector resources. The device driver is responsible
7331 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled and leak its vectors.
7340 * other values - error
7343 lpfc_sli_enable_msix(struct lpfc_hba *phba)
7348 /* Set up MSI-X multi-message vectors */
7349 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7350 phba->msix_entries[i].entry = i;
7352 /* Configure MSI-X capability structure */
7353 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7354 ARRAY_SIZE(phba->msix_entries));
7356 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7357 "0420 PCI enable MSI-X failed (%d)\n", rc);
7360 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7361 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7362 "0477 MSI-X entry[%d]: vector=x%x "
7364 phba->msix_entries[i].vector,
7365 phba->msix_entries[i].entry);
7367 * Assign MSI-X vectors to interrupt handlers
7370 /* vector-0 is associated to slow-path handler */
7371 rc = request_irq(phba->msix_entries[0].vector,
7372 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7373 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7375 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7376 "0421 MSI-X slow-path request_irq failed "
7381 /* vector-1 is associated to fast-path handler */
7382 rc = request_irq(phba->msix_entries[1].vector,
7383 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7384 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7387 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7388 "0429 MSI-X fast-path request_irq failed "
7394 * Configure HBA MSI-X attention conditions to messages
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
7405 rc = lpfc_config_msi(phba, pmb);
7408 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7409 if (rc != MBX_SUCCESS) {
7410 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7411 "0351 Config MSI mailbox command failed, "
7412 "mbxCmd x%x, mbxStatus x%x\n",
7413 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7417 /* Free memory allocated for mailbox command */
7418 mempool_free(pmb, phba->mbox_mem_pool);
7422 /* Free memory allocated for mailbox command */
7423 mempool_free(pmb, phba->mbox_mem_pool);
7426 /* free the irq already requested */
7427 free_irq(phba->msix_entries[1].vector, phba);
7430 /* free the irq already requested */
7431 free_irq(phba->msix_entries[0].vector, phba);
7434 /* Unconfigure MSI-X capability structure */
7435 pci_disable_msix(phba->pcidev);
7440 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7441 * @phba: pointer to lpfc hba data structure.
7443 * This routine is invoked to release the MSI-X vectors and then disable the
7444 * MSI-X interrupt mode to device with SLI-3 interface spec.
7447 lpfc_sli_disable_msix(struct lpfc_hba *phba)
7451 /* Free up MSI-X multi-message vectors */
7452 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7453 free_irq(phba->msix_entries[i].vector, phba);
7455 pci_disable_msix(phba->pcidev);
7461 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7462 * @phba: pointer to lpfc hba data structure.
7464 * This routine is invoked to enable the MSI interrupt mode to device with
7465 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7466 * enable the MSI vector. The device driver is responsible for calling the
 * request_irq() to register the MSI vector with an interrupt handler, which
7468 * is done in this function.
7472 * other values - error
7475 lpfc_sli_enable_msi(struct lpfc_hba *phba)
7479 rc = pci_enable_msi(phba->pcidev);
7481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7482 "0462 PCI enable MSI mode success.\n");
7484 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7485 "0471 PCI enable MSI mode failed (%d)\n", rc);
7489 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7490 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7492 pci_disable_msi(phba->pcidev);
7493 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7494 "0478 MSI request_irq failed (%d)\n", rc);
7500 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7501 * @phba: pointer to lpfc hba data structure.
7503 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and the device will be left with MSI enabled and leak
7510 lpfc_sli_disable_msi(struct lpfc_hba *phba)
7512 free_irq(phba->pcidev->irq, phba);
7513 pci_disable_msi(phba->pcidev);
7518 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7519 * @phba: pointer to lpfc hba data structure.
7521 * This routine is invoked to enable device interrupt and associate driver's
7522 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
7525 * mode which is supported by the platform, kernel, and device in the order
7527 * MSI-X -> MSI -> IRQ.
7531 * other values - error
7534 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7536 uint32_t intr_mode = LPFC_INTR_ERROR;
7539 if (cfg_mode == 2) {
7540 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7541 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7543 /* Now, try to enable MSI-X interrupt mode */
7544 retval = lpfc_sli_enable_msix(phba);
7546 /* Indicate initialization to MSI-X mode */
7547 phba->intr_type = MSIX;
7553 /* Fallback to MSI if MSI-X initialization failed */
7554 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7555 retval = lpfc_sli_enable_msi(phba);
7557 /* Indicate initialization to MSI mode */
7558 phba->intr_type = MSI;
	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7564 if (phba->intr_type == NONE) {
7565 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7566 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7568 /* Indicate initialization to INTx mode */
7569 phba->intr_type = INTx;
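/*
 * Caller-side sketch (illustrative, not a call path copied from this
 * file): probe code typically checks the returned mode and fails the
 * bring-up when no interrupt mode could be enabled.
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		... neither MSI-X, MSI, nor INTx came up; abort probe ...
 */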
7577 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7578 * @phba: pointer to lpfc hba data structure.
7580 * This routine is invoked to disable device interrupt and disassociate the
7581 * driver's interrupt handler(s) from interrupt vector(s) to device with
7582 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7583 * release the interrupt vector(s) for the message signaled interrupt.
7586 lpfc_sli_disable_intr(struct lpfc_hba *phba)
7588 /* Disable the currently initialized interrupt mode */
7589 if (phba->intr_type == MSIX)
7590 lpfc_sli_disable_msix(phba);
7591 else if (phba->intr_type == MSI)
7592 lpfc_sli_disable_msi(phba);
7593 else if (phba->intr_type == INTx)
7594 free_irq(phba->pcidev->irq, phba);
7596 /* Reset interrupt management states */
7597 phba->intr_type = NONE;
7598 phba->sli.slistat.sli_intr = 0;
7604 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7605 * @phba: pointer to lpfc hba data structure.
7607 * This routine is invoked to enable the MSI-X interrupt vectors to device
7608 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7609 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7610 * enables either all or nothing, depending on the current availability of
7611 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
7613 * handler, which is done in this function. Note that later when device is
7614 * unloading, the driver should always call free_irq() on all MSI-X vectors
7615 * it has done request_irq() on before calling pci_disable_msix(). Failure
 * to do so results in a BUG_ON() and the device will be left with MSI-X
 * enabled and leak its vectors.
7621 * other values - error
7624 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7626 int vectors, rc, index;
7628 /* Set up MSI-X multi-message vectors */
7629 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7630 phba->sli4_hba.msix_entries[index].entry = index;
7632 /* Configure MSI-X capability structure */
7633 vectors = phba->sli4_hba.cfg_eqn;
7634 enable_msix_vectors:
	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
			     vectors);
	if (rc > 1) {
		vectors = rc;
		goto enable_msix_vectors;
	} else if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
7646 /* Log MSI-X vector assignment */
7647 for (index = 0; index < vectors; index++)
7648 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7649 "0489 MSI-X entry[%d]: vector=x%x "
7650 "message=%d\n", index,
7651 phba->sli4_hba.msix_entries[index].vector,
7652 phba->sli4_hba.msix_entries[index].entry);
7654 * Assign MSI-X vectors to interrupt handlers
7657 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7658 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7659 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7661 /* All Interrupts need to be handled by one EQ */
7662 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7663 &lpfc_sli4_intr_handler, IRQF_SHARED,
7664 LPFC_DRIVER_NAME, phba);
7666 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7667 "0485 MSI-X slow-path request_irq failed "
7672 /* The rest of the vector(s) are associated to fast-path handler(s) */
7673 for (index = 1; index < vectors; index++) {
7674 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7675 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7676 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7677 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7678 LPFC_FP_DRIVER_HANDLER_NAME,
7679 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7681 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7682 "0486 MSI-X fast-path (%d) "
7683 "request_irq failed (%d)\n", index, rc);
7687 phba->sli4_hba.msix_vec_nr = vectors;
7692 /* free the irq already requested */
7693 for (--index; index >= 1; index--)
7694 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7695 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7697 /* free the irq already requested */
7698 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7701 /* Unconfigure MSI-X capability structure */
7702 pci_disable_msix(phba->pcidev);
7707 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7708 * @phba: pointer to lpfc hba data structure.
7710 * This routine is invoked to release the MSI-X vectors and then disable the
7711 * MSI-X interrupt mode to device with SLI-4 interface spec.
7714 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7718 /* Free up MSI-X multi-message vectors */
7719 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7721 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7722 free_irq(phba->sli4_hba.msix_entries[index].vector,
7723 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7726 pci_disable_msix(phba->pcidev);
7732 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7733 * @phba: pointer to lpfc hba data structure.
7735 * This routine is invoked to enable the MSI interrupt mode to device with
7736 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7737 * to enable the MSI vector. The device driver is responsible for calling
 * the request_irq() to register the MSI vector with an interrupt handler,
7739 * which is done in this function.
7743 * other values - error
7746 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7750 rc = pci_enable_msi(phba->pcidev);
7752 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7753 "0487 PCI enable MSI mode success.\n");
7755 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7756 "0488 PCI enable MSI mode failed (%d)\n", rc);
7760 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7761 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7763 pci_disable_msi(phba->pcidev);
7764 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7765 "0490 MSI request_irq failed (%d)\n", rc);
7769 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7770 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7771 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7778 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7779 * @phba: pointer to lpfc hba data structure.
7781 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and the device will be left with MSI enabled and leak
7788 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7790 free_irq(phba->pcidev->irq, phba);
7791 pci_disable_msi(phba->pcidev);
7796 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7797 * @phba: pointer to lpfc hba data structure.
7799 * This routine is invoked to enable device interrupt and associate driver's
7800 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the driver,
 * the driver will try to fall back from the configured interrupt mode to an
7803 * interrupt mode which is supported by the platform, kernel, and device in
7805 * MSI-X -> MSI -> IRQ.
7809 * other values - error
7812 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7814 uint32_t intr_mode = LPFC_INTR_ERROR;
7817 if (cfg_mode == 2) {
7818 /* Preparation before conf_msi mbox cmd */
7821 /* Now, try to enable MSI-X interrupt mode */
7822 retval = lpfc_sli4_enable_msix(phba);
7824 /* Indicate initialization to MSI-X mode */
7825 phba->intr_type = MSIX;
7831 /* Fallback to MSI if MSI-X initialization failed */
7832 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7833 retval = lpfc_sli4_enable_msi(phba);
7835 /* Indicate initialization to MSI mode */
7836 phba->intr_type = MSI;
	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7842 if (phba->intr_type == NONE) {
7843 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7844 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7846 /* Indicate initialization to INTx mode */
7847 phba->intr_type = INTx;
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7852 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7860 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7861 * @phba: pointer to lpfc hba data structure.
7863 * This routine is invoked to disable device interrupt and disassociate
7864 * the driver's interrupt handler(s) from interrupt vector(s) to device
7865 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7866 * will release the interrupt vector(s) for the message signaled interrupt.
7869 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7871 /* Disable the currently initialized interrupt mode */
7872 if (phba->intr_type == MSIX)
7873 lpfc_sli4_disable_msix(phba);
7874 else if (phba->intr_type == MSI)
7875 lpfc_sli4_disable_msi(phba);
7876 else if (phba->intr_type == INTx)
7877 free_irq(phba->pcidev->irq, phba);
7879 /* Reset interrupt management states */
7880 phba->intr_type = NONE;
7881 phba->sli.slistat.sli_intr = 0;
7887 * lpfc_unset_hba - Unset SLI3 hba device initialization
7888 * @phba: pointer to lpfc hba data structure.
7890 * This routine is invoked to unset the HBA device initialization steps to
7891 * a device with SLI-3 interface spec.
7894 lpfc_unset_hba(struct lpfc_hba *phba)
7896 struct lpfc_vport *vport = phba->pport;
7897 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7899 spin_lock_irq(shost->host_lock);
7900 vport->load_flag |= FC_UNLOADING;
7901 spin_unlock_irq(shost->host_lock);
7903 lpfc_stop_hba_timers(phba);
7905 phba->pport->work_port_events = 0;
7907 lpfc_sli_hba_down(phba);
7909 lpfc_sli_brdrestart(phba);
7911 lpfc_sli_disable_intr(phba);
7917 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7918 * @phba: pointer to lpfc hba data structure.
7920 * This routine is invoked to unset the HBA device initialization steps to
7921 * a device with SLI-4 interface spec.
7924 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7926 struct lpfc_vport *vport = phba->pport;
7927 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7929 spin_lock_irq(shost->host_lock);
7930 vport->load_flag |= FC_UNLOADING;
7931 spin_unlock_irq(shost->host_lock);
7933 phba->pport->work_port_events = 0;
7935 /* Stop the SLI4 device port */
7936 lpfc_stop_port(phba);
7938 lpfc_sli4_disable_intr(phba);
7940 /* Reset SLI4 HBA FCoE function */
7941 lpfc_pci_function_reset(phba);
7947 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
7948 * @phba: Pointer to HBA context object.
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy I/Os. It will check the XRI exchange
 * busy on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds;
 * after that, it will check the XRI exchange busy on outstanding FCP and
 * ELS I/Os every 30 seconds, log an error message, and wait forever. Only
 * when all XRI exchange busy I/Os complete does the driver unload proceed
 * with invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
7960 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7963 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7964 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7966 while (!fcp_xri_cmpl || !els_xri_cmpl) {
7967 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7969 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7970 "2877 FCP XRI exchange busy "
7971 "wait time: %d seconds.\n",
7974 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7975 "2878 ELS XRI exchange busy "
7976 "wait time: %d seconds.\n",
7978 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7979 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7981 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7982 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7985 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7987 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
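/*
 * Illustrative sketch (not from the driver): the two-phase polling pattern
 * the routine above describes -- poll the busy condition every 10ms until a
 * total timeout, then drop to a slow 30-second poll that logs each pass and
 * waits indefinitely.  example_wait_for_quiesce() and the cond() callback
 * are hypothetical stand-ins.
 */
#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/types.h>

#define EXAMPLE_FAST_POLL_MS	10
#define EXAMPLE_FAST_PHASE_MS	10000
#define EXAMPLE_SLOW_POLL_MS	30000

static void example_wait_for_quiesce(bool (*cond)(void *arg), void *arg)
{
	unsigned int waited_ms = 0;

	while (!cond(arg)) {
		if (waited_ms > EXAMPLE_FAST_PHASE_MS) {
			/* Slow phase: complain and keep waiting forever. */
			pr_err("example: still busy after %u ms\n", waited_ms);
			msleep(EXAMPLE_SLOW_POLL_MS);
			waited_ms += EXAMPLE_SLOW_POLL_MS;
		} else {
			/* Fast phase: poll every 10ms for up to 10 seconds. */
			msleep(EXAMPLE_FAST_POLL_MS);
			waited_ms += EXAMPLE_FAST_POLL_MS;
		}
	}
}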
7992 * lpfc_sli4_hba_unset - Unset the fcoe hba
7993 * @phba: Pointer to HBA context object.
7995 * This function is called in the SLI4 code path to reset the HBA's FCoE
7996 * function. The caller is not required to hold any lock. This routine
7997 * issues PCI function reset mailbox command to reset the FCoE function.
7998 * At the end of the function, it calls lpfc_hba_down_post function to
7999 * free any pending commands.
8002 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8005 LPFC_MBOXQ_t *mboxq;
8006 struct pci_dev *pdev = phba->pcidev;
8008 lpfc_stop_hba_timers(phba);
8009 phba->sli4_hba.intr_enable = 0;
8012 * Gracefully wait out any potentially outstanding asynchronous mailbox command.
8016 /* First, block any pending async mailbox command from being posted */
8017 spin_lock_irq(&phba->hbalock);
8018 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8019 spin_unlock_irq(&phba->hbalock);
8020 /* Now, try to wait it out if we can */
8021 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8023 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8026 /* Forcefully release the outstanding mailbox command if timed out */
8027 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8028 spin_lock_irq(&phba->hbalock);
8029 mboxq = phba->sli.mbox_active;
8030 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8031 __lpfc_mbox_cmpl_put(phba, mboxq);
8032 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8033 phba->sli.mbox_active = NULL;
8034 spin_unlock_irq(&phba->hbalock);
8037 /* Abort all iocbs associated with the hba */
8038 lpfc_sli_hba_iocb_abort(phba);
8040 /* Wait for completion of device XRI exchange busy */
8041 lpfc_sli4_xri_exchange_busy_wait(phba);
8043 /* Disable PCI subsystem interrupt */
8044 lpfc_sli4_disable_intr(phba);
8046 /* Disable SR-IOV if enabled */
8047 if (phba->cfg_sriov_nr_virtfn)
8048 pci_disable_sriov(pdev);
8050 /* Stop kthread signal shall trigger work_done one more time */
8051 kthread_stop(phba->worker_thread);
8053 /* Reset SLI4 HBA FCoE function */
8054 lpfc_pci_function_reset(phba);
8056 /* Stop the SLI4 device port */
8057 phba->pport->work_port_events = 0;
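/*
 * Illustrative sketch (not from the driver): the "block new work, wait a
 * bounded number of polls for the in-flight operation, then forcefully
 * release it under the lock" pattern used above for the active mailbox
 * command.  struct example_dev and example_drain_active_op() are
 * hypothetical.
 */
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_dev {
	spinlock_t lock;
	bool op_blocked;	/* new operations are refused when set */
	bool op_active;		/* an operation is currently in flight */
};

static void example_drain_active_op(struct example_dev *dev,
				    unsigned int max_polls)
{
	unsigned int polls = 0;

	/* First, block any new operation from being started. */
	spin_lock_irq(&dev->lock);
	dev->op_blocked = true;
	spin_unlock_irq(&dev->lock);

	/* Now try to wait the current one out, with an upper bound. */
	while (dev->op_active) {
		if (++polls > max_polls)
			break;
		msleep(10);
	}

	/* Forcefully release the operation if it never completed. */
	if (dev->op_active) {
		spin_lock_irq(&dev->lock);
		dev->op_active = false;
		spin_unlock_irq(&dev->lock);
	}
}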
8061 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
8062 * @phba: Pointer to HBA context object.
8063 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8065 * This function is called in the SLI4 code path to read the port's
8066 * sli4 capabilities.
8068 * This function may be called from any context that can block-wait
8069 * for the completion. The expectation is that this routine is called
8070 * typically from probe_one or from the online routine.
8073 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8076 struct lpfc_mqe *mqe;
8077 struct lpfc_pc_sli4_params *sli4_params;
8081 mqe = &mboxq->u.mqe;
8083 /* Read the port's SLI4 Parameters port capabilities */
8084 lpfc_pc_sli4_params(mboxq);
8085 if (!phba->sli4_hba.intr_enable)
8086 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8088 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
8089 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8095 sli4_params = &phba->sli4_hba.pc_sli4_params;
8096 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
8097 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
8098 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
8099 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
8100 &mqe->un.sli4_params);
8101 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
8102 &mqe->un.sli4_params);
8103 sli4_params->proto_types = mqe->un.sli4_params.word3;
8104 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
8105 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
8106 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
8107 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
8108 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
8109 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
8110 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
8111 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
8112 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
8113 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
8114 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
8115 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
8116 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
8117 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
8118 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
8119 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
8120 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
8121 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
8122 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
8123 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8125 /* Make sure that sge_supp_len can be handled by the driver */
8126 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8127 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
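/*
 * Illustrative sketch (not from the driver): the mailbox-issue pattern used
 * above -- poll for completion while interrupts are not yet enabled,
 * otherwise block-wait with a command-specific timeout.  struct example_hba
 * and the example_issue_*() helpers are hypothetical stand-ins for the
 * driver's mailbox primitives.
 */
#include <linux/types.h>

struct example_hba {
	bool intr_enable;
};

/* Hypothetical stubs standing in for polled/interrupt-driven submission. */
static int example_issue_polled(struct example_hba *hba, void *cmd)
{
	return 0;
}

static int example_issue_wait(struct example_hba *hba, void *cmd,
			      unsigned int tmo_ms)
{
	return 0;
}

static int example_issue_cmd(struct example_hba *hba, void *cmd,
			     unsigned int tmo_ms)
{
	if (!hba->intr_enable)
		return example_issue_polled(hba, cmd);
	return example_issue_wait(hba, cmd, tmo_ms);
}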
8133 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
8134 * @phba: Pointer to HBA context object.
8135 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
8137 * This function is called in the SLI4 code path to read the port's
8138 * sli4 capabilities.
8140 * This function may be called from any context that can block-wait
8141 * for the completion. The expectation is that this routine is called
8142 * typically from probe_one or from the online routine.
8145 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8148 struct lpfc_mqe *mqe = &mboxq->u.mqe;
8149 struct lpfc_pc_sli4_params *sli4_params;
8151 struct lpfc_sli4_parameters *mbx_sli4_parameters;
8154 * By default, the driver assumes the SLI4 port requires RPI
8155 * header postings. The SLI4_PARAM response will correct this
8158 phba->sli4_hba.rpi_hdrs_in_use = 1;
8160 /* Read the port's SLI4 Config Parameters */
8161 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
8162 sizeof(struct lpfc_sli4_cfg_mhdr));
8163 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8164 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
8165 length, LPFC_SLI4_MBX_EMBED);
8166 if (!phba->sli4_hba.intr_enable)
8167 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8169 rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
8170 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
8173 sli4_params = &phba->sli4_hba.pc_sli4_params;
8174 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
8175 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
8176 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
8177 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
8178 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
8179 mbx_sli4_parameters);
8180 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
8181 mbx_sli4_parameters);
8182 if (bf_get(cfg_phwq, mbx_sli4_parameters))
8183 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
8185 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
8186 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
8187 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
8188 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
8189 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
8190 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
8191 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
8192 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
8193 mbx_sli4_parameters);
8194 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
8195 mbx_sli4_parameters);
8196 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8197 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8199 /* Make sure that sge_supp_len can be handled by the driver */
8200 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8201 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8207 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
8208 * @pdev: pointer to PCI device
8209 * @pid: pointer to PCI device identifier
8211 * This routine is to be called to attach a device with SLI-3 interface spec
8212 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8213 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8214 * information of the device and driver to see whether the driver can
8215 * support this kind of device. If the match is successful, the driver core
8216 * invokes this routine. If this routine determines it can claim the HBA, it
8217 * does all the initialization that it needs to do to handle the HBA properly.
8220 * 0 - driver can claim the device
8221 * negative value - driver can not claim the device
8223 static int __devinit
8224 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8226 struct lpfc_hba *phba;
8227 struct lpfc_vport *vport = NULL;
8228 struct Scsi_Host *shost = NULL;
8230 uint32_t cfg_mode, intr_mode;
8232 /* Allocate memory for HBA structure */
8233 phba = lpfc_hba_alloc(pdev);
8237 /* Perform generic PCI device enabling operation */
8238 error = lpfc_enable_pci_dev(phba);
8240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8241 "1401 Failed to enable pci device.\n");
8245 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
8246 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
8248 goto out_disable_pci_dev;
8250 /* Set up SLI-3 specific device PCI memory space */
8251 error = lpfc_sli_pci_mem_setup(phba);
8253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8254 "1402 Failed to set up pci memory space.\n");
8255 goto out_disable_pci_dev;
8258 /* Set up phase-1 common device driver resources */
8259 error = lpfc_setup_driver_resource_phase1(phba);
8261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8262 "1403 Failed to set up driver resource.\n");
8263 goto out_unset_pci_mem_s3;
8266 /* Set up SLI-3 specific device driver resources */
8267 error = lpfc_sli_driver_resource_setup(phba);
8269 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8270 "1404 Failed to set up driver resource.\n");
8271 goto out_unset_pci_mem_s3;
8274 /* Initialize and populate the iocb list per host */
8275 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8278 "1405 Failed to initialize iocb list.\n");
8279 goto out_unset_driver_resource_s3;
8282 /* Set up common device driver resources */
8283 error = lpfc_setup_driver_resource_phase2(phba);
8285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8286 "1406 Failed to set up driver resource.\n");
8287 goto out_free_iocb_list;
8290 /* Create SCSI host to the physical port */
8291 error = lpfc_create_shost(phba);
8293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8294 "1407 Failed to create scsi host.\n");
8295 goto out_unset_driver_resource;
8298 /* Configure sysfs attributes */
8299 vport = phba->pport;
8300 error = lpfc_alloc_sysfs_attr(vport);
8302 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8303 "1476 Failed to allocate sysfs attr\n");
8304 goto out_destroy_shost;
8307 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8308 /* Now, trying to enable interrupt and bring up the device */
8309 cfg_mode = phba->cfg_use_msi;
8311 /* Put device to a known state before enabling interrupt */
8312 lpfc_stop_port(phba);
8313 /* Configure and enable interrupt */
8314 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8315 if (intr_mode == LPFC_INTR_ERROR) {
8316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8317 "0431 Failed to enable interrupt.\n");
8319 goto out_free_sysfs_attr;
8321 /* SLI-3 HBA setup */
8322 if (lpfc_sli_hba_setup(phba)) {
8323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8324 "1477 Failed to set up hba\n");
8326 goto out_remove_device;
8329 /* Wait 50ms for the interrupts of previous mailbox commands */
8331 /* Check active interrupts on message signaled interrupts */
8332 if (intr_mode == 0 ||
8333 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8334 /* Log the current active interrupt mode */
8335 phba->intr_mode = intr_mode;
8336 lpfc_log_intr_mode(phba, intr_mode);
8339 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8340 "0447 Configure interrupt mode (%d) "
8341 "failed active interrupt test.\n",
8343 /* Disable the current interrupt mode */
8344 lpfc_sli_disable_intr(phba);
8345 /* Try next level of interrupt mode */
8346 cfg_mode = --intr_mode;
8350 /* Perform post initialization setup */
8351 lpfc_post_init_setup(phba);
8353 /* Check if there are static vports to be created. */
8354 lpfc_create_static_vport(phba);
8359 lpfc_unset_hba(phba);
8360 out_free_sysfs_attr:
8361 lpfc_free_sysfs_attr(vport);
8363 lpfc_destroy_shost(phba);
8364 out_unset_driver_resource:
8365 lpfc_unset_driver_resource_phase2(phba);
8367 lpfc_free_iocb_list(phba);
8368 out_unset_driver_resource_s3:
8369 lpfc_sli_driver_resource_unset(phba);
8370 out_unset_pci_mem_s3:
8371 lpfc_sli_pci_mem_unset(phba);
8372 out_disable_pci_dev:
8373 lpfc_disable_pci_dev(phba);
8375 scsi_host_put(shost);
8377 lpfc_hba_free(phba);
8382 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8383 * @pdev: pointer to PCI device
8385 * This routine is to be called to detach a device with SLI-3 interface
8386 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8387 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8388 * device to be removed from the PCI subsystem properly.
8390 static void __devexit
8391 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8393 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8394 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8395 struct lpfc_vport **vports;
8396 struct lpfc_hba *phba = vport->phba;
8398 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8400 spin_lock_irq(&phba->hbalock);
8401 vport->load_flag |= FC_UNLOADING;
8402 spin_unlock_irq(&phba->hbalock);
8404 lpfc_free_sysfs_attr(vport);
8406 /* Release all the vports against this physical port */
8407 vports = lpfc_create_vport_work_array(phba);
8409 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8410 fc_vport_terminate(vports[i]->fc_vport);
8411 lpfc_destroy_vport_work_array(phba, vports);
8413 /* Remove FC host and then SCSI host with the physical port */
8414 fc_remove_host(shost);
8415 scsi_remove_host(shost);
8416 lpfc_cleanup(vport);
8419 * Bring down the SLI Layer. This step disables all interrupts,
8420 * clears the rings, discards all mailbox commands, and resets
8424 /* HBA interrupt will be disabled after this call */
8425 lpfc_sli_hba_down(phba);
8426 /* Stop kthread signal shall trigger work_done one more time */
8427 kthread_stop(phba->worker_thread);
8428 /* Final cleanup of txcmplq and reset the HBA */
8429 lpfc_sli_brdrestart(phba);
8431 lpfc_stop_hba_timers(phba);
8432 spin_lock_irq(&phba->hbalock);
8433 list_del_init(&vport->listentry);
8434 spin_unlock_irq(&phba->hbalock);
8436 lpfc_debugfs_terminate(vport);
8438 /* Disable SR-IOV if enabled */
8439 if (phba->cfg_sriov_nr_virtfn)
8440 pci_disable_sriov(pdev);
8442 /* Disable interrupt */
8443 lpfc_sli_disable_intr(phba);
8445 pci_set_drvdata(pdev, NULL);
8446 scsi_host_put(shost);
8449 * Call scsi_free before mem_free since scsi bufs are released to their
8450 * corresponding pools here.
8452 lpfc_scsi_free(phba);
8453 lpfc_mem_free_all(phba);
8455 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8456 phba->hbqslimp.virt, phba->hbqslimp.phys);
8458 /* Free resources associated with SLI2 interface */
8459 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8460 phba->slim2p.virt, phba->slim2p.phys);
8462 /* unmap adapter SLIM and Control Registers */
8463 iounmap(phba->ctrl_regs_memmap_p);
8464 iounmap(phba->slim_memmap_p);
8466 lpfc_hba_free(phba);
8468 pci_release_selected_regions(pdev, bars);
8469 pci_disable_device(pdev);
8473 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8474 * @pdev: pointer to PCI device
8475 * @msg: power management message
8477 * This routine is to be called from the kernel's PCI subsystem to support
8478 * system Power Management (PM) to device with SLI-3 interface spec. When
8479 * PM invokes this method, it quiesces the device by stopping the driver's
8480 * worker thread for the device, turning off device's interrupt and DMA,
8481 * and bringing the device offline. Note that because the driver implements
8482 * only the minimum PM requirements of a power-aware driver for suspend/resume,
8483 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the
8484 * suspend() method call are treated as SUSPEND and the driver fully
8485 * reinitializes its device during the resume() method call; therefore the
8486 * driver sets the device to the PCI_D3hot state in PCI config space instead
8487 * of setting it according to the @msg provided by the PM.
8490 * 0 - driver suspended the device
8494 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8496 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8497 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8499 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8500 "0473 PCI device Power Management suspend.\n");
8502 /* Bring down the device */
8503 lpfc_offline_prep(phba);
8505 kthread_stop(phba->worker_thread);
8507 /* Disable interrupt from device */
8508 lpfc_sli_disable_intr(phba);
8510 /* Save device state to PCI config space */
8511 pci_save_state(pdev);
8512 pci_set_power_state(pdev, PCI_D3hot);
8518 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8519 * @pdev: pointer to PCI device
8521 * This routine is to be called from the kernel's PCI subsystem to support
8522 * system Power Management (PM) to device with SLI-3 interface spec. When PM
8523 * invokes this method, it restores the device's PCI config space state and
8524 * fully reinitializes the device and brings it online. Note that because the
8525 * driver implements only the minimum PM requirements of a power-aware driver
8526 * for suspend/resume, all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8527 * passed to the suspend() method call are treated as SUSPEND and the driver
8528 * fully reinitializes its device during the resume() method call; therefore
8529 * the device is set to PCI_D0 directly in PCI config space before restoring
8530 * the state.
8533 * 0 - driver resumed the device
8537 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8539 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8540 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8544 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8545 "0452 PCI device Power Management resume.\n");
8547 /* Restore device state from PCI config space */
8548 pci_set_power_state(pdev, PCI_D0);
8549 pci_restore_state(pdev);
8552 * As the new kernel behavior of pci_restore_state() API call clears
8553 * device saved_state flag, need to save the restored state again.
8555 pci_save_state(pdev);
8557 if (pdev->is_busmaster)
8558 pci_set_master(pdev);
8560 /* Startup the kernel thread for this host adapter. */
8561 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8562 "lpfc_worker_%d", phba->brd_no);
8563 if (IS_ERR(phba->worker_thread)) {
8564 error = PTR_ERR(phba->worker_thread);
8565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8566 "0434 PM resume failed to start worker "
8567 "thread: error=x%x.\n", error);
8571 /* Configure and enable interrupt */
8572 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8573 if (intr_mode == LPFC_INTR_ERROR) {
8574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8575 "0430 PM resume Failed to enable interrupt\n");
8578 phba->intr_mode = intr_mode;
8580 /* Restart HBA and bring it online */
8581 lpfc_sli_brdrestart(phba);
8584 /* Log the current active interrupt mode */
8585 lpfc_log_intr_mode(phba, phba->intr_mode);
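/*
 * Illustrative sketch (not from the driver): the resume-side PCI config
 * handling shown above.  pci_restore_state() clears the device's
 * saved_state flag, so the state is saved again right away so that a later
 * suspend or slot reset still has a valid copy to restore.
 * example_pci_resume_prep() is a hypothetical helper.
 */
#include <linux/pci.h>

static void example_pci_resume_prep(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);	/* bring the function to full power */
	pci_restore_state(pdev);		/* consumes the saved state */
	pci_save_state(pdev);			/* re-arm saved state for next time */

	if (pdev->is_busmaster)
		pci_set_master(pdev);
}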
8591 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8592 * @phba: pointer to lpfc hba data structure.
8594 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8595 * aborts all the outstanding SCSI I/Os to the pci device.
8598 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8600 struct lpfc_sli *psli = &phba->sli;
8601 struct lpfc_sli_ring *pring;
8603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8604 "2723 PCI channel I/O abort preparing for recovery\n");
8607 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
8608 * and let the SCSI mid-layer retry them to recover.
8610 pring = &psli->ring[psli->fcp_ring];
8611 lpfc_sli_abort_iocb_ring(phba, pring);
8615 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8616 * @phba: pointer to lpfc hba data structure.
8618 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8619 * disables the device interrupt and pci device, and aborts the internal FCP
8623 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8625 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8626 "2710 PCI channel disable preparing for reset\n");
8628 /* Block any management I/Os to the device */
8629 lpfc_block_mgmt_io(phba);
8631 /* Block all SCSI devices' I/Os on the host */
8632 lpfc_scsi_dev_block(phba);
8634 /* stop all timers */
8635 lpfc_stop_hba_timers(phba);
8637 /* Disable interrupt and pci device */
8638 lpfc_sli_disable_intr(phba);
8639 pci_disable_device(phba->pcidev);
8641 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
8642 lpfc_sli_flush_fcp_rings(phba);
8646 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8647 * @phba: pointer to lpfc hba data structure.
8649 * This routine is called to prepare the SLI3 device for PCI slot permanently
8650 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8654 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8657 "2711 PCI channel permanent disable for failure\n");
8658 /* Block all SCSI devices' I/Os on the host */
8659 lpfc_scsi_dev_block(phba);
8661 /* stop all timers */
8662 lpfc_stop_hba_timers(phba);
8664 /* Clean up all driver's outstanding SCSI I/Os */
8665 lpfc_sli_flush_fcp_rings(phba);
8669 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8670 * @pdev: pointer to PCI device.
8671 * @state: the current PCI connection state.
8673 * This routine is called from the PCI subsystem for I/O error handling to
8674 * device with SLI-3 interface spec. This function is called by the PCI
8675 * subsystem after a PCI bus error affecting this device has been detected.
8676 * When this function is invoked, it will need to stop all the I/Os and
8677 * interrupt(s) to the device. Once that is done, it will return
8678 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8682 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8683 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8684 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8686 static pci_ers_result_t
8687 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8689 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8690 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8693 case pci_channel_io_normal:
8694 /* Non-fatal error, prepare for recovery */
8695 lpfc_sli_prep_dev_for_recover(phba);
8696 return PCI_ERS_RESULT_CAN_RECOVER;
8697 case pci_channel_io_frozen:
8698 /* Fatal error, prepare for slot reset */
8699 lpfc_sli_prep_dev_for_reset(phba);
8700 return PCI_ERS_RESULT_NEED_RESET;
8701 case pci_channel_io_perm_failure:
8702 /* Permanent failure, prepare for device down */
8703 lpfc_sli_prep_dev_for_perm_failure(phba);
8704 return PCI_ERS_RESULT_DISCONNECT;
8706 /* Unknown state, prepare and request slot reset */
8707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8708 "0472 Unknown PCI error state: x%x\n", state);
8709 lpfc_sli_prep_dev_for_reset(phba);
8710 return PCI_ERS_RESULT_NEED_RESET;
8715 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
8716 * @pdev: pointer to PCI device.
8718 * This routine is called from the PCI subsystem for error handling to
8719 * device with SLI-3 interface spec. This is called after PCI bus has been
8720 * reset to restart the PCI card from scratch, as if from a cold-boot.
8721 * During the PCI subsystem error recovery, after driver returns
8722 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8723 * recovery and then call this routine before calling the .resume method
8724 * to recover the device. This function will initialize the HBA device,
8725 * enable the interrupt, but it will just put the HBA into an offline state
8726 * without passing any I/O traffic.
8729 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
8730 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8732 static pci_ers_result_t
8733 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
8735 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8736 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8737 struct lpfc_sli *psli = &phba->sli;
8740 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8741 if (pci_enable_device_mem(pdev)) {
8742 printk(KERN_ERR "lpfc: Cannot re-enable "
8743 "PCI device after reset.\n");
8744 return PCI_ERS_RESULT_DISCONNECT;
8747 pci_restore_state(pdev);
8750 * As the new kernel behavior of pci_restore_state() API call clears
8751 * device saved_state flag, need to save the restored state again.
8753 pci_save_state(pdev);
8755 if (pdev->is_busmaster)
8756 pci_set_master(pdev);
8758 spin_lock_irq(&phba->hbalock);
8759 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8760 spin_unlock_irq(&phba->hbalock);
8762 /* Configure and enable interrupt */
8763 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8764 if (intr_mode == LPFC_INTR_ERROR) {
8765 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8766 "0427 Cannot re-enable interrupt after "
8768 return PCI_ERS_RESULT_DISCONNECT;
8770 phba->intr_mode = intr_mode;
8772 /* Take device offline, it will perform cleanup */
8773 lpfc_offline_prep(phba);
8775 lpfc_sli_brdrestart(phba);
8777 /* Log the current active interrupt mode */
8778 lpfc_log_intr_mode(phba, phba->intr_mode);
8780 return PCI_ERS_RESULT_RECOVERED;
8784 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8785 * @pdev: pointer to PCI device
8787 * This routine is called from the PCI subsystem for error handling to device
8788 * with SLI-3 interface spec. It is called when kernel error recovery tells
8789 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8790 * error recovery. After this call, traffic can start to flow from this device
8794 lpfc_io_resume_s3(struct pci_dev *pdev)
8796 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8797 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8799 /* Bring device online, it will be no-op for non-fatal error resume */
8802 /* Clean up Advanced Error Reporting (AER) if needed */
8803 if (phba->hba_flag & HBA_AER_ENABLED)
8804 pci_cleanup_aer_uncorrect_error_status(pdev);
8808 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8809 * @phba: pointer to lpfc hba data structure.
8811 * returns the number of ELS/CT IOCBs to reserve
8814 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8816 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8818 if (phba->sli_rev == LPFC_SLI_REV4) {
8821 else if (max_xri <= 256)
8823 else if (max_xri <= 512)
8825 else if (max_xri <= 1024)
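/*
 * Illustrative sketch (not from the driver): mapping a resource maximum to
 * a reservation count by thresholds, as the routine above does with
 * max_xri.  The returned counts are placeholders, not the driver's actual
 * values.
 */
static int example_els_iocb_cnt(int max_xri)
{
	if (max_xri <= 256)
		return 16;	/* placeholder count */
	else if (max_xri <= 512)
		return 32;	/* placeholder count */
	else if (max_xri <= 1024)
		return 64;	/* placeholder count */
	return 128;		/* placeholder count */
}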
8834 * lpfc_write_firmware - attempt to write a firmware image to the port
8835 * @phba: pointer to lpfc hba data structure.
8836 * @fw: pointer to firmware image returned from request_firmware.
8838 * returns the number of bytes written if write is successful.
8839 * returns a negative error value if there were errors.
8840 * returns 0 if firmware matches currently active firmware on port.
8843 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8846 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
8847 struct list_head dma_buffer_list;
8849 struct lpfc_dmabuf *dmabuf, *next;
8850 uint32_t offset = 0, temp_offset = 0;
8852 INIT_LIST_HEAD(&dma_buffer_list);
8853 if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8854 (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
8855 (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8856 (image->size != fw->size)) {
8857 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8858 "3022 Invalid FW image found. "
8859 "Magic:%d Type:%x ID:%x\n",
8860 image->magic_number,
8861 bf_get(lpfc_grp_hdr_file_type, image),
8862 bf_get(lpfc_grp_hdr_id, image));
8865 lpfc_decode_firmware_rev(phba, fwrev, 1);
8866 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
8867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8868 "3023 Updating Firmware. Current Version:%s "
8870 fwrev, image->revision);
8871 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8872 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8878 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8882 if (!dmabuf->virt) {
8887 list_add_tail(&dmabuf->list, &dma_buffer_list);
8889 while (offset < fw->size) {
8890 temp_offset = offset;
8891 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
8892 if (offset + SLI4_PAGE_SIZE > fw->size) {
8893 temp_offset += fw->size - offset;
8894 memcpy(dmabuf->virt,
8895 fw->data + temp_offset,
8899 memcpy(dmabuf->virt, fw->data + temp_offset,
8901 temp_offset += SLI4_PAGE_SIZE;
8903 rc = lpfc_wr_object(phba, &dma_buffer_list,
8904 (fw->size - offset), &offset);
8906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8907 "3024 Firmware update failed. "
8915 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
8916 list_del(&dmabuf->list);
8917 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
8918 dmabuf->virt, dmabuf->phys);
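/*
 * Illustrative sketch (not from the driver): copying a firmware image into
 * a list of page-sized DMA buffers a chunk at a time, as the loop above
 * does before each lpfc_wr_object() call.  struct example_buf and
 * EXAMPLE_CHUNK are hypothetical; the real code uses struct lpfc_dmabuf
 * and SLI4_PAGE_SIZE.
 */
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_CHUNK 4096

struct example_buf {
	struct list_head list;
	void *virt;
};

/* Fill each buffer in @bufs from @fw starting at @offset; return the new offset. */
static u32 example_fill_buffers(const struct firmware *fw, u32 offset,
				struct list_head *bufs)
{
	struct example_buf *buf;
	size_t len;

	list_for_each_entry(buf, bufs, list) {
		if (offset >= fw->size)
			break;
		len = min_t(size_t, EXAMPLE_CHUNK, fw->size - offset);
		memcpy(buf->virt, fw->data + offset, len);
		offset += len;
	}
	return offset;
}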
8925 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8926 * @pdev: pointer to PCI device
8927 * @pid: pointer to PCI device identifier
8929 * This routine is called from the kernel's PCI subsystem to attach a device
8930 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8931 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8932 * information of the device and driver to see whether the driver can
8933 * support this kind of device. If the match is successful, the driver
8934 * core invokes this routine. If this routine determines it can claim the HBA,
8935 * it does all the initialization that it needs to do to handle the HBA
8939 * 0 - driver can claim the device
8940 * negative value - driver can not claim the device
8942 static int __devinit
8943 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8945 struct lpfc_hba *phba;
8946 struct lpfc_vport *vport = NULL;
8947 struct Scsi_Host *shost = NULL;
8949 uint32_t cfg_mode, intr_mode;
8951 int adjusted_fcp_eq_count;
8953 const struct firmware *fw;
8954 uint8_t file_name[16];
8956 /* Allocate memory for HBA structure */
8957 phba = lpfc_hba_alloc(pdev);
8961 /* Perform generic PCI device enabling operation */
8962 error = lpfc_enable_pci_dev(phba);
8964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8965 "1409 Failed to enable pci device.\n");
8969 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
8970 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8972 goto out_disable_pci_dev;
8974 /* Set up SLI-4 specific device PCI memory space */
8975 error = lpfc_sli4_pci_mem_setup(phba);
8977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8978 "1410 Failed to set up pci memory space.\n");
8979 goto out_disable_pci_dev;
8982 /* Set up phase-1 common device driver resources */
8983 error = lpfc_setup_driver_resource_phase1(phba);
8985 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8986 "1411 Failed to set up driver resource.\n");
8987 goto out_unset_pci_mem_s4;
8990 /* Set up SLI-4 Specific device driver resources */
8991 error = lpfc_sli4_driver_resource_setup(phba);
8993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8994 "1412 Failed to set up driver resource.\n");
8995 goto out_unset_pci_mem_s4;
8998 /* Initialize and populate the iocb list per host */
9000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9001 "2821 initialize iocb list %d.\n",
9002 phba->cfg_iocb_cnt*1024);
9003 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
9006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9007 "1413 Failed to initialize iocb list.\n");
9008 goto out_unset_driver_resource_s4;
9011 INIT_LIST_HEAD(&phba->active_rrq_list);
9013 /* Set up common device driver resources */
9014 error = lpfc_setup_driver_resource_phase2(phba);
9016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9017 "1414 Failed to set up driver resource.\n");
9018 goto out_free_iocb_list;
9021 /* Create SCSI host to the physical port */
9022 error = lpfc_create_shost(phba);
9024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9025 "1415 Failed to create scsi host.\n");
9026 goto out_unset_driver_resource;
9029 /* Configure sysfs attributes */
9030 vport = phba->pport;
9031 error = lpfc_alloc_sysfs_attr(vport);
9033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9034 "1416 Failed to allocate sysfs attr\n");
9035 goto out_destroy_shost;
9038 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9039 /* Now, trying to enable interrupt and bring up the device */
9040 cfg_mode = phba->cfg_use_msi;
9042 /* Put device to a known state before enabling interrupt */
9043 lpfc_stop_port(phba);
9044 /* Configure and enable interrupt */
9045 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
9046 if (intr_mode == LPFC_INTR_ERROR) {
9047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9048 "0426 Failed to enable interrupt.\n");
9050 goto out_free_sysfs_attr;
9052 /* Default to single EQ for non-MSI-X */
9053 if (phba->intr_type != MSIX)
9054 adjusted_fcp_eq_count = 0;
9055 else if (phba->sli4_hba.msix_vec_nr <
9056 phba->cfg_fcp_eq_count + 1)
9057 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9059 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9060 /* Free unused EQs */
9061 for (fcp_qidx = adjusted_fcp_eq_count;
9062 fcp_qidx < phba->cfg_fcp_eq_count;
9064 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
9065 /* do not delete the first fcp_cq */
9067 lpfc_sli4_queue_free(
9068 phba->sli4_hba.fcp_cq[fcp_qidx]);
9070 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
9071 /* Set up SLI-4 HBA */
9072 if (lpfc_sli4_hba_setup(phba)) {
9073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9074 "1421 Failed to set up hba\n");
9076 goto out_disable_intr;
9079 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
9081 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
9084 /* Check active interrupts received only for MSI/MSI-X */
9085 if (intr_mode == 0 ||
9086 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
9087 /* Log the current active interrupt mode */
9088 phba->intr_mode = intr_mode;
9089 lpfc_log_intr_mode(phba, intr_mode);
9092 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9093 "0451 Configure interrupt mode (%d) "
9094 "failed active interrupt test.\n",
9096 /* Unset the previous SLI-4 HBA setup. */
9098 * TODO: Is this operation compatible with IF TYPE 2
9099 * devices? All port state is deleted and cleared.
9101 lpfc_sli4_unset_hba(phba);
9102 /* Try next level of interrupt mode */
9103 cfg_mode = --intr_mode;
9106 /* Perform post initialization setup */
9107 lpfc_post_init_setup(phba);
9109 /* check for firmware upgrade or downgrade */
9110 snprintf(file_name, 16, "%s.grp", phba->ModelName);
9111 error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9113 lpfc_write_firmware(phba, fw);
9114 release_firmware(fw);
9117 /* Check if there are static vports to be created. */
9118 lpfc_create_static_vport(phba);
9123 lpfc_sli4_disable_intr(phba);
9124 out_free_sysfs_attr:
9125 lpfc_free_sysfs_attr(vport);
9127 lpfc_destroy_shost(phba);
9128 out_unset_driver_resource:
9129 lpfc_unset_driver_resource_phase2(phba);
9131 lpfc_free_iocb_list(phba);
9132 out_unset_driver_resource_s4:
9133 lpfc_sli4_driver_resource_unset(phba);
9134 out_unset_pci_mem_s4:
9135 lpfc_sli4_pci_mem_unset(phba);
9136 out_disable_pci_dev:
9137 lpfc_disable_pci_dev(phba);
9139 scsi_host_put(shost);
9141 lpfc_hba_free(phba);
9146 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
9147 * @pdev: pointer to PCI device
9149 * This routine is called from the kernel's PCI subsystem to detach a device
9150 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
9151 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9152 * device to be removed from the PCI subsystem properly.
9154 static void __devexit
9155 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
9157 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9158 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9159 struct lpfc_vport **vports;
9160 struct lpfc_hba *phba = vport->phba;
9163 /* Mark the device unloading flag */
9164 spin_lock_irq(&phba->hbalock);
9165 vport->load_flag |= FC_UNLOADING;
9166 spin_unlock_irq(&phba->hbalock);
9168 /* Free the HBA sysfs attributes */
9169 lpfc_free_sysfs_attr(vport);
9171 /* Release all the vports against this physical port */
9172 vports = lpfc_create_vport_work_array(phba);
9174 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
9175 fc_vport_terminate(vports[i]->fc_vport);
9176 lpfc_destroy_vport_work_array(phba, vports);
9178 /* Remove FC host and then SCSI host with the physical port */
9179 fc_remove_host(shost);
9180 scsi_remove_host(shost);
9182 /* Perform cleanup on the physical port */
9183 lpfc_cleanup(vport);
9186 * Bring down the SLI Layer. This step disables all interrupts,
9187 * clears the rings, discards all mailbox commands, and resets
9188 * the HBA FCoE function.
9190 lpfc_debugfs_terminate(vport);
9191 lpfc_sli4_hba_unset(phba);
9193 spin_lock_irq(&phba->hbalock);
9194 list_del_init(&vport->listentry);
9195 spin_unlock_irq(&phba->hbalock);
9197 /* Perform scsi free before driver resource_unset since scsi
9198 * buffers are released to their corresponding pools here.
9200 lpfc_scsi_free(phba);
9201 lpfc_sli4_driver_resource_unset(phba);
9203 /* Unmap adapter Control and Doorbell registers */
9204 lpfc_sli4_pci_mem_unset(phba);
9206 /* Release PCI resources and disable device's PCI function */
9207 scsi_host_put(shost);
9208 lpfc_disable_pci_dev(phba);
9210 /* Finally, free the driver's device data structure */
9211 lpfc_hba_free(phba);
9217 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
9218 * @pdev: pointer to PCI device
9219 * @msg: power management message
9221 * This routine is called from the kernel's PCI subsystem to support system
9222 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
9223 * this method, it quiesces the device by stopping the driver's worker
9224 * thread for the device, turning off the device's interrupt and DMA, and
9225 * bringing the device offline. Note that because the driver implements only
9226 * the minimum PM requirements of a power-aware driver for suspend/resume,
9227 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the
9228 * suspend() method call are treated as SUSPEND and the driver fully
9229 * reinitializes its device during the resume() method call; therefore the
9230 * driver sets the device to the PCI_D3hot state in PCI config space instead
9231 * of setting it according to the @msg provided by the PM.
9234 * 0 - driver suspended the device
9238 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9240 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9241 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9243 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9244 "2843 PCI device Power Management suspend.\n");
9246 /* Bring down the device */
9247 lpfc_offline_prep(phba);
9249 kthread_stop(phba->worker_thread);
9251 /* Disable interrupt from device */
9252 lpfc_sli4_disable_intr(phba);
9254 /* Save device state to PCI config space */
9255 pci_save_state(pdev);
9256 pci_set_power_state(pdev, PCI_D3hot);
9262 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
9263 * @pdev: pointer to PCI device
9265 * This routine is called from the kernel's PCI subsystem to support system
9266 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
9267 * this method, it restores the device's PCI config space state and fully
9268 * reinitializes the device and brings it online. Note that because the driver
9269 * implements only the minimum PM requirements of a power-aware driver for
9270 * suspend/resume, all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
9271 * passed to the suspend() method call are treated as SUSPEND and the driver
9272 * fully reinitializes its device during the resume() method call; therefore
9273 * the device is set to PCI_D0 directly in PCI config space before restoring
9274 * the state.
9277 * 0 - driver resumed the device
9281 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
9283 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9284 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9288 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9289 "0292 PCI device Power Management resume.\n");
9291 /* Restore device state from PCI config space */
9292 pci_set_power_state(pdev, PCI_D0);
9293 pci_restore_state(pdev);
9296 * As the new kernel behavior of pci_restore_state() API call clears
9297 * device saved_state flag, need to save the restored state again.
9299 pci_save_state(pdev);
9301 if (pdev->is_busmaster)
9302 pci_set_master(pdev);
9304 /* Startup the kernel thread for this host adapter. */
9305 phba->worker_thread = kthread_run(lpfc_do_work, phba,
9306 "lpfc_worker_%d", phba->brd_no);
9307 if (IS_ERR(phba->worker_thread)) {
9308 error = PTR_ERR(phba->worker_thread);
9309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9310 "0293 PM resume failed to start worker "
9311 "thread: error=x%x.\n", error);
9315 /* Configure and enable interrupt */
9316 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9317 if (intr_mode == LPFC_INTR_ERROR) {
9318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9319 "0294 PM resume Failed to enable interrupt\n");
9322 phba->intr_mode = intr_mode;
9324 /* Restart HBA and bring it online */
9325 lpfc_sli_brdrestart(phba);
9328 /* Log the current active interrupt mode */
9329 lpfc_log_intr_mode(phba, phba->intr_mode);
9335 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
9336 * @phba: pointer to lpfc hba data structure.
9338 * This routine is called to prepare the SLI4 device for PCI slot recover. It
9339 * aborts all the outstanding SCSI I/Os to the pci device.
9342 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
9344 struct lpfc_sli *psli = &phba->sli;
9345 struct lpfc_sli_ring *pring;
9347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9348 "2828 PCI channel I/O abort preparing for recovery\n");
9350 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
9351 * and let the SCSI mid-layer retry them to recover.
9353 pring = &psli->ring[psli->fcp_ring];
9354 lpfc_sli_abort_iocb_ring(phba, pring);
9358 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
9359 * @phba: pointer to lpfc hba data structure.
9361 * This routine is called to prepare the SLI4 device for PCI slot reset. It
9362 * disables the device interrupt and pci device, and aborts the internal FCP
9366 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9369 "2826 PCI channel disable preparing for reset\n");
9371 /* Block any management I/Os to the device */
9372 lpfc_block_mgmt_io(phba);
9374 /* Block all SCSI devices' I/Os on the host */
9375 lpfc_scsi_dev_block(phba);
9377 /* stop all timers */
9378 lpfc_stop_hba_timers(phba);
9380 /* Disable interrupt and pci device */
9381 lpfc_sli4_disable_intr(phba);
9382 pci_disable_device(phba->pcidev);
9384 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9385 lpfc_sli_flush_fcp_rings(phba);
9389 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9390 * @phba: pointer to lpfc hba data structure.
9392 * This routine is called to prepare the SLI4 device for PCI slot permanently
9393 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9397 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9400 "2827 PCI channel permanent disable for failure\n");
9402 /* Block all SCSI devices' I/Os on the host */
9403 lpfc_scsi_dev_block(phba);
9405 /* stop all timers */
9406 lpfc_stop_hba_timers(phba);
9408 /* Clean up all driver's outstanding SCSI I/Os */
9409 lpfc_sli_flush_fcp_rings(phba);
9413 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9414 * @pdev: pointer to PCI device.
9415 * @state: the current PCI connection state.
9417 * This routine is called from the PCI subsystem for error handling to device
9418 * with SLI-4 interface spec. This function is called by the PCI subsystem
9419 * after a PCI bus error affecting this device has been detected. When this
9420 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9421 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9422 * for the PCI subsystem to perform proper recovery as desired.
9425 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9426 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9428 static pci_ers_result_t
9429 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9431 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9432 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9435 case pci_channel_io_normal:
9436 /* Non-fatal error, prepare for recovery */
9437 lpfc_sli4_prep_dev_for_recover(phba);
9438 return PCI_ERS_RESULT_CAN_RECOVER;
9439 case pci_channel_io_frozen:
9440 /* Fatal error, prepare for slot reset */
9441 lpfc_sli4_prep_dev_for_reset(phba);
9442 return PCI_ERS_RESULT_NEED_RESET;
9443 case pci_channel_io_perm_failure:
9444 /* Permanent failure, prepare for device down */
9445 lpfc_sli4_prep_dev_for_perm_failure(phba);
9446 return PCI_ERS_RESULT_DISCONNECT;
9448 /* Unknown state, prepare and request slot reset */
9449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9450 "2825 Unknown PCI error state: x%x\n", state);
9451 lpfc_sli4_prep_dev_for_reset(phba);
9452 return PCI_ERS_RESULT_NEED_RESET;
9457 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
9458 * @pdev: pointer to PCI device.
9460 * This routine is called from the PCI subsystem for error handling to device
9461 * with SLI-4 interface spec. It is called after PCI bus has been reset to
9462 * restart the PCI card from scratch, as if from a cold-boot. During the
9463 * PCI subsystem error recovery, after the driver returns
9464 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9465 * recovery and then call this routine before calling the .resume method to
9466 * recover the device. This function will initialize the HBA device, enable
9467 * the interrupt, but it will just put the HBA into an offline state without
9468 * passing any I/O traffic.
9471 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9472 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9474 static pci_ers_result_t
9475 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9477 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9478 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9479 struct lpfc_sli *psli = &phba->sli;
9482 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9483 if (pci_enable_device_mem(pdev)) {
9484 printk(KERN_ERR "lpfc: Cannot re-enable "
9485 "PCI device after reset.\n");
9486 return PCI_ERS_RESULT_DISCONNECT;
9489 pci_restore_state(pdev);
9490 if (pdev->is_busmaster)
9491 pci_set_master(pdev);
9493 spin_lock_irq(&phba->hbalock);
9494 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9495 spin_unlock_irq(&phba->hbalock);
9497 /* Configure and enable interrupt */
9498 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9499 if (intr_mode == LPFC_INTR_ERROR) {
9500 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9501 "2824 Cannot re-enable interrupt after "
9503 return PCI_ERS_RESULT_DISCONNECT;
9505 phba->intr_mode = intr_mode;
9507 /* Log the current active interrupt mode */
9508 lpfc_log_intr_mode(phba, phba->intr_mode);
9510 return PCI_ERS_RESULT_RECOVERED;
9514 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9515 * @pdev: pointer to PCI device
9517 * This routine is called from the PCI subsystem for error handling to device
9518 * with SLI-4 interface spec. It is called when kernel error recovery tells
9519 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9520 * error recovery. After this call, traffic can start to flow from this device
9524 lpfc_io_resume_s4(struct pci_dev *pdev)
9526 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9527 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9530 * In case of slot reset, because the function reset is performed through a
9531 * mailbox command, which requires DMA to be enabled, this operation has to
9532 * be deferred to the io_resume phase. Taking the device offline will
9533 * perform the necessary cleanup.
9535 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9536 /* Perform device reset */
9537 lpfc_offline_prep(phba);
9539 lpfc_sli_brdrestart(phba);
9540 /* Bring the device back online */
9544 /* Clean up Advanced Error Reporting (AER) if needed */
9545 if (phba->hba_flag & HBA_AER_ENABLED)
9546 pci_cleanup_aer_uncorrect_error_status(pdev);
9550 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9551 * @pdev: pointer to PCI device
9552 * @pid: pointer to PCI device identifier
9554 * This routine is to be registered to the kernel's PCI subsystem. When an
9555 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9556 * at PCI device-specific information of the device and driver to see whether
9557 * the driver can support this kind of device. If the match is
9558 * successful, the driver core invokes this routine. This routine dispatches
9559 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9560 * do all the initialization that it needs to do to handle the HBA device
9564 * 0 - driver can claim the device
9565 * negative value - driver can not claim the device
9567 static int __devinit
9568 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9571 struct lpfc_sli_intf intf;
9573 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9576 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9577 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9578 rc = lpfc_pci_probe_one_s4(pdev, pid);
9580 rc = lpfc_pci_probe_one_s3(pdev, pid);
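/*
 * Illustrative sketch (not from the driver): probe-time dispatch on a PCI
 * config register, as the routine above does with the SLI_INTF word.
 * EXAMPLE_INTF_REG, the shift and the mask are hypothetical; the real code
 * uses LPFC_SLI_INTF and the lpfc_sli_intf_* bit-field accessors.
 */
#include <linux/errno.h>
#include <linux/pci.h>

#define EXAMPLE_INTF_REG	0x58	/* hypothetical config-space offset */
#define EXAMPLE_REV_SLI4	4

/* Return 1 for the SLI-4 probe path, 0 for SLI-3, negative errno on failure. */
static int example_probe_dispatch(struct pci_dev *pdev, u32 rev_mask,
				  unsigned int rev_shift)
{
	u32 word;

	if (pci_read_config_dword(pdev, EXAMPLE_INTF_REG, &word))
		return -EIO;

	if (((word >> rev_shift) & rev_mask) == EXAMPLE_REV_SLI4)
		return 1;
	return 0;
}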
9586 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9587 * @pdev: pointer to PCI device
9589 * This routine is to be registered to the kernel's PCI subsystem. When an
9590 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9591 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9592 * remove routine, which will perform all the necessary cleanup for the
9593 * device to be removed from the PCI subsystem properly.
9595 static void __devexit
9596 lpfc_pci_remove_one(struct pci_dev *pdev)
9598 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9599 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9601 switch (phba->pci_dev_grp) {
9602 case LPFC_PCI_DEV_LP:
9603 lpfc_pci_remove_one_s3(pdev);
9605 case LPFC_PCI_DEV_OC:
9606 lpfc_pci_remove_one_s4(pdev);
9609 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9610 "1424 Invalid PCI device group: 0x%x\n",
9618 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9619 * @pdev: pointer to PCI device
9620 * @msg: power management message
9622 * This routine is to be registered to the kernel's PCI subsystem to support
9623 * system Power Management (PM). When PM invokes this method, it dispatches
9624 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9625 * suspend the device.
9628 * 0 - driver suspended the device
9632 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9634 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9635 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9638 switch (phba->pci_dev_grp) {
9639 case LPFC_PCI_DEV_LP:
9640 rc = lpfc_pci_suspend_one_s3(pdev, msg);
9642 case LPFC_PCI_DEV_OC:
9643 rc = lpfc_pci_suspend_one_s4(pdev, msg);
9646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9647 "1425 Invalid PCI device group: 0x%x\n",
9655 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9656 * @pdev: pointer to PCI device
9658 * This routine is to be registered to the kernel's PCI subsystem to support
9659 * system Power Management (PM). When PM invokes this method, it dispatches
9660 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9661 * resume the device.
9664 * 0 - driver resumed the device
9668 lpfc_pci_resume_one(struct pci_dev *pdev)
9670 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9671 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9674 switch (phba->pci_dev_grp) {
9675 case LPFC_PCI_DEV_LP:
9676 rc = lpfc_pci_resume_one_s3(pdev);
9678 case LPFC_PCI_DEV_OC:
9679 rc = lpfc_pci_resume_one_s4(pdev);
9682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9683 "1426 Invalid PCI device group: 0x%x\n",
9691 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
9692 * @pdev: pointer to PCI device.
9693 * @state: the current PCI connection state.
9695 * This routine is registered to the PCI subsystem for error handling. This
9696 * function is called by the PCI subsystem after a PCI bus error affecting
9697 * this device has been detected. When this routine is invoked, it dispatches
9698 * the action to the proper SLI-3 or SLI-4 device error detected handling
9699 * routine, which will perform the proper error detected operation.
9702 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9703 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9705 static pci_ers_result_t
9706 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9708 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9709 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9710 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9712 switch (phba->pci_dev_grp) {
9713 case LPFC_PCI_DEV_LP:
9714 rc = lpfc_io_error_detected_s3(pdev, state);
9716 case LPFC_PCI_DEV_OC:
9717 rc = lpfc_io_error_detected_s4(pdev, state);
9720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9721 "1427 Invalid PCI device group: 0x%x\n",
9729 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
9730 * @pdev: pointer to PCI device.
9732 * This routine is registered to the PCI subsystem for error handling. This
9733 * function is called after PCI bus has been reset to restart the PCI card
9734 * from scratch, as if from a cold-boot. When this routine is invoked, it
9735 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
9736 * routine, which will perform the proper device reset.
9739 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9740 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9742 static pci_ers_result_t
9743 lpfc_io_slot_reset(struct pci_dev *pdev)
9745 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9746 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9747 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9749 switch (phba->pci_dev_grp) {
9750 case LPFC_PCI_DEV_LP:
9751 rc = lpfc_io_slot_reset_s3(pdev);
9753 case LPFC_PCI_DEV_OC:
9754 rc = lpfc_io_slot_reset_s4(pdev);
9757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9758 "1428 Invalid PCI device group: 0x%x\n",

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
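
/*
 * PCI error-recovery callbacks. On a PCI/PCIe error, the kernel's recovery
 * machinery (AER/EEH) calls .error_detected first, then .slot_reset after
 * the slot or link has been reset, and finally .resume once normal I/O may
 * restart; each callback above dispatches to its SLI-3 or SLI-4 handler.
 */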
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};
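
/*
 * Top-level PCI driver definition tying the probe/remove, power-management
 * and error-handling entry points together; it is registered with the PCI
 * core by lpfc_init() below and unregistered by lpfc_exit().
 */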
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);

	/* Free the BlockGuard debug dump buffers, if they were allocated */
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);