/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	/* ensure WQE bcopy flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
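/*
 * Editor's illustration (a hedged sketch, not part of the original driver):
 * the SLI4 queues above are circular rings indexed modulo entry_count.
 * host_index is the producer (host) slot and hba_index the consumer (HBA)
 * slot, and one slot is always left unused so that a full ring and an empty
 * ring remain distinguishable. The hypothetical helper below restates the
 * "ring full" test used by lpfc_sli4_wq_put() and the other put routines.
 */
static inline bool lpfc_example_ring_full(uint32_t host_index,
					  uint32_t hba_index,
					  uint32_t entry_count)
{
	/* Advancing the producer index would collide with the consumer. */
	return ((host_index + 1) % entry_count) == hba_index;
}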
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
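/*
 * Editor's note (an assumption drawn from the code above, not original
 * source text): the EQ doorbell write does double duty. num_released
 * returns credits for the EQEs the host consumed so the HBA can reuse
 * those slots, while the arm bit requests a new interrupt when the next
 * entry arrives. Releasing with arm == false is therefore the natural mode
 * inside a polling loop, with a final arm == true release when done.
 */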
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the Receive queue.
 * @drqe: The data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entry on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}
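/*
 * Editor's note (assumption, not original source text): the header and data
 * RQs form a pair; each received frame consumes one entry from both, which
 * is why the routine insists that hq->host_index == dq->host_index and
 * advances the two indexes in lockstep. The doorbell is also batched: it is
 * rung once per entry_repost postings rather than per RQE, crediting
 * entry_repost buffers at a time.
 */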
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
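/*
 * Editor's note (not part of the original source): lpfc_handle_rrq_active()
 * uses the common "collect under lock, act after unlock" pattern. Expired
 * RRQs are moved onto the local send_rrq list while hbalock is held, the
 * lock is dropped, and only then is each entry processed, so that
 * lpfc_send_rrq() and lpfc_clr_rrq_active() run outside the spinlock.
 */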
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and returns a pointer to the
 * newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
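/*
 * Editor's note (assumption): the loop above rotates through the free ELS
 * sglq list looking for an XRI that has no RRQ outstanding for the target
 * DID. start_sglq remembers the first candidate; if the search walks all
 * the way back to it, every free XRI is blocked by an RRQ and the
 * allocation fails with NULL.
 */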
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
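/*
 * Editor's note (not part of the original source): ownership of each
 * cancelled iocb passes to its completion handler. Entries without an
 * iocb_cmpl routine are returned to the pool directly; for the rest, the
 * handler is invoked with the faked ulpStatus/ulpWord[4] values and is
 * expected to release the iocbq itself, exactly as for a real completion.
 */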
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
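/*
 * Editor's note (assumption): for SLI-3 the command ring lives in host
 * memory shared with the adapter. lpfc_sli_submit_iocb() copies the IOCB
 * into the ring slot and then publishes the new producer index through
 * cmdPutInx in the host_gp area, which cues the adapter to fetch the entry;
 * the matching consumer index (pgp->cmdGetInx) is read back in
 * lpfc_sli_next_iocb_slot() to detect a full ring.
 */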
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
					 &phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return an
 * error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
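/*
 * Editor's note (assumption drawn from the surrounding code): on success
 * lpfc_sli4_rq_put() returns the RQ index the buffer landed on, and the tag
 * is encoded as (index | (hbqno << 16)). lpfc_sli_hbqbuf_find() reverses
 * this with (tag >> 16) to recover the HBQ number, so the two encodings
 * must stay in sync.
 */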
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.ring_mask = (1 << LPFC_ELS_RING),
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
1870 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1871 * @phba: Pointer to HBA context object.
1872 * @hbqno: HBQ number.
1873 * @count: Number of HBQ buffers to be posted.
1875 * This function is called with no lock held to post more hbq buffers to the
1876 * given HBQ. The function returns the number of HBQ buffers successfully
1880 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1882 uint32_t i, posted = 0;
1883 unsigned long flags;
1884 struct hbq_dmabuf *hbq_buffer;
1885 LIST_HEAD(hbq_buf_list);
1886 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1889 if ((phba->hbqs[hbqno].buffer_count + count) >
1890 lpfc_hbq_defs[hbqno]->entry_count)
1891 count = lpfc_hbq_defs[hbqno]->entry_count -
1892 phba->hbqs[hbqno].buffer_count;
1895 /* Allocate HBQ entries */
1896 for (i = 0; i < count; i++) {
1897 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1900 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1902 /* Check whether HBQ is still in use */
1903 spin_lock_irqsave(&phba->hbalock, flags);
1904 if (!phba->hbq_in_use)
1906 while (!list_empty(&hbq_buf_list)) {
1907 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1909 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1911 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1912 phba->hbqs[hbqno].buffer_count++;
1915 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1917 spin_unlock_irqrestore(&phba->hbalock, flags);
1920 spin_unlock_irqrestore(&phba->hbalock, flags);
1921 while (!list_empty(&hbq_buf_list)) {
1922 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1924 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
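/*
 * Illustrative sketch, not driver code: lpfc_sli_hbqbuf_fill_hbqs() above
 * uses the allocate-outside-the-lock, commit-under-the-lock pattern.
 * Buffers are gathered on a private list with no lock held, the hbalock is
 * taken once, hbq_in_use is re-checked, and the batch is then either posted
 * or unwound. The shape of the pattern, with hypothetical
 * alloc_buf()/free_buf() helpers:
 */
#if 0
static int batch_post(struct lpfc_hba *phba, struct list_head *live, int count)
{
	LIST_HEAD(staging);
	struct lpfc_dmabuf *d, *n;
	unsigned long flags;
	int posted = 0;

	while (count--) {			/* no lock held here */
		d = alloc_buf(phba);		/* hypothetical allocator */
		if (!d)
			break;
		list_add_tail(&d->list, &staging);
	}
	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->hbq_in_use) {			/* re-check under the lock */
		list_for_each_entry_safe(d, n, &staging, list) {
			list_move_tail(&d->list, live);
			posted++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	list_for_each_entry_safe(d, n, &staging, list) {
		list_del(&d->list);		/* unwind what wasn't posted */
		free_buf(phba, d);		/* hypothetical */
	}
	return posted;
}
#endif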
1930 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1931 * @phba: Pointer to HBA context object.
1934 * This function posts more buffers to the HBQ and is called with no
1935 * lock held. The function returns the number of HBQ entries
1936 * successfully posted.
1939 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1941 if (phba->sli_rev == LPFC_SLI_REV4)
1944 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1945 lpfc_hbq_defs[qno]->add_count);
1949 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1950 * @phba: Pointer to HBA context object.
1951 * @qno: HBQ queue number.
1953 * This function is called from SLI initialization code path with
1954 * no lock held to post initial HBQ buffers to firmware. The
1955 * function returns the number of HBQ entries successfully allocated.
1958 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1960 if (phba->sli_rev == LPFC_SLI_REV4)
1961 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1962 lpfc_hbq_defs[qno]->entry_count);
1964 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1965 lpfc_hbq_defs[qno]->init_count);
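/*
 * Illustrative sketch, not driver code: a typical call site would post the
 * full complement for the ELS HBQ during bring-up and treat a return of
 * zero as a failure to seed the queue:
 */
#if 0
	if (lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ) == 0)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"failed to post initial ELS HBQ buffers\n");
#endif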
1969 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer from an hbq list
1970 * @rb_list: Pointer to the hbq buffer list.
1973 * This function removes the first hbq buffer on an hbq list and returns a
1974 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1976 static struct hbq_dmabuf *
1977 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1979 struct lpfc_dmabuf *d_buf;
1981 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1984 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1988 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1989 * @phba: Pointer to HBA context object.
1990 * @tag: Tag of the hbq buffer.
1992 * This function searches for the hbq buffer associated with the given tag in
1993 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
1994 * otherwise it returns NULL.
1996 static struct hbq_dmabuf *
1997 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1999 struct lpfc_dmabuf *d_buf;
2000 struct hbq_dmabuf *hbq_buf;
2004 if (hbqno >= LPFC_MAX_HBQS)
2007 spin_lock_irq(&phba->hbalock);
2008 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2009 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2010 if (hbq_buf->tag == tag) {
2011 spin_unlock_irq(&phba->hbalock);
2015 spin_unlock_irq(&phba->hbalock);
2016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2017 "1803 Bad hbq tag. Data: x%x x%x\n",
2018 tag, phba->hbqs[tag >> 16].buffer_count);
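/*
 * Illustrative sketch, not driver code: the buffer tag packs the HBQ number
 * into the upper 16 bits and a per-queue buffer (or RQE) index into the
 * lower 16 bits, which is why the lookup above shifts by 16. Hypothetical
 * helpers that make the encoding explicit:
 */
#if 0
static inline uint32_t hbq_tag_pack(uint32_t hbqno, uint32_t idx)
{
	return (hbqno << 16) | (idx & 0xffff);
}

static inline uint32_t hbq_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}

static inline uint32_t hbq_tag_to_idx(uint32_t tag)
{
	return tag & 0xffff;
}
#endif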
2023 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2024 * @phba: Pointer to HBA context object.
2025 * @hbq_buffer: Pointer to HBQ buffer.
2027 * This function is called with the hbalock held. This function gives back
2028 * the hbq buffer to firmware. If the HBQ does not have space to
2029 * post the buffer, it will free the buffer.
2032 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2037 hbqno = hbq_buffer->tag >> 16;
2038 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2039 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2044 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2045 * @mbxCommand: mailbox command code.
2047 * This function is called by the mailbox event handler function to verify
2048 * that the completed mailbox command is a legitimate mailbox command. If the
2049 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2050 * and the mailbox event handler will take the HBA offline.
2053 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2057 switch (mbxCommand) {
2061 case MBX_WRITE_VPARMS:
2062 case MBX_RUN_BIU_DIAG:
2065 case MBX_CONFIG_LINK:
2066 case MBX_CONFIG_RING:
2067 case MBX_RESET_RING:
2068 case MBX_READ_CONFIG:
2069 case MBX_READ_RCONFIG:
2070 case MBX_READ_SPARM:
2071 case MBX_READ_STATUS:
2075 case MBX_READ_LNK_STAT:
2077 case MBX_UNREG_LOGIN:
2079 case MBX_DUMP_MEMORY:
2080 case MBX_DUMP_CONTEXT:
2083 case MBX_UPDATE_CFG:
2085 case MBX_DEL_LD_ENTRY:
2086 case MBX_RUN_PROGRAM:
2088 case MBX_SET_VARIABLE:
2089 case MBX_UNREG_D_ID:
2090 case MBX_KILL_BOARD:
2091 case MBX_CONFIG_FARP:
2094 case MBX_RUN_BIU_DIAG64:
2095 case MBX_CONFIG_PORT:
2096 case MBX_READ_SPARM64:
2097 case MBX_READ_RPI64:
2098 case MBX_REG_LOGIN64:
2099 case MBX_READ_TOPOLOGY:
2102 case MBX_LOAD_EXP_ROM:
2103 case MBX_ASYNCEVT_ENABLE:
2107 case MBX_PORT_CAPABILITIES:
2108 case MBX_PORT_IOV_CONTROL:
2109 case MBX_SLI4_CONFIG:
2110 case MBX_SLI4_REQ_FTRS:
2112 case MBX_UNREG_FCFI:
2117 case MBX_RESUME_RPI:
2118 case MBX_READ_EVENT_LOG_STATUS:
2119 case MBX_READ_EVENT_LOG:
2120 case MBX_SECURITY_MGMT:
2122 case MBX_ACCESS_VDATA:
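/*
 * Illustrative sketch, not driver code: the rest of the switch above
 * accepts every known command and relies on the default case to flag
 * anything else, which is what drives the MBX_SHUTDOWN path described in
 * the header comment (assuming the known cases echo the command code back):
 */
#if 0
	int ret;

	switch (mbxCommand) {
	case MBX_READ_CONFIG:	/* ...and every other known command... */
		ret = mbxCommand;	/* known command: accept it */
		break;
	default:
		ret = MBX_SHUTDOWN;	/* unknown: take the HBA offline */
	}
	return ret;
#endif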
2133 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2134 * @phba: Pointer to HBA context object.
2135 * @pmboxq: Pointer to mailbox command.
2137 * This is the completion handler function for mailbox commands issued from
2138 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2139 * mailbox event handler function with no lock held. This function
2140 * will wake up the thread waiting on the wait queue pointed to by context1
2141 * of the mailbox.
2144 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2146 wait_queue_head_t *pdone_q;
2147 unsigned long drvr_flag;
2150 * If pdone_q is empty, the driver thread gave up waiting and
2151 * continued running.
2153 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2154 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2155 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2157 wake_up_interruptible(pdone_q);
2158 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
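/*
 * Illustrative sketch, not driver code: the waiter side this handler pairs
 * with. The issuer parks a wait-queue head in context1, sets this routine
 * as the completion handler, and sleeps until LPFC_MBX_WAKE shows up in
 * mbox_flag (the local variable names here are assumptions):
 */
#if 0
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;

	pmboxq->context1 = &done_q;
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				msecs_to_jiffies(timeout * 1000));
#endif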
2164 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2165 * @phba: Pointer to HBA context object.
2166 * @pmb: Pointer to mailbox object.
2168 * This function is the default mailbox completion handler. It
2169 * frees the memory resources associated with the completed mailbox
2170 * command. If the completed command is a REG_LOGIN mailbox command,
2171 * this function will issue a UREG_LOGIN to re-claim the RPI.
2174 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2176 struct lpfc_vport *vport = pmb->vport;
2177 struct lpfc_dmabuf *mp;
2178 struct lpfc_nodelist *ndlp;
2179 struct Scsi_Host *shost;
2183 mp = (struct lpfc_dmabuf *) (pmb->context1);
2186 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2191 * If a REG_LOGIN succeeded after the node was destroyed or the node
2192 * is in re-discovery, the driver needs to clean up the RPI.
2194 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2195 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2196 !pmb->u.mb.mbxStatus) {
2197 rpi = pmb->u.mb.un.varWords[0];
2198 vpi = pmb->u.mb.un.varRegLogin.vpi;
2199 lpfc_unreg_login(phba, vpi, rpi, pmb);
2201 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2202 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2203 if (rc != MBX_NOT_FINISHED)
2207 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2208 !(phba->pport->load_flag & FC_UNLOADING) &&
2209 !pmb->u.mb.mbxStatus) {
2210 shost = lpfc_shost_from_vport(vport);
2211 spin_lock_irq(shost->host_lock);
2212 vport->vpi_state |= LPFC_VPI_REGISTERED;
2213 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2214 spin_unlock_irq(shost->host_lock);
2217 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2218 ndlp = (struct lpfc_nodelist *)pmb->context2;
2220 pmb->context2 = NULL;
2223 /* Check security permission status on INIT_LINK mailbox command */
2224 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2225 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2226 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2227 "2860 SLI authentication is required "
2228 "for INIT_LINK but has not done yet\n");
2230 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2231 lpfc_sli4_mbox_cmd_free(phba, pmb);
2233 mempool_free(pmb, phba->mbox_mem_pool);
2236 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2237 * @phba: Pointer to HBA context object.
2238 * @pmb: Pointer to mailbox object.
2240 * This function is the unreg rpi mailbox completion handler. It
2241 * frees the memory resources associated with the completed mailbox
2242 * command. An additional reference is put on the ndlp to prevent
2243 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2244 * the unreg mailbox command completes; this routine puts the
2249 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2251 struct lpfc_vport *vport = pmb->vport;
2252 struct lpfc_nodelist *ndlp;
2254 ndlp = pmb->context1;
2255 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2256 if (phba->sli_rev == LPFC_SLI_REV4 &&
2257 (bf_get(lpfc_sli_intf_if_type,
2258 &phba->sli4_hba.sli_intf) ==
2259 LPFC_SLI_INTF_IF_TYPE_2)) {
2261 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2262 "0010 UNREG_LOGIN vpi:%x "
2263 "rpi:%x DID:%x map:%x %p\n",
2264 vport->vpi, ndlp->nlp_rpi,
2266 ndlp->nlp_usg_map, ndlp);
2267 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2273 mempool_free(pmb, phba->mbox_mem_pool);
2277 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2278 * @phba: Pointer to HBA context object.
2280 * This function is called with no lock held. This function processes all
2281 * the completed mailbox commands and gives them to upper layers. The interrupt
2282 * service routine processes the mailbox completion interrupt and adds completed
2283 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2284 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2285 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2286 * function returns the mailbox commands to the upper layer by calling the
2287 * completion handler function of each mailbox.
2290 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2297 phba->sli.slistat.mbox_event++;
2299 /* Get all completed mailbox buffers into the cmplq */
2300 spin_lock_irq(&phba->hbalock);
2301 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2302 spin_unlock_irq(&phba->hbalock);
2304 /* Get a Mailbox buffer to setup mailbox commands for callback */
2306 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2312 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2314 lpfc_debugfs_disc_trc(pmb->vport,
2315 LPFC_DISC_TRC_MBOX_VPORT,
2316 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2317 (uint32_t)pmbox->mbxCommand,
2318 pmbox->un.varWords[0],
2319 pmbox->un.varWords[1]);
2322 lpfc_debugfs_disc_trc(phba->pport,
2324 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2325 (uint32_t)pmbox->mbxCommand,
2326 pmbox->un.varWords[0],
2327 pmbox->un.varWords[1]);
2332 * It is a fatal error if an unknown mbox command completes.
2334 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2336 /* Unknown mailbox command compl */
2337 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2338 "(%d):0323 Unknown Mailbox command "
2339 "x%x (x%x/x%x) Cmpl\n",
2340 pmb->vport ? pmb->vport->vpi : 0,
2342 lpfc_sli_config_mbox_subsys_get(phba,
2344 lpfc_sli_config_mbox_opcode_get(phba,
2346 phba->link_state = LPFC_HBA_ERROR;
2347 phba->work_hs = HS_FFER3;
2348 lpfc_handle_eratt(phba);
2352 if (pmbox->mbxStatus) {
2353 phba->sli.slistat.mbox_stat_err++;
2354 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2355 /* Mbox cmd cmpl error - RETRYing */
2356 lpfc_printf_log(phba, KERN_INFO,
2358 "(%d):0305 Mbox cmd cmpl "
2359 "error - RETRYing Data: x%x "
2360 "(x%x/x%x) x%x x%x x%x\n",
2361 pmb->vport ? pmb->vport->vpi : 0,
2363 lpfc_sli_config_mbox_subsys_get(phba,
2365 lpfc_sli_config_mbox_opcode_get(phba,
2368 pmbox->un.varWords[0],
2369 pmb->vport->port_state);
2370 pmbox->mbxStatus = 0;
2371 pmbox->mbxOwner = OWN_HOST;
2372 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2373 if (rc != MBX_NOT_FINISHED)
2378 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2379 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2380 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2381 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2383 pmb->vport ? pmb->vport->vpi : 0,
2385 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2386 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2388 *((uint32_t *) pmbox),
2389 pmbox->un.varWords[0],
2390 pmbox->un.varWords[1],
2391 pmbox->un.varWords[2],
2392 pmbox->un.varWords[3],
2393 pmbox->un.varWords[4],
2394 pmbox->un.varWords[5],
2395 pmbox->un.varWords[6],
2396 pmbox->un.varWords[7],
2397 pmbox->un.varWords[8],
2398 pmbox->un.varWords[9],
2399 pmbox->un.varWords[10]);
2402 pmb->mbox_cmpl(phba, pmb);
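/*
 * Illustrative sketch, not driver code: the handler above is built on the
 * splice-then-process idiom. The shared completion queue is emptied onto a
 * private list inside one short critical section, and the potentially slow
 * per-command callbacks then run with no lock held:
 */
#if 0
	LIST_HEAD(cmplq);
	LPFC_MBOXQ_t *pmb;

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);	/* O(1) move */
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&cmplq)) {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);	/* lock not held */
	}
#endif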
2408 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2409 * @phba: Pointer to HBA context object.
2410 * @pring: Pointer to driver SLI ring object.
2413 * This function is called with no lock held. When the QUE_BUFTAG_BIT
2414 * is set in the tag, the buffer was posted for a particular exchange and
2415 * the function will return the buffer without replacing it.
2416 * If the buffer is for unsolicited ELS or CT traffic, this function
2417 * returns the buffer and also posts another buffer to the firmware.
2419 static struct lpfc_dmabuf *
2420 lpfc_sli_get_buff(struct lpfc_hba *phba,
2421 struct lpfc_sli_ring *pring,
2424 struct hbq_dmabuf *hbq_entry;
2426 if (tag & QUE_BUFTAG_BIT)
2427 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2428 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2431 return &hbq_entry->dbuf;
2435 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2436 * @phba: Pointer to HBA context object.
2437 * @pring: Pointer to driver SLI ring object.
2438 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2439 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2440 * @fch_type: the type for the first frame of the sequence.
2442 * This function is called with no lock held. This function uses the r_ctl and
2443 * type of the received sequence to find the correct callback function to call
2444 * to process the sequence.
2447 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2448 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2453 /* unSolicited Responses */
2454 if (pring->prt[0].profile) {
2455 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2456 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2460 /* We must search, based on rctl / type
2461 for the right routine */
2462 for (i = 0; i < pring->num_mask; i++) {
2463 if ((pring->prt[i].rctl == fch_r_ctl) &&
2464 (pring->prt[i].type == fch_type)) {
2465 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2466 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2467 (phba, pring, saveq);
2475 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2476 * @phba: Pointer to HBA context object.
2477 * @pring: Pointer to driver SLI ring object.
2478 * @saveq: Pointer to the unsolicited iocb.
2480 * This function is called with no lock held by the ring event handler
2481 * when there is an unsolicited iocb posted to the response ring by the
2482 * firmware. This function gets the buffer associated with the iocbs
2483 * and calls the event handler for the ring. This function handles both
2484 * qring buffers and hbq buffers.
2485 * When the function returns 1 the caller can free the iocb object otherwise
2486 * upper layer functions will free the iocb objects.
2489 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2490 struct lpfc_iocbq *saveq)
2494 uint32_t Rctl, Type;
2495 struct lpfc_iocbq *iocbq;
2496 struct lpfc_dmabuf *dmzbuf;
2498 irsp = &(saveq->iocb);
2500 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2501 if (pring->lpfc_sli_rcv_async_status)
2502 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2504 lpfc_printf_log(phba,
2507 "0316 Ring %d handler: unexpected "
2508 "ASYNC_STATUS iocb received evt_code "
2511 irsp->un.asyncstat.evt_code);
2515 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2516 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2517 if (irsp->ulpBdeCount > 0) {
2518 dmzbuf = lpfc_sli_get_buff(phba, pring,
2519 irsp->un.ulpWord[3]);
2520 lpfc_in_buf_free(phba, dmzbuf);
2523 if (irsp->ulpBdeCount > 1) {
2524 dmzbuf = lpfc_sli_get_buff(phba, pring,
2525 irsp->unsli3.sli3Words[3]);
2526 lpfc_in_buf_free(phba, dmzbuf);
2529 if (irsp->ulpBdeCount > 2) {
2530 dmzbuf = lpfc_sli_get_buff(phba, pring,
2531 irsp->unsli3.sli3Words[7]);
2532 lpfc_in_buf_free(phba, dmzbuf);
2538 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2539 if (irsp->ulpBdeCount != 0) {
2540 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2541 irsp->un.ulpWord[3]);
2542 if (!saveq->context2)
2543 lpfc_printf_log(phba,
2546 "0341 Ring %d Cannot find buffer for "
2547 "an unsolicited iocb. tag 0x%x\n",
2549 irsp->un.ulpWord[3]);
2551 if (irsp->ulpBdeCount == 2) {
2552 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2553 irsp->unsli3.sli3Words[7]);
2554 if (!saveq->context3)
2555 lpfc_printf_log(phba,
2558 "0342 Ring %d Cannot find buffer for an"
2559 " unsolicited iocb. tag 0x%x\n",
2561 irsp->unsli3.sli3Words[7]);
2563 list_for_each_entry(iocbq, &saveq->list, list) {
2564 irsp = &(iocbq->iocb);
2565 if (irsp->ulpBdeCount != 0) {
2566 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2567 irsp->un.ulpWord[3]);
2568 if (!iocbq->context2)
2569 lpfc_printf_log(phba,
2572 "0343 Ring %d Cannot find "
2573 "buffer for an unsolicited iocb"
2574 ". tag 0x%x\n", pring->ringno,
2575 irsp->un.ulpWord[3]);
2577 if (irsp->ulpBdeCount == 2) {
2578 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2579 irsp->unsli3.sli3Words[7]);
2580 if (!iocbq->context3)
2581 lpfc_printf_log(phba,
2584 "0344 Ring %d Cannot find "
2585 "buffer for an unsolicited "
2588 irsp->unsli3.sli3Words[7]);
2592 if (irsp->ulpBdeCount != 0 &&
2593 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2594 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2597 /* search continue save q for same XRI */
2598 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2599 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2600 saveq->iocb.unsli3.rcvsli3.ox_id) {
2601 list_add_tail(&saveq->list, &iocbq->list);
2607 list_add_tail(&saveq->clist,
2608 &pring->iocb_continue_saveq);
2609 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2610 list_del_init(&iocbq->clist);
2612 irsp = &(saveq->iocb);
2616 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2617 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2618 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2619 Rctl = FC_RCTL_ELS_REQ;
2622 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2623 Rctl = w5p->hcsw.Rctl;
2624 Type = w5p->hcsw.Type;
2626 /* Firmware Workaround */
2627 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2628 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2629 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2630 Rctl = FC_RCTL_ELS_REQ;
2632 w5p->hcsw.Rctl = Rctl;
2633 w5p->hcsw.Type = Type;
2637 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2638 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2639 "0313 Ring %d handler: unexpected Rctl x%x "
2640 "Type x%x received\n",
2641 pring->ringno, Rctl, Type);
2647 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2648 * @phba: Pointer to HBA context object.
2649 * @pring: Pointer to driver SLI ring object.
2650 * @prspiocb: Pointer to response iocb object.
2652 * This function looks up the iocb_lookup table to get the command iocb
2653 * corresponding to the given response iocb using the iotag of the
2654 * response iocb. This function is called with the hbalock held.
2655 * This function returns the command iocb object if it finds the command
2656 * iocb, otherwise it returns NULL.
2658 static struct lpfc_iocbq *
2659 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2660 struct lpfc_sli_ring *pring,
2661 struct lpfc_iocbq *prspiocb)
2663 struct lpfc_iocbq *cmd_iocb = NULL;
2665 lockdep_assert_held(&phba->hbalock);
2667 iotag = prspiocb->iocb.ulpIoTag;
2669 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2670 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2671 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2672 /* remove from txcmpl queue list */
2673 list_del_init(&cmd_iocb->list);
2674 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2679 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2680 "0317 iotag x%x is out of "
2681 "range: max iotag x%x wd0 x%x\n",
2682 iotag, phba->sli.last_iotag,
2683 *(((uint32_t *) &prspiocb->iocb) + 7));
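/*
 * Illustrative sketch, not driver code: iocbq_lookup is a flat array
 * indexed by iotag, so mapping a response back to its command is O(1).
 * Validate the tag against sli.last_iotag, index the table, and pull the
 * command off the txcmplq. A reduced, hypothetical helper:
 */
#if 0
static struct lpfc_iocbq *
iotag_to_cmd(struct lpfc_hba *phba, uint16_t iotag)
{
	struct lpfc_iocbq *cmd = NULL;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd = phba->sli.iocbq_lookup[iotag];	/* O(1) index */
		if (cmd && (cmd->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
			list_del_init(&cmd->list);	/* off the txcmplq */
			cmd->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		}
	}
	return cmd;
}
#endif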
2688 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2689 * @phba: Pointer to HBA context object.
2690 * @pring: Pointer to driver SLI ring object.
2693 * This function looks up the iocb_lookup table to get the command iocb
2694 * corresponding to the given iotag. This function is called with the
2695 * hbalock held.
2696 * This function returns the command iocb object if it finds the command
2697 * iocb, otherwise it returns NULL.
2699 static struct lpfc_iocbq *
2700 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2701 struct lpfc_sli_ring *pring, uint16_t iotag)
2703 struct lpfc_iocbq *cmd_iocb = NULL;
2705 lockdep_assert_held(&phba->hbalock);
2706 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2707 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2708 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2709 /* remove from txcmpl queue list */
2710 list_del_init(&cmd_iocb->list);
2711 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2716 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2717 "0372 iotag x%x lookup error: max iotag (x%x) "
2719 iotag, phba->sli.last_iotag,
2720 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
2725 * lpfc_sli_process_sol_iocb - process solicited iocb completion
2726 * @phba: Pointer to HBA context object.
2727 * @pring: Pointer to driver SLI ring object.
2728 * @saveq: Pointer to the response iocb to be processed.
2730 * This function is called by the ring event handler for non-fcp
2731 * rings when there is a new response iocb in the response ring.
2732 * The caller is not required to hold any locks. This function
2733 * gets the command iocb associated with the response iocb and
2734 * calls the completion handler for the command iocb. If there
2735 * is no completion handler, the function will free the resources
2736 * associated with command iocb. If the response iocb is for
2737 * an already aborted command iocb, the status of the completion
2738 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2739 * This function always returns 1.
2742 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2743 struct lpfc_iocbq *saveq)
2745 struct lpfc_iocbq *cmdiocbp;
2747 unsigned long iflag;
2749 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2750 spin_lock_irqsave(&phba->hbalock, iflag);
2751 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2752 spin_unlock_irqrestore(&phba->hbalock, iflag);
2755 if (cmdiocbp->iocb_cmpl) {
2757 * If an ELS command failed send an event to mgmt
2760 if (saveq->iocb.ulpStatus &&
2761 (pring->ringno == LPFC_ELS_RING) &&
2762 (cmdiocbp->iocb.ulpCommand ==
2763 CMD_ELS_REQUEST64_CR))
2764 lpfc_send_els_failure_event(phba,
2768 * Post all ELS completions to the worker thread.
2769 * All other are passed to the completion callback.
2771 if (pring->ringno == LPFC_ELS_RING) {
2772 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2773 (cmdiocbp->iocb_flag &
2774 LPFC_DRIVER_ABORTED)) {
2775 spin_lock_irqsave(&phba->hbalock,
2777 cmdiocbp->iocb_flag &=
2778 ~LPFC_DRIVER_ABORTED;
2779 spin_unlock_irqrestore(&phba->hbalock,
2781 saveq->iocb.ulpStatus =
2782 IOSTAT_LOCAL_REJECT;
2783 saveq->iocb.un.ulpWord[4] =
2786 /* Firmware could still be in progress
2787 * of DMAing payload, so don't free data
2788 * buffer till after a hbeat.
2790 spin_lock_irqsave(&phba->hbalock,
2792 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2793 spin_unlock_irqrestore(&phba->hbalock,
2796 if (phba->sli_rev == LPFC_SLI_REV4) {
2797 if (saveq->iocb_flag &
2798 LPFC_EXCHANGE_BUSY) {
2799 /* Set cmdiocb flag for the
2800 * exchange busy so sgl (xri)
2801 * will not be released until
2802 * the abort xri is received
2806 &phba->hbalock, iflag);
2807 cmdiocbp->iocb_flag |=
2809 spin_unlock_irqrestore(
2810 &phba->hbalock, iflag);
2812 if (cmdiocbp->iocb_flag &
2813 LPFC_DRIVER_ABORTED) {
2815 * Clear LPFC_DRIVER_ABORTED
2816 * bit in case it was driver
2820 &phba->hbalock, iflag);
2821 cmdiocbp->iocb_flag &=
2822 ~LPFC_DRIVER_ABORTED;
2823 spin_unlock_irqrestore(
2824 &phba->hbalock, iflag);
2825 cmdiocbp->iocb.ulpStatus =
2826 IOSTAT_LOCAL_REJECT;
2827 cmdiocbp->iocb.un.ulpWord[4] =
2828 IOERR_ABORT_REQUESTED;
2830 * For SLI4, irsiocb contains
2831 * NO_XRI in sli_xritag, it
2832 * shall not affect releasing
2833 * sgl (xri) process.
2835 saveq->iocb.ulpStatus =
2836 IOSTAT_LOCAL_REJECT;
2837 saveq->iocb.un.ulpWord[4] =
2840 &phba->hbalock, iflag);
2842 LPFC_DELAY_MEM_FREE;
2843 spin_unlock_irqrestore(
2844 &phba->hbalock, iflag);
2848 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2850 lpfc_sli_release_iocbq(phba, cmdiocbp);
2853 * Unknown initiating command based on the response iotag.
2854 * This could be the case on the ELS ring because of
2855 * lpfc_els_abort().
2857 if (pring->ringno != LPFC_ELS_RING) {
2859 * Ring <ringno> handler: unexpected completion IoTag
2862 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2863 "0322 Ring %d handler: "
2864 "unexpected completion IoTag x%x "
2865 "Data: x%x x%x x%x x%x\n",
2867 saveq->iocb.ulpIoTag,
2868 saveq->iocb.ulpStatus,
2869 saveq->iocb.un.ulpWord[4],
2870 saveq->iocb.ulpCommand,
2871 saveq->iocb.ulpContext);
2879 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2880 * @phba: Pointer to HBA context object.
2881 * @pring: Pointer to driver SLI ring object.
2883 * This function is called from the iocb ring event handlers when
2884 * put pointer is ahead of the get pointer for a ring. This function signals
2885 * an error attention condition to the worker thread and the worker
2886 * thread will transition the HBA to offline state.
2889 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2891 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2893 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2894 * rsp ring <portRspMax>
2896 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2897 "0312 Ring %d handler: portRspPut %d "
2898 "is bigger than rsp ring %d\n",
2899 pring->ringno, le32_to_cpu(pgp->rspPutInx),
2900 pring->sli.sli3.numRiocb);
2902 phba->link_state = LPFC_HBA_ERROR;
2905 * All error attention handlers are posted to
2908 phba->work_ha |= HA_ERATT;
2909 phba->work_hs = HS_FFER3;
2911 lpfc_worker_wake_up(phba);
2917 * lpfc_poll_eratt - Error attention polling timer timeout handler
2918 * @ptr: Pointer to address of HBA context object.
2920 * This function is invoked by the Error Attention polling timer when the
2921 * timer times out. It will check the SLI Error Attention register for
2922 * possible attention events. If so, it will post an Error Attention event
2923 * and wake up worker thread to process it. Otherwise, it will set up the
2924 * Error Attention polling timer for the next poll.
2926 void lpfc_poll_eratt(unsigned long ptr)
2928 struct lpfc_hba *phba;
2930 uint64_t sli_intr, cnt;
2932 phba = (struct lpfc_hba *)ptr;
2934 /* Here we also keep track of the hba's interrupts per second */
2935 sli_intr = phba->sli.slistat.sli_intr;
2937 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2938 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2941 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2943 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
2944 do_div(cnt, phba->eratt_poll_interval);
2945 phba->sli.slistat.sli_ips = cnt;
2947 phba->sli.slistat.sli_prev_intr = sli_intr;
2949 /* Check chip HA register for error event */
2950 eratt = lpfc_sli_check_eratt(phba);
2953 /* Tell the worker thread there is work to do */
2954 lpfc_worker_wake_up(phba);
2956 /* Restart the timer for next eratt poll */
2957 mod_timer(&phba->eratt_poll,
2959 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
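/*
 * Illustrative sketch, not driver code: the interrupts-per-second math
 * above, isolated. The counter is a monotonically increasing u64, so a
 * smaller current value means it wrapped and the delta is taken across the
 * wrap; do_div() is used because 32-bit kernels lack a native 64-bit divide:
 */
#if 0
	uint64_t prev = phba->sli.slistat.sli_prev_intr;
	uint64_t cur = phba->sli.slistat.sli_intr;
	uint64_t delta;

	if (prev > cur)				/* counter wrapped */
		delta = ((uint64_t)(-1) - prev) + cur;
	else
		delta = cur - prev;
	/* divide by the poll interval, which is in seconds */
	do_div(delta, phba->eratt_poll_interval);
	phba->sli.slistat.sli_ips = delta;
#endif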
2965 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2966 * @phba: Pointer to HBA context object.
2967 * @pring: Pointer to driver SLI ring object.
2968 * @mask: Host attention register mask for this ring.
2970 * This function is called from the interrupt context when there is a ring
2971 * event for the fcp ring. The caller does not hold any lock.
2972 * The function processes each response iocb in the response ring until it
2973 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
2974 * LE bit set. The function will call the completion handler of the command iocb
2975 * if the response iocb indicates a completion for a command iocb or it is
2976 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2977 * function if this is an unsolicited iocb.
2978 * This routine presumes LPFC_FCP_RING handling and doesn't bother
2979 * to check it explicitly.
2982 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2983 struct lpfc_sli_ring *pring, uint32_t mask)
2985 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2986 IOCB_t *irsp = NULL;
2987 IOCB_t *entry = NULL;
2988 struct lpfc_iocbq *cmdiocbq = NULL;
2989 struct lpfc_iocbq rspiocbq;
2991 uint32_t portRspPut, portRspMax;
2993 lpfc_iocb_type type;
2994 unsigned long iflag;
2995 uint32_t rsp_cmpl = 0;
2997 spin_lock_irqsave(&phba->hbalock, iflag);
2998 pring->stats.iocb_event++;
3001 * The next available response entry should never exceed the maximum
3002 * entries. If it does, treat it as an adapter hardware error.
3004 portRspMax = pring->sli.sli3.numRiocb;
3005 portRspPut = le32_to_cpu(pgp->rspPutInx);
3006 if (unlikely(portRspPut >= portRspMax)) {
3007 lpfc_sli_rsp_pointers_error(phba, pring);
3008 spin_unlock_irqrestore(&phba->hbalock, iflag);
3011 if (phba->fcp_ring_in_use) {
3012 spin_unlock_irqrestore(&phba->hbalock, iflag);
3015 phba->fcp_ring_in_use = 1;
3018 while (pring->sli.sli3.rspidx != portRspPut) {
3020 * Fetch an entry off the ring and copy it into a local data
3021 * structure. The copy involves a byte-swap since the
3022 * network byte order and the PCI byte order are different.
3024 entry = lpfc_resp_iocb(phba, pring);
3025 phba->last_completion_time = jiffies;
3027 if (++pring->sli.sli3.rspidx >= portRspMax)
3028 pring->sli.sli3.rspidx = 0;
3030 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3031 (uint32_t *) &rspiocbq.iocb,
3032 phba->iocb_rsp_size);
3033 INIT_LIST_HEAD(&(rspiocbq.list));
3034 irsp = &rspiocbq.iocb;
3036 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3037 pring->stats.iocb_rsp++;
3040 if (unlikely(irsp->ulpStatus)) {
3042 * If resource errors reported from HBA, reduce
3043 * queuedepths of the SCSI device.
3045 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3046 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3047 IOERR_NO_RESOURCES)) {
3048 spin_unlock_irqrestore(&phba->hbalock, iflag);
3049 phba->lpfc_rampdown_queue_depth(phba);
3050 spin_lock_irqsave(&phba->hbalock, iflag);
3053 /* Rsp ring <ringno> error: IOCB */
3054 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3055 "0336 Rsp Ring %d error: IOCB Data: "
3056 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3058 irsp->un.ulpWord[0],
3059 irsp->un.ulpWord[1],
3060 irsp->un.ulpWord[2],
3061 irsp->un.ulpWord[3],
3062 irsp->un.ulpWord[4],
3063 irsp->un.ulpWord[5],
3064 *(uint32_t *)&irsp->un1,
3065 *((uint32_t *)&irsp->un1 + 1));
3069 case LPFC_ABORT_IOCB:
3072 * Idle exchange closed via ABTS from port. No iocb
3073 * resources need to be recovered.
3075 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3076 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3077 "0333 IOCB cmd 0x%x"
3078 " processed. Skipping"
3084 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3086 if (unlikely(!cmdiocbq))
3088 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3089 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3090 if (cmdiocbq->iocb_cmpl) {
3091 spin_unlock_irqrestore(&phba->hbalock, iflag);
3092 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3094 spin_lock_irqsave(&phba->hbalock, iflag);
3097 case LPFC_UNSOL_IOCB:
3098 spin_unlock_irqrestore(&phba->hbalock, iflag);
3099 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3100 spin_lock_irqsave(&phba->hbalock, iflag);
3103 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3104 char adaptermsg[LPFC_MAX_ADPTMSG];
3105 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3106 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3108 dev_warn(&((phba->pcidev)->dev),
3110 phba->brd_no, adaptermsg);
3112 /* Unknown IOCB command */
3113 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3114 "0334 Unknown IOCB command "
3115 "Data: x%x, x%x x%x x%x x%x\n",
3116 type, irsp->ulpCommand,
3125 * The response IOCB has been processed. Update the ring
3126 * pointer in SLIM. If the port response put pointer has not
3127 * been updated, sync the pgp->rspPutInx and fetch the new port
3128 * response put pointer.
3130 writel(pring->sli.sli3.rspidx,
3131 &phba->host_gp[pring->ringno].rspGetInx);
3133 if (pring->sli.sli3.rspidx == portRspPut)
3134 portRspPut = le32_to_cpu(pgp->rspPutInx);
3137 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3138 pring->stats.iocb_rsp_full++;
3139 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3140 writel(status, phba->CAregaddr);
3141 readl(phba->CAregaddr);
3143 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3144 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3145 pring->stats.iocb_cmd_empty++;
3147 /* Force update of the local copy of cmdGetInx */
3148 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3149 lpfc_sli_resume_iocb(phba, pring);
3151 if ((pring->lpfc_sli_cmd_available))
3152 (pring->lpfc_sli_cmd_available) (phba, pring);
3156 phba->fcp_ring_in_use = 0;
3157 spin_unlock_irqrestore(&phba->hbalock, iflag);
3162 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3163 * @phba: Pointer to HBA context object.
3164 * @pring: Pointer to driver SLI ring object.
3165 * @rspiocbp: Pointer to driver response IOCB object.
3167 * This function is called from the worker thread when there is a slow-path
3168 * response IOCB to process. This function chains all the response iocbs until
3169 * seeing the iocb with the LE bit set. The function will call
3170 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3171 * completion of a command iocb. The function will call the
3172 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3173 * The function frees the resources or calls the completion handler if this
3174 * iocb is an abort completion. The function returns NULL when the response
3175 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3176 * this function shall chain the iocb on to the iocb_continueq and return the
3177 * response iocb passed in.
3179 static struct lpfc_iocbq *
3180 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3181 struct lpfc_iocbq *rspiocbp)
3183 struct lpfc_iocbq *saveq;
3184 struct lpfc_iocbq *cmdiocbp;
3185 struct lpfc_iocbq *next_iocb;
3186 IOCB_t *irsp = NULL;
3187 uint32_t free_saveq;
3188 uint8_t iocb_cmd_type;
3189 lpfc_iocb_type type;
3190 unsigned long iflag;
3193 spin_lock_irqsave(&phba->hbalock, iflag);
3194 /* First add the response iocb to the continueq list */
3195 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3196 pring->iocb_continueq_cnt++;
3198 /* Now, determine whether the list is completed for processing */
3199 irsp = &rspiocbp->iocb;
3202 * By default, the driver expects to free all resources
3203 * associated with this iocb completion.
3206 saveq = list_get_first(&pring->iocb_continueq,
3207 struct lpfc_iocbq, list);
3208 irsp = &(saveq->iocb);
3209 list_del_init(&pring->iocb_continueq);
3210 pring->iocb_continueq_cnt = 0;
3212 pring->stats.iocb_rsp++;
3215 * If resource errors reported from HBA, reduce
3216 * queuedepths of the SCSI device.
3218 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3219 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3220 IOERR_NO_RESOURCES)) {
3221 spin_unlock_irqrestore(&phba->hbalock, iflag);
3222 phba->lpfc_rampdown_queue_depth(phba);
3223 spin_lock_irqsave(&phba->hbalock, iflag);
3226 if (irsp->ulpStatus) {
3227 /* Rsp ring <ringno> error: IOCB */
3228 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3229 "0328 Rsp Ring %d error: "
3234 "x%x x%x x%x x%x\n",
3236 irsp->un.ulpWord[0],
3237 irsp->un.ulpWord[1],
3238 irsp->un.ulpWord[2],
3239 irsp->un.ulpWord[3],
3240 irsp->un.ulpWord[4],
3241 irsp->un.ulpWord[5],
3242 *(((uint32_t *) irsp) + 6),
3243 *(((uint32_t *) irsp) + 7),
3244 *(((uint32_t *) irsp) + 8),
3245 *(((uint32_t *) irsp) + 9),
3246 *(((uint32_t *) irsp) + 10),
3247 *(((uint32_t *) irsp) + 11),
3248 *(((uint32_t *) irsp) + 12),
3249 *(((uint32_t *) irsp) + 13),
3250 *(((uint32_t *) irsp) + 14),
3251 *(((uint32_t *) irsp) + 15));
3255 * Fetch the IOCB command type and call the correct completion
3256 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3257 * get freed back to the lpfc_iocb_list by the discovery
3260 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3261 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3264 spin_unlock_irqrestore(&phba->hbalock, iflag);
3265 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3266 spin_lock_irqsave(&phba->hbalock, iflag);
3269 case LPFC_UNSOL_IOCB:
3270 spin_unlock_irqrestore(&phba->hbalock, iflag);
3271 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3272 spin_lock_irqsave(&phba->hbalock, iflag);
3277 case LPFC_ABORT_IOCB:
3279 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3280 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3283 /* Call the specified completion routine */
3284 if (cmdiocbp->iocb_cmpl) {
3285 spin_unlock_irqrestore(&phba->hbalock,
3287 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3289 spin_lock_irqsave(&phba->hbalock,
3292 __lpfc_sli_release_iocbq(phba,
3297 case LPFC_UNKNOWN_IOCB:
3298 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3299 char adaptermsg[LPFC_MAX_ADPTMSG];
3300 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3301 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3303 dev_warn(&((phba->pcidev)->dev),
3305 phba->brd_no, adaptermsg);
3307 /* Unknown IOCB command */
3308 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3309 "0335 Unknown IOCB "
3310 "command Data: x%x "
3321 list_for_each_entry_safe(rspiocbp, next_iocb,
3322 &saveq->list, list) {
3323 list_del_init(&rspiocbp->list);
3324 __lpfc_sli_release_iocbq(phba, rspiocbp);
3326 __lpfc_sli_release_iocbq(phba, saveq);
3330 spin_unlock_irqrestore(&phba->hbalock, iflag);
3335 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3336 * @phba: Pointer to HBA context object.
3337 * @pring: Pointer to driver SLI ring object.
3338 * @mask: Host attention register mask for this ring.
3340 * This routine wraps the actual slow_ring event process routine from the
3341 * API jump table function pointer from the lpfc_hba struct.
3344 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3345 struct lpfc_sli_ring *pring, uint32_t mask)
3347 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3351 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3352 * @phba: Pointer to HBA context object.
3353 * @pring: Pointer to driver SLI ring object.
3354 * @mask: Host attention register mask for this ring.
3356 * This function is called from the worker thread when there is a ring event
3357 * for non-fcp rings. The caller does not hold any lock. The function will
3358 * remove each response iocb in the response ring and calls the handle
3359 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3362 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3363 struct lpfc_sli_ring *pring, uint32_t mask)
3365 struct lpfc_pgp *pgp;
3367 IOCB_t *irsp = NULL;
3368 struct lpfc_iocbq *rspiocbp = NULL;
3369 uint32_t portRspPut, portRspMax;
3370 unsigned long iflag;
3373 pgp = &phba->port_gp[pring->ringno];
3374 spin_lock_irqsave(&phba->hbalock, iflag);
3375 pring->stats.iocb_event++;
3378 * The next available response entry should never exceed the maximum
3379 * entries. If it does, treat it as an adapter hardware error.
3381 portRspMax = pring->sli.sli3.numRiocb;
3382 portRspPut = le32_to_cpu(pgp->rspPutInx);
3383 if (portRspPut >= portRspMax) {
3385 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3386 * rsp ring <portRspMax>
3388 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3389 "0303 Ring %d handler: portRspPut %d "
3390 "is bigger than rsp ring %d\n",
3391 pring->ringno, portRspPut, portRspMax);
3393 phba->link_state = LPFC_HBA_ERROR;
3394 spin_unlock_irqrestore(&phba->hbalock, iflag);
3396 phba->work_hs = HS_FFER3;
3397 lpfc_handle_eratt(phba);
3403 while (pring->sli.sli3.rspidx != portRspPut) {
3405 * Build a completion list and call the appropriate handler.
3406 * The process is to get the next available response iocb, get
3407 * a free iocb from the list, copy the response data into the
3408 * free iocb, insert to the continuation list, and update the
3409 * next response index to slim. This process makes response
3410 * iocbs in the ring available to DMA as fast as possible but
3411 * pays a penalty for a copy operation. Since the iocb is
3412 * only 32 bytes, this penalty is considered small relative to
3413 * the PCI reads for register values and a slim write. When
3414 * the ulpLe field is set, the entire Command has been
3417 entry = lpfc_resp_iocb(phba, pring);
3419 phba->last_completion_time = jiffies;
3420 rspiocbp = __lpfc_sli_get_iocbq(phba);
3421 if (rspiocbp == NULL) {
3422 printk(KERN_ERR "%s: out of buffers! Failing "
3423 "completion.\n", __func__);
3427 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3428 phba->iocb_rsp_size);
3429 irsp = &rspiocbp->iocb;
3431 if (++pring->sli.sli3.rspidx >= portRspMax)
3432 pring->sli.sli3.rspidx = 0;
3434 if (pring->ringno == LPFC_ELS_RING) {
3435 lpfc_debugfs_slow_ring_trc(phba,
3436 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3437 *(((uint32_t *) irsp) + 4),
3438 *(((uint32_t *) irsp) + 6),
3439 *(((uint32_t *) irsp) + 7));
3442 writel(pring->sli.sli3.rspidx,
3443 &phba->host_gp[pring->ringno].rspGetInx);
3445 spin_unlock_irqrestore(&phba->hbalock, iflag);
3446 /* Handle the response IOCB */
3447 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3448 spin_lock_irqsave(&phba->hbalock, iflag);
3451 * If the port response put pointer has not been updated, sync
3452 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3453 * response put pointer.
3455 if (pring->sli.sli3.rspidx == portRspPut) {
3456 portRspPut = le32_to_cpu(pgp->rspPutInx);
3458 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3460 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3461 /* At least one response entry has been freed */
3462 pring->stats.iocb_rsp_full++;
3463 /* SET RxRE_RSP in Chip Att register */
3464 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3465 writel(status, phba->CAregaddr);
3466 readl(phba->CAregaddr); /* flush */
3468 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3469 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3470 pring->stats.iocb_cmd_empty++;
3472 /* Force update of the local copy of cmdGetInx */
3473 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3474 lpfc_sli_resume_iocb(phba, pring);
3476 if ((pring->lpfc_sli_cmd_available))
3477 (pring->lpfc_sli_cmd_available) (phba, pring);
3481 spin_unlock_irqrestore(&phba->hbalock, iflag);
3486 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3487 * @phba: Pointer to HBA context object.
3488 * @pring: Pointer to driver SLI ring object.
3489 * @mask: Host attention register mask for this ring.
3491 * This function is called from the worker thread when there is a pending
3492 * ELS response iocb on the driver internal slow-path response iocb worker
3493 * queue. The caller does not hold any lock. The function will remove each
3494 * response iocb from the response worker queue and calls the handle
3495 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3498 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3499 struct lpfc_sli_ring *pring, uint32_t mask)
3501 struct lpfc_iocbq *irspiocbq;
3502 struct hbq_dmabuf *dmabuf;
3503 struct lpfc_cq_event *cq_event;
3504 unsigned long iflag;
3506 spin_lock_irqsave(&phba->hbalock, iflag);
3507 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3508 spin_unlock_irqrestore(&phba->hbalock, iflag);
3509 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3510 /* Get the response iocb from the head of work queue */
3511 spin_lock_irqsave(&phba->hbalock, iflag);
3512 list_remove_head(&phba->sli4_hba.sp_queue_event,
3513 cq_event, struct lpfc_cq_event, list);
3514 spin_unlock_irqrestore(&phba->hbalock, iflag);
3516 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3517 case CQE_CODE_COMPL_WQE:
3518 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3520 /* Translate ELS WCQE to response IOCBQ */
3521 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3524 lpfc_sli_sp_handle_rspiocb(phba, pring,
3527 case CQE_CODE_RECEIVE:
3528 case CQE_CODE_RECEIVE_V1:
3529 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3531 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3540 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3541 * @phba: Pointer to HBA context object.
3542 * @pring: Pointer to driver SLI ring object.
3544 * This function aborts all iocbs in the given ring and frees all the iocb
3545 * objects in txq. This function issues an abort iocb for all the iocb commands
3546 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3547 * the return of this function. The caller is not required to hold any locks.
3550 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3552 LIST_HEAD(completions);
3553 struct lpfc_iocbq *iocb, *next_iocb;
3555 if (pring->ringno == LPFC_ELS_RING) {
3556 lpfc_fabric_abort_hba(phba);
3559 /* Error everything on txq and txcmplq
3562 if (phba->sli_rev >= LPFC_SLI_REV4) {
3563 spin_lock_irq(&pring->ring_lock);
3564 list_splice_init(&pring->txq, &completions);
3566 spin_unlock_irq(&pring->ring_lock);
3568 spin_lock_irq(&phba->hbalock);
3569 /* Next issue ABTS for everything on the txcmplq */
3570 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3571 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3572 spin_unlock_irq(&phba->hbalock);
3574 spin_lock_irq(&phba->hbalock);
3575 list_splice_init(&pring->txq, &completions);
3578 /* Next issue ABTS for everything on the txcmplq */
3579 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3580 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3581 spin_unlock_irq(&phba->hbalock);
3584 /* Cancel all the IOCBs from the completions list */
3585 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3590 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3591 * @phba: Pointer to HBA context object.
3592 * @pring: Pointer to driver SLI ring object.
3594 * This function aborts all iocbs in the given ring and frees all the iocb
3595 * objects in txq. This function issues an abort iocb for all the iocb commands
3596 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3597 * the return of this function. The caller is not required to hold any locks.
3600 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3602 LIST_HEAD(completions);
3603 struct lpfc_iocbq *iocb, *next_iocb;
3605 if (pring->ringno == LPFC_ELS_RING)
3606 lpfc_fabric_abort_hba(phba);
3608 spin_lock_irq(&phba->hbalock);
3609 /* Next issue ABTS for everything on the txcmplq */
3610 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3611 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3612 spin_unlock_irq(&phba->hbalock);
3617 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3618 * @phba: Pointer to HBA context object.
3619 * @pring: Pointer to driver SLI ring object.
3621 * This function aborts all iocbs in FCP rings and frees all the iocb
3622 * objects in txq. This function issues an abort iocb for all the iocb commands
3623 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3624 * the return of this function. The caller is not required to hold any locks.
3627 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3629 struct lpfc_sli *psli = &phba->sli;
3630 struct lpfc_sli_ring *pring;
3633 /* Look on all the FCP Rings for the iotag */
3634 if (phba->sli_rev >= LPFC_SLI_REV4) {
3635 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3636 pring = phba->sli4_hba.fcp_wq[i]->pring;
3637 lpfc_sli_abort_iocb_ring(phba, pring);
3640 pring = &psli->sli3_ring[LPFC_FCP_RING];
3641 lpfc_sli_abort_iocb_ring(phba, pring);
3646 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3647 * @phba: Pointer to HBA context object.
3649 * This function aborts all wqes in NVME rings. This function issues an
3650 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3651 * the txcmplq are not guaranteed to complete before the return of this
3652 * function. The caller is not required to hold any locks.
3655 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3657 struct lpfc_sli_ring *pring;
3660 if (phba->sli_rev < LPFC_SLI_REV4)
3663 /* Abort all IO on each NVME ring. */
3664 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3665 pring = phba->sli4_hba.nvme_wq[i]->pring;
3666 lpfc_sli_abort_wqe_ring(phba, pring);
3672 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3673 * @phba: Pointer to HBA context object.
3675 * This function flushes all iocbs in the fcp ring and frees all the iocb
3676 * objects in txq and txcmplq. This function will not issue abort iocbs
3677 * for the iocb commands in txcmplq; they will just be returned with
3678 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3679 * slot has been permanently disabled.
3682 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3686 struct lpfc_sli *psli = &phba->sli;
3687 struct lpfc_sli_ring *pring;
3690 spin_lock_irq(&phba->hbalock);
3691 /* Indicate the I/O queues are flushed */
3692 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3693 spin_unlock_irq(&phba->hbalock);
3695 /* Look on all the FCP Rings for the iotag */
3696 if (phba->sli_rev >= LPFC_SLI_REV4) {
3697 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3698 pring = phba->sli4_hba.fcp_wq[i]->pring;
3700 spin_lock_irq(&pring->ring_lock);
3701 /* Retrieve everything on txq */
3702 list_splice_init(&pring->txq, &txq);
3703 /* Retrieve everything on the txcmplq */
3704 list_splice_init(&pring->txcmplq, &txcmplq);
3706 pring->txcmplq_cnt = 0;
3707 spin_unlock_irq(&pring->ring_lock);
3710 lpfc_sli_cancel_iocbs(phba, &txq,
3711 IOSTAT_LOCAL_REJECT,
3713 /* Flush the txcmplq */
3714 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3715 IOSTAT_LOCAL_REJECT,
3719 pring = &psli->sli3_ring[LPFC_FCP_RING];
3721 spin_lock_irq(&phba->hbalock);
3722 /* Retrieve everything on txq */
3723 list_splice_init(&pring->txq, &txq);
3724 /* Retrieve everything on the txcmplq */
3725 list_splice_init(&pring->txcmplq, &txcmplq);
3727 pring->txcmplq_cnt = 0;
3728 spin_unlock_irq(&phba->hbalock);
3731 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3733 /* Flush the txcmplq */
3734 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3740 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
3741 * @phba: Pointer to HBA context object.
3743 * This function flushes all wqes in the nvme rings and frees all resources
3744 * in the txcmplq. This function does not issue abort wqes for the IO
3745 * commands in txcmplq; they will just be returned with
3746 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3747 * slot has been permanently disabled.
3750 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
3753 struct lpfc_sli_ring *pring;
3756 if (phba->sli_rev < LPFC_SLI_REV4)
3759 /* Hint to other driver operations that a flush is in progress. */
3760 spin_lock_irq(&phba->hbalock);
3761 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
3762 spin_unlock_irq(&phba->hbalock);
3764 /* Cycle through all NVME rings and complete each IO with
3765 * a local driver reason code. This is a flush so no
3766 * abort exchange to FW.
3768 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3769 pring = phba->sli4_hba.nvme_wq[i]->pring;
3771 /* Retrieve everything on the txcmplq */
3772 spin_lock_irq(&pring->ring_lock);
3773 list_splice_init(&pring->txcmplq, &txcmplq);
3774 pring->txcmplq_cnt = 0;
3775 spin_unlock_irq(&pring->ring_lock);
3777 /* Flush the txcmplq */
3778 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3779 IOSTAT_LOCAL_REJECT,
3785 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3786 * @phba: Pointer to HBA context object.
3787 * @mask: Bit mask to be checked.
3789 * This function reads the host status register and compares
3790 * with the provided bit mask to check if HBA completed
3791 * the restart. This function will wait in a loop for the
3792 * HBA to complete restart. If the HBA does not restart within
3793 * 15 iterations, the function will reset the HBA again. The
3794 * function returns 1 when the HBA fails to restart, otherwise returns
3795 * zero.
3798 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3804 /* Read the HBA Host Status Register */
3805 if (lpfc_readl(phba->HSregaddr, &status))
3809 * Check status register every 100ms for 5 retries, then every
3810 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3811 * every 2.5 sec for 4.
3812 * Break out of the loop if errors occurred during init.
3814 while (((status & mask) != mask) &&
3815 !(status & HS_FFERM) &&
3827 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3828 lpfc_sli_brdrestart(phba);
3830 /* Read the HBA Host Status Register */
3831 if (lpfc_readl(phba->HSregaddr, &status)) {
3837 /* Check to see if any errors occurred during init */
3838 if ((status & HS_FFERM) || (i >= 20)) {
3839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3840 "2751 Adapter failed to restart, "
3841 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3843 readl(phba->MBslimaddr + 0xa8),
3844 readl(phba->MBslimaddr + 0xac));
3845 phba->link_state = LPFC_HBA_ERROR;
3853 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3854 * @phba: Pointer to HBA context object.
3855 * @mask: Bit mask to be checked.
3857 * This function checks the host status register to check if HBA is
3858 * ready. This function will wait in a loop for the HBA to be ready.
3859 * If the HBA is not ready, the function will reset the HBA PCI
3860 * function again. The function returns 1 when the HBA fails to be ready,
3861 * otherwise returns zero.
3864 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3869 /* Read the HBA Host Status Register */
3870 status = lpfc_sli4_post_status_check(phba);
3873 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3874 lpfc_sli_brdrestart(phba);
3875 status = lpfc_sli4_post_status_check(phba);
3878 /* Check to see if any errors occurred during init */
3880 phba->link_state = LPFC_HBA_ERROR;
3883 phba->sli4_hba.intr_enable = 0;
3889 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3890 * @phba: Pointer to HBA context object.
3891 * @mask: Bit mask to be checked.
3893 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3894 * from the API jump table function pointer from the lpfc_hba struct.
3897 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3899 return phba->lpfc_sli_brdready(phba, mask);
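/*
 * Illustrative sketch, not driver code: lpfc_sli_brdready() is one entry in
 * the driver's SLI API jump table. Each rev-specific routine is bound to a
 * per-HBA function pointer once, at attach time, so hot paths call through
 * the pointer with no revision checks. Hypothetical setup:
 */
#if 0
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
	else
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
#endif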
3902 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3905 * lpfc_reset_barrier - Make HBA ready for HBA reset
3906 * @phba: Pointer to HBA context object.
3908 * This function is called before resetting an HBA. This function is called
3909 * with hbalock held and requests HBA to quiesce DMAs before a reset.
3911 void lpfc_reset_barrier(struct lpfc_hba *phba)
3913 uint32_t __iomem *resp_buf;
3914 uint32_t __iomem *mbox_buf;
3915 volatile uint32_t mbox;
3916 uint32_t hc_copy, ha_copy, resp_data;
3920 lockdep_assert_held(&phba->hbalock);
3922 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3923 if (hdrtype != 0x80 ||
3924 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3925 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3929 * Tell the other part of the chip to suspend temporarily all
3932 resp_buf = phba->MBslimaddr;
3934 /* Disable the error attention */
3935 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3937 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3938 readl(phba->HCregaddr); /* flush */
3939 phba->link_flag |= LS_IGNORE_ERATT;
3941 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3943 if (ha_copy & HA_ERATT) {
3944 /* Clear Chip error bit */
3945 writel(HA_ERATT, phba->HAregaddr);
3946 phba->pport->stopped = 1;
3950 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3951 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3953 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3954 mbox_buf = phba->MBslimaddr;
3955 writel(mbox, mbox_buf);
3957 for (i = 0; i < 50; i++) {
3958 if (lpfc_readl((resp_buf + 1), &resp_data))
3960 if (resp_data != ~(BARRIER_TEST_PATTERN))
3966 if (lpfc_readl((resp_buf + 1), &resp_data))
3968 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
3969 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3970 phba->pport->stopped)
3976 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3978 for (i = 0; i < 500; i++) {
3979 if (lpfc_readl(resp_buf, &resp_data))
3981 if (resp_data != mbox)
3990 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3992 if (!(ha_copy & HA_ERATT))
3998 if (readl(phba->HAregaddr) & HA_ERATT) {
3999 writel(HA_ERATT, phba->HAregaddr);
4000 phba->pport->stopped = 1;
4004 phba->link_flag &= ~LS_IGNORE_ERATT;
4005 writel(hc_copy, phba->HCregaddr);
4006 readl(phba->HCregaddr); /* flush */
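/*
 * Handshake sketch (illustrative only, excluded from the build): the
 * barrier above works by planting BARRIER_TEST_PATTERN in SLIM word 1
 * and waiting for the chip to overwrite it with the one's complement,
 * which indicates outstanding DMAs have quiesced. The helper name is
 * hypothetical.
 */
#if 0
static bool lpfc_example_barrier_acked(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf = phba->MBslimaddr;
	uint32_t resp_data;

	if (lpfc_readl(resp_buf + 1, &resp_data))
		return false;
	/* Chip acknowledges by storing ~BARRIER_TEST_PATTERN */
	return resp_data == ~BARRIER_TEST_PATTERN;
}
#endif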
4010 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4011 * @phba: Pointer to HBA context object.
4013 * This function issues a kill_board mailbox command and waits for
4014 * the error attention interrupt. This function is called for stopping
4015 * the firmware processing. The caller is not required to hold any
4016 * locks. This function calls lpfc_hba_down_post function to free
4017 * any pending commands after the kill. The function returns 1 if it
4018 * fails to kill the board; otherwise it returns 0.
4021 lpfc_sli_brdkill(struct lpfc_hba *phba)
4023 struct lpfc_sli *psli;
4033 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4034 "0329 Kill HBA Data: x%x x%x\n",
4035 phba->pport->port_state, psli->sli_flag);
4037 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4041 /* Disable the error attention */
4042 spin_lock_irq(&phba->hbalock);
4043 if (lpfc_readl(phba->HCregaddr, &status)) {
4044 spin_unlock_irq(&phba->hbalock);
4045 mempool_free(pmb, phba->mbox_mem_pool);
4048 status &= ~HC_ERINT_ENA;
4049 writel(status, phba->HCregaddr);
4050 readl(phba->HCregaddr); /* flush */
4051 phba->link_flag |= LS_IGNORE_ERATT;
4052 spin_unlock_irq(&phba->hbalock);
4054 lpfc_kill_board(phba, pmb);
4055 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4056 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4058 if (retval != MBX_SUCCESS) {
4059 if (retval != MBX_BUSY)
4060 mempool_free(pmb, phba->mbox_mem_pool);
4061 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4062 "2752 KILL_BOARD command failed retval %d\n",
4064 spin_lock_irq(&phba->hbalock);
4065 phba->link_flag &= ~LS_IGNORE_ERATT;
4066 spin_unlock_irq(&phba->hbalock);
4070 spin_lock_irq(&phba->hbalock);
4071 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4072 spin_unlock_irq(&phba->hbalock);
4074 mempool_free(pmb, phba->mbox_mem_pool);
4076 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4077 * attention every 100ms for 3 seconds. If we don't get ERATT after
4078 * 3 seconds we still set HBA_ERROR state because the status of the
4079 * board is now undefined.
4081 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4083 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4085 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4089 del_timer_sync(&psli->mbox_tmo);
4090 if (ha_copy & HA_ERATT) {
4091 writel(HA_ERATT, phba->HAregaddr);
4092 phba->pport->stopped = 1;
4094 spin_lock_irq(&phba->hbalock);
4095 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4096 psli->mbox_active = NULL;
4097 phba->link_flag &= ~LS_IGNORE_ERATT;
4098 spin_unlock_irq(&phba->hbalock);
4100 lpfc_hba_down_post(phba);
4101 phba->link_state = LPFC_HBA_ERROR;
4103 return ha_copy & HA_ERATT ? 0 : 1;
4107 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4108 * @phba: Pointer to HBA context object.
4110 * This function resets the HBA by writing HC_INITFF to the control
4111 * register. After the HBA resets, this function resets all the iocb ring
4112 * indices. This function disables PCI layer parity checking during
4113 * the reset.
4114 * This function returns 0 always.
4115 * The caller is not required to hold any locks.
4118 lpfc_sli_brdreset(struct lpfc_hba *phba)
4120 struct lpfc_sli *psli;
4121 struct lpfc_sli_ring *pring;
4128 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4129 "0325 Reset HBA Data: x%x x%x\n",
4130 phba->pport->port_state, psli->sli_flag);
4132 /* perform board reset */
4133 phba->fc_eventTag = 0;
4134 phba->link_events = 0;
4135 phba->pport->fc_myDID = 0;
4136 phba->pport->fc_prevDID = 0;
4138 /* Turn off parity checking and serr during the physical reset */
4139 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4140 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4142 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4144 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4146 /* Now toggle INITFF bit in the Host Control Register */
4147 writel(HC_INITFF, phba->HCregaddr);
4149 readl(phba->HCregaddr); /* flush */
4150 writel(0, phba->HCregaddr);
4151 readl(phba->HCregaddr); /* flush */
4153 /* Restore PCI cmd register */
4154 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4156 /* Initialize relevant SLI info */
4157 for (i = 0; i < psli->num_rings; i++) {
4158 pring = &psli->sli3_ring[i];
4160 pring->sli.sli3.rspidx = 0;
4161 pring->sli.sli3.next_cmdidx = 0;
4162 pring->sli.sli3.local_getidx = 0;
4163 pring->sli.sli3.cmdidx = 0;
4164 pring->missbufcnt = 0;
4167 phba->link_state = LPFC_WARM_START;
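/*
 * Pattern sketch (illustrative only, excluded from the build): both the
 * SLI3 and SLI4 reset paths bracket the physical reset with the same
 * PCI_COMMAND save/mask/restore sequence so the reset does not raise
 * parity or SERR errors on the bus. The helper name is hypothetical.
 */
#if 0
static void lpfc_example_quiet_pci_reset(struct pci_dev *pdev)
{
	uint16_t cfg_value;

	/* Save the command register, then mask parity/SERR reporting */
	pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cfg_value & ~(PCI_COMMAND_PARITY |
					    PCI_COMMAND_SERR));

	/* ... perform the physical board reset here ... */

	/* Restore the saved command register */
	pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
}
#endif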
4172 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4173 * @phba: Pointer to HBA context object.
4175 * This function resets a SLI4 HBA. It disables PCI layer parity
4176 * checking while it resets the device. The caller is not required to
4177 * hold any locks.
4179 * This function returns 0 always.
4182 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4184 struct lpfc_sli *psli = &phba->sli;
4189 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4190 "0295 Reset HBA Data: x%x x%x x%x\n",
4191 phba->pport->port_state, psli->sli_flag,
4194 /* perform board reset */
4195 phba->fc_eventTag = 0;
4196 phba->link_events = 0;
4197 phba->pport->fc_myDID = 0;
4198 phba->pport->fc_prevDID = 0;
4200 spin_lock_irq(&phba->hbalock);
4201 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4202 phba->fcf.fcf_flag = 0;
4203 spin_unlock_irq(&phba->hbalock);
4205 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4206 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4207 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4211 /* Now physically reset the device */
4212 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4213 "0389 Performing PCI function reset!\n");
4215 /* Turn off parity checking and serr during the physical reset */
4216 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4217 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4218 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4220 /* Perform FCoE PCI function reset before freeing queue memory */
4221 rc = lpfc_pci_function_reset(phba);
4222 lpfc_sli4_queue_destroy(phba);
4224 /* Restore PCI cmd register */
4225 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4231 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4232 * @phba: Pointer to HBA context object.
4234 * This function is called in the SLI initialization code path to
4235 * restart the HBA. The caller is not required to hold any lock.
4236 * This function writes MBX_RESTART mailbox command to the SLIM and
4237 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4238 * function to free any pending commands. The function enables
4239 * POST only during the first initialization. The function returns zero.
4240 * The function does not guarantee completion of MBX_RESTART mailbox
4241 * command before the return of this function.
4244 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4247 struct lpfc_sli *psli;
4248 volatile uint32_t word0;
4249 void __iomem *to_slim;
4250 uint32_t hba_aer_enabled;
4252 spin_lock_irq(&phba->hbalock);
4254 /* Take PCIe device Advanced Error Reporting (AER) state */
4255 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4260 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4261 "0337 Restart HBA Data: x%x x%x\n",
4262 phba->pport->port_state, psli->sli_flag);
4265 mb = (MAILBOX_t *) &word0;
4266 mb->mbxCommand = MBX_RESTART;
4269 lpfc_reset_barrier(phba);
4271 to_slim = phba->MBslimaddr;
4272 writel(*(uint32_t *) mb, to_slim);
4273 readl(to_slim); /* flush */
4275 /* Only skip post after fc_ffinit is completed */
4276 if (phba->pport->port_state)
4277 word0 = 1; /* This is really setting up word1 */
4279 word0 = 0; /* This is really setting up word1 */
4280 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4281 writel(*(uint32_t *) mb, to_slim);
4282 readl(to_slim); /* flush */
4284 lpfc_sli_brdreset(phba);
4285 phba->pport->stopped = 0;
4286 phba->link_state = LPFC_INIT_START;
4288 spin_unlock_irq(&phba->hbalock);
4290 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4291 psli->stats_start = get_seconds();
4293 /* Give the INITFF and Post time to settle. */
4296 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4297 if (hba_aer_enabled)
4298 pci_disable_pcie_error_reporting(phba->pcidev);
4300 lpfc_hba_down_post(phba);
4306 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4307 * @phba: Pointer to HBA context object.
4309 * This function is called in the SLI initialization code path to restart
4310 * a SLI4 HBA. The caller is not required to hold any lock.
4311 * At the end of the function, it calls lpfc_hba_down_post function to
4312 * free any pending commands.
4315 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4317 struct lpfc_sli *psli = &phba->sli;
4318 uint32_t hba_aer_enabled;
4322 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4323 "0296 Restart HBA Data: x%x x%x\n",
4324 phba->pport->port_state, psli->sli_flag);
4326 /* Take PCIe device Advanced Error Reporting (AER) state */
4327 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4329 rc = lpfc_sli4_brdreset(phba);
4331 spin_lock_irq(&phba->hbalock);
4332 phba->pport->stopped = 0;
4333 phba->link_state = LPFC_INIT_START;
4335 spin_unlock_irq(&phba->hbalock);
4337 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4338 psli->stats_start = get_seconds();
4340 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4341 if (hba_aer_enabled)
4342 pci_disable_pcie_error_reporting(phba->pcidev);
4344 lpfc_hba_down_post(phba);
4350 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4351 * @phba: Pointer to HBA context object.
4353 * This routine wraps the actual SLI3 or SLI4 hba restart routine,
4354 * invoked through the API jump-table function pointer in the lpfc_hba struct.
4357 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4359 return phba->lpfc_sli_brdrestart(phba);
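/*
 * Wiring sketch (illustrative only, excluded from the build): the
 * brdready/brdrestart wrappers dispatch through per-SLI-rev function
 * pointers in struct lpfc_hba. The setup function below is hypothetical;
 * lpfc performs this assignment once, when the API jump table is built.
 */
#if 0
static void lpfc_example_init_sli_api(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
	} else {
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
	}
}
#endif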
4363 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4364 * @phba: Pointer to HBA context object.
4366 * This function is called after an HBA restart to wait for successful
4367 * restart of the HBA. Successful restart of the HBA is indicated by
4368 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4369 * iterations, the function will restart the HBA again. The function returns
4370 * zero if HBA successfully restarted else returns negative error code.
4373 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4375 uint32_t status, i = 0;
4377 /* Read the HBA Host Status Register */
4378 if (lpfc_readl(phba->HSregaddr, &status))
4381 /* Check status register to see what current state is */
4383 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4385 /* Check every 10ms for 10 retries, then every 100ms for 90
4386 * retries, then every 1 sec for 50 retries, for a total of
4387 * ~60 seconds, before resetting the board again and checking
4388 * every 1 sec for 50 more retries. The up-to-60-second window
4389 * before the board is ready is required for the Falcon FIPS
4390 * zeroization to complete; any board reset in between restarts
4391 * the zeroization and further delays board readiness.
4394 /* Adapter failed to init, timeout, status reg
4396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4397 "0436 Adapter failed to init, "
4398 "timeout, status reg x%x, "
4399 "FW Data: A8 x%x AC x%x\n", status,
4400 readl(phba->MBslimaddr + 0xa8),
4401 readl(phba->MBslimaddr + 0xac));
4402 phba->link_state = LPFC_HBA_ERROR;
4406 /* Check to see if any errors occurred during init */
4407 if (status & HS_FFERM) {
4408 /* ERROR: During chipset initialization */
4409 /* Adapter failed to init, chipset, status reg
4411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4412 "0437 Adapter failed to init, "
4413 "chipset, status reg x%x, "
4414 "FW Data: A8 x%x AC x%x\n", status,
4415 readl(phba->MBslimaddr + 0xa8),
4416 readl(phba->MBslimaddr + 0xac));
4417 phba->link_state = LPFC_HBA_ERROR;
4430 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4431 lpfc_sli_brdrestart(phba);
4433 /* Read the HBA Host Status Register */
4434 if (lpfc_readl(phba->HSregaddr, &status))
4438 /* Check to see if any errors occurred during init */
4439 if (status & HS_FFERM) {
4440 /* ERROR: During chipset initialization */
4441 /* Adapter failed to init, chipset, status reg <status> */
4442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4443 "0438 Adapter failed to init, chipset, "
4445 "FW Data: A8 x%x AC x%x\n", status,
4446 readl(phba->MBslimaddr + 0xa8),
4447 readl(phba->MBslimaddr + 0xac));
4448 phba->link_state = LPFC_HBA_ERROR;
4452 /* Clear all interrupt enable conditions */
4453 writel(0, phba->HCregaddr);
4454 readl(phba->HCregaddr); /* flush */
4456 /* setup host attn register */
4457 writel(0xffffffff, phba->HAregaddr);
4458 readl(phba->HAregaddr); /* flush */
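/*
 * Timing sketch (illustrative only, excluded from the build): the wait
 * loop above escalates its polling interval instead of using one fixed
 * delay, which keeps early restarts fast while still tolerating the
 * long FIPS zeroization window. The helper and exact breakpoints are an
 * illustration of the schedule described in the comment above.
 */
#if 0
static unsigned int lpfc_example_init_poll_ms(uint32_t iteration)
{
	if (iteration <= 10)
		return 10;	/* first ~100ms: poll every 10ms */
	if (iteration <= 100)
		return 100;	/* next ~9s: poll every 100ms */
	return 1000;		/* thereafter: poll once per second */
}
#endif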
4463 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4465 * This function calculates and returns the number of HBQs required to be
4466 * configured.
4469 lpfc_sli_hbq_count(void)
4471 return ARRAY_SIZE(lpfc_hbq_defs);
4475 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4477 * This function adds the number of hbq entries in every HBQ to get
4478 * the total number of hbq entries required for the HBA and returns
4479 * the result.
4482 lpfc_sli_hbq_entry_count(void)
4484 int hbq_count = lpfc_sli_hbq_count();
4488 for (i = 0; i < hbq_count; ++i)
4489 count += lpfc_hbq_defs[i]->entry_count;
4494 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4496 * This function calculates amount of memory required for all hbq entries
4497 * to be configured and returns the total memory required.
4500 lpfc_sli_hbq_size(void)
4502 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
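/*
 * Worked example (illustrative counts, not the driver's defaults): with
 * two HBQs of 256 and 128 entries, the total above evaluates to
 * (256 + 128) * sizeof(struct lpfc_hbq_entry); entry counts are summed
 * across all HBQs first and scaled by the entry size once.
 */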
4506 * lpfc_sli_hbq_setup - configure and initialize HBQs
4507 * @phba: Pointer to HBA context object.
4509 * This function is called during the SLI initialization to configure
4510 * all the HBQs and post buffers to the HBQ. The caller is not
4511 * required to hold any locks. This function will return zero if
4512 * successful, else it will return a negative error code.
4515 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4517 int hbq_count = lpfc_sli_hbq_count();
4521 uint32_t hbq_entry_index;
4523 /* Get a Mailbox buffer to setup mailbox
4524 * commands for HBA initialization
4526 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4533 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4534 phba->link_state = LPFC_INIT_MBX_CMDS;
4535 phba->hbq_in_use = 1;
4537 hbq_entry_index = 0;
4538 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4539 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4540 phba->hbqs[hbqno].hbqPutIdx = 0;
4541 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4542 phba->hbqs[hbqno].entry_count =
4543 lpfc_hbq_defs[hbqno]->entry_count;
4544 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4545 hbq_entry_index, pmb);
4546 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4548 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4549 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4550 mbxStatus <status>, ring <num> */
4552 lpfc_printf_log(phba, KERN_ERR,
4553 LOG_SLI | LOG_VPORT,
4554 "1805 Adapter failed to init. "
4555 "Data: x%x x%x x%x\n",
4557 pmbox->mbxStatus, hbqno);
4559 phba->link_state = LPFC_HBA_ERROR;
4560 mempool_free(pmb, phba->mbox_mem_pool);
4564 phba->hbq_count = hbq_count;
4566 mempool_free(pmb, phba->mbox_mem_pool);
4568 /* Initially populate or replenish the HBQs */
4569 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4570 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4575 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4576 * @phba: Pointer to HBA context object.
4578 * This function is called during SLI4 initialization to configure
4579 * the ELS HBQ and post receive buffers to it. The caller is not
4580 * required to hold any locks. This function will return zero if
4581 * successful, else it will return a negative error code.
4584 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4586 phba->hbq_in_use = 1;
4587 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4588 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4589 phba->hbq_count = 1;
4590 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4591 /* Initially populate or replenish the HBQs */
4596 * lpfc_sli_config_port - Issue config port mailbox command
4597 * @phba: Pointer to HBA context object.
4598 * @sli_mode: sli mode - 2/3
4600 * This function is called by the SLI initialization code path
4601 * to issue the config_port mailbox command. This function restarts the
4602 * HBA firmware and issues a config_port mailbox command to configure
4603 * the SLI interface in the mode specified by the sli_mode
4604 * parameter. The caller is not required to hold any locks.
4605 * The function returns 0 if successful, else it returns a negative error
4606 * code.
4609 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4612 uint32_t resetcount = 0, rc = 0, done = 0;
4614 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4616 phba->link_state = LPFC_HBA_ERROR;
4620 phba->sli_rev = sli_mode;
4621 while (resetcount < 2 && !done) {
4622 spin_lock_irq(&phba->hbalock);
4623 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4624 spin_unlock_irq(&phba->hbalock);
4625 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4626 lpfc_sli_brdrestart(phba);
4627 rc = lpfc_sli_chipset_init(phba);
4631 spin_lock_irq(&phba->hbalock);
4632 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4633 spin_unlock_irq(&phba->hbalock);
4636 /* Call pre CONFIG_PORT mailbox command initialization. A
4637 * value of 0 means the call was successful. Any other
4638 * nonzero value is a failure, but if ERESTART is returned,
4639 * the driver may reset the HBA and try again.
4641 rc = lpfc_config_port_prep(phba);
4642 if (rc == -ERESTART) {
4643 phba->link_state = LPFC_LINK_UNKNOWN;
4648 phba->link_state = LPFC_INIT_MBX_CMDS;
4649 lpfc_config_port(phba, pmb);
4650 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4651 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4652 LPFC_SLI3_HBQ_ENABLED |
4653 LPFC_SLI3_CRP_ENABLED |
4654 LPFC_SLI3_BG_ENABLED |
4655 LPFC_SLI3_DSS_ENABLED);
4656 if (rc != MBX_SUCCESS) {
4657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4658 "0442 Adapter failed to init, mbxCmd x%x "
4659 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4660 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4661 spin_lock_irq(&phba->hbalock);
4662 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4663 spin_unlock_irq(&phba->hbalock);
4666 /* Allow asynchronous mailbox command to go through */
4667 spin_lock_irq(&phba->hbalock);
4668 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4669 spin_unlock_irq(&phba->hbalock);
4672 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4673 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4674 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4675 "3110 Port did not grant ASABT\n");
4680 goto do_prep_failed;
4682 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4683 if (!pmb->u.mb.un.varCfgPort.cMA) {
4685 goto do_prep_failed;
4687 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4688 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4689 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4690 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4691 phba->max_vpi : phba->max_vports;
4695 phba->fips_level = 0;
4696 phba->fips_spec_rev = 0;
4697 if (pmb->u.mb.un.varCfgPort.gdss) {
4698 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4699 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4700 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4702 "2850 Security Crypto Active. FIPS x%d "
4704 phba->fips_level, phba->fips_spec_rev);
4706 if (pmb->u.mb.un.varCfgPort.sec_err) {
4707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4708 "2856 Config Port Security Crypto "
4710 pmb->u.mb.un.varCfgPort.sec_err);
4712 if (pmb->u.mb.un.varCfgPort.gerbm)
4713 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4714 if (pmb->u.mb.un.varCfgPort.gcrp)
4715 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4717 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4718 phba->port_gp = phba->mbox->us.s3_pgp.port;
4720 if (phba->cfg_enable_bg) {
4721 if (pmb->u.mb.un.varCfgPort.gbg)
4722 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4725 "0443 Adapter did not grant "
4729 phba->hbq_get = NULL;
4730 phba->port_gp = phba->mbox->us.s2.port;
4734 mempool_free(pmb, phba->mbox_mem_pool);
4740 * lpfc_sli_hba_setup - SLI initialization function
4741 * @phba: Pointer to HBA context object.
4743 * This function is the main SLI initialization function. This function
4744 * is called by the HBA initialization code, HBA reset code and HBA
4745 * error attention handler code. Caller is not required to hold any
4746 * locks. This function issues config_port mailbox command to configure
4747 * the SLI, setup iocb rings and HBQ rings. In the end the function
4748 * calls the config_port_post function to issue init_link mailbox
4749 * command and to start the discovery. The function will return zero
4750 * if successful, else it will return negative error code.
4753 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4759 switch (phba->cfg_sli_mode) {
4761 if (phba->cfg_enable_npiv) {
4762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4763 "1824 NPIV enabled: Override sli_mode "
4764 "parameter (%d) to auto (0).\n",
4765 phba->cfg_sli_mode);
4774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4775 "1819 Unrecognized sli_mode parameter: %d.\n",
4776 phba->cfg_sli_mode);
4780 phba->fcp_embed_io = 0; /* SLI4 FC support only */
4782 rc = lpfc_sli_config_port(phba, mode);
4784 if (rc && phba->cfg_sli_mode == 3)
4785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4786 "1820 Unable to select SLI-3. "
4787 "Not supported by adapter.\n");
4788 if (rc && mode != 2)
4789 rc = lpfc_sli_config_port(phba, 2);
4790 else if (rc && mode == 2)
4791 rc = lpfc_sli_config_port(phba, 3);
4793 goto lpfc_sli_hba_setup_error;
4795 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4796 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4797 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4800 "2709 This device supports "
4801 "Advanced Error Reporting (AER)\n");
4802 spin_lock_irq(&phba->hbalock);
4803 phba->hba_flag |= HBA_AER_ENABLED;
4804 spin_unlock_irq(&phba->hbalock);
4806 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4807 "2708 This device does not support "
4808 "Advanced Error Reporting (AER): %d\n",
4810 phba->cfg_aer_support = 0;
4814 if (phba->sli_rev == 3) {
4815 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4816 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4818 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4819 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4820 phba->sli3_options = 0;
4823 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4824 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4825 phba->sli_rev, phba->max_vpi);
4826 rc = lpfc_sli_ring_map(phba);
4829 goto lpfc_sli_hba_setup_error;
4831 /* Initialize VPIs. */
4832 if (phba->sli_rev == LPFC_SLI_REV3) {
4834 * The VPI bitmask and physical ID array are allocated
4835 * and initialized once only - at driver load. A port
4836 * reset doesn't need to reinitialize this memory.
4838 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4839 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4840 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4842 if (!phba->vpi_bmask) {
4844 goto lpfc_sli_hba_setup_error;
4847 phba->vpi_ids = kzalloc(
4848 (phba->max_vpi+1) * sizeof(uint16_t),
4850 if (!phba->vpi_ids) {
4851 kfree(phba->vpi_bmask);
4853 goto lpfc_sli_hba_setup_error;
4855 for (i = 0; i < phba->max_vpi; i++)
4856 phba->vpi_ids[i] = i;
4861 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4862 rc = lpfc_sli_hbq_setup(phba);
4864 goto lpfc_sli_hba_setup_error;
4866 spin_lock_irq(&phba->hbalock);
4867 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4868 spin_unlock_irq(&phba->hbalock);
4870 rc = lpfc_config_port_post(phba);
4872 goto lpfc_sli_hba_setup_error;
4876 lpfc_sli_hba_setup_error:
4877 phba->link_state = LPFC_HBA_ERROR;
4878 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4879 "0445 Firmware initialization failed\n");
4884 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4885 * @phba: Pointer to HBA context object.
4887 * This function issues a dump mailbox command to read config region
4888 * 23, parses the records in the region, and populates the driver's
4889 * internal list.
4892 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4894 LPFC_MBOXQ_t *mboxq;
4895 struct lpfc_dmabuf *mp;
4896 struct lpfc_mqe *mqe;
4897 uint32_t data_length;
4900 /* Program the default value of vlan_id and fc_map */
4901 phba->valid_vlan = 0;
4902 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4903 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4904 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4906 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4910 mqe = &mboxq->u.mqe;
4911 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4913 goto out_free_mboxq;
4916 mp = (struct lpfc_dmabuf *) mboxq->context1;
4917 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4919 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4920 "(%d):2571 Mailbox cmd x%x Status x%x "
4921 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4922 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4923 "CQ: x%x x%x x%x x%x\n",
4924 mboxq->vport ? mboxq->vport->vpi : 0,
4925 bf_get(lpfc_mqe_command, mqe),
4926 bf_get(lpfc_mqe_status, mqe),
4927 mqe->un.mb_words[0], mqe->un.mb_words[1],
4928 mqe->un.mb_words[2], mqe->un.mb_words[3],
4929 mqe->un.mb_words[4], mqe->un.mb_words[5],
4930 mqe->un.mb_words[6], mqe->un.mb_words[7],
4931 mqe->un.mb_words[8], mqe->un.mb_words[9],
4932 mqe->un.mb_words[10], mqe->un.mb_words[11],
4933 mqe->un.mb_words[12], mqe->un.mb_words[13],
4934 mqe->un.mb_words[14], mqe->un.mb_words[15],
4935 mqe->un.mb_words[16], mqe->un.mb_words[50],
4937 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4938 mboxq->mcqe.trailer);
4941 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4944 goto out_free_mboxq;
4946 data_length = mqe->un.mb_words[5];
4947 if (data_length > DMP_RGN23_SIZE) {
4948 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4951 goto out_free_mboxq;
4954 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4955 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4960 mempool_free(mboxq, phba->mbox_mem_pool);
4965 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4966 * @phba: pointer to lpfc hba data structure.
4967 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4968 * @vpd: pointer to the memory to hold resulting port vpd data.
4969 * @vpd_size: On input, the number of bytes allocated to @vpd.
4970 * On output, the number of data bytes in @vpd.
4972 * This routine executes a READ_REV SLI4 mailbox command. In
4973 * addition, this routine gets the port vpd data.
4977 * -ENOMEM - could not allocate memory.
4980 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4981 uint8_t *vpd, uint32_t *vpd_size)
4985 struct lpfc_dmabuf *dmabuf;
4986 struct lpfc_mqe *mqe;
4988 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4993 * Get a DMA buffer for the vpd data resulting from the READ_REV
4996 dma_size = *vpd_size;
4997 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
4998 &dmabuf->phys, GFP_KERNEL);
4999 if (!dmabuf->virt) {
5005 * The SLI4 implementation of READ_REV conflicts at word1,
5006 * bits 31:16 and SLI4 adds vpd functionality not present
5007 * in SLI3. This code corrects the conflicts.
5009 lpfc_read_rev(phba, mboxq);
5010 mqe = &mboxq->u.mqe;
5011 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5012 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5013 mqe->un.read_rev.word1 &= 0x0000FFFF;
5014 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5015 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5017 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5019 dma_free_coherent(&phba->pcidev->dev, dma_size,
5020 dmabuf->virt, dmabuf->phys);
5026 * The available vpd length cannot be bigger than the
5027 * DMA buffer passed to the port. Catch the less than
5028 * case and update the caller's size.
5030 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5031 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5033 memcpy(vpd, dmabuf->virt, *vpd_size);
5035 dma_free_coherent(&phba->pcidev->dev, dma_size,
5036 dmabuf->virt, dmabuf->phys);
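/*
 * Lifecycle sketch (illustrative only, excluded from the build): the vpd
 * payload for READ_REV follows the usual coherent-DMA pattern: allocate
 * zeroed memory visible to both CPU and port, hand the bus address to
 * the mailbox, and free on every exit path. The helper is hypothetical.
 */
#if 0
static void *lpfc_example_vpd_buf(struct lpfc_hba *phba, uint32_t size,
				  dma_addr_t *phys)
{
	/* Zeroed, DMA-coherent buffer; pair with dma_free_coherent() */
	return dma_zalloc_coherent(&phba->pcidev->dev, size, phys,
				   GFP_KERNEL);
}
#endif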
5042 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5043 * @phba: pointer to lpfc hba data structure.
5045 * This routine retrieves the SLI4 device physical port name that this PCI function
5050 * otherwise - failed to retrieve physical port name
5053 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5055 LPFC_MBOXQ_t *mboxq;
5056 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5057 struct lpfc_controller_attribute *cntl_attr;
5058 struct lpfc_mbx_get_port_name *get_port_name;
5059 void *virtaddr = NULL;
5060 uint32_t alloclen, reqlen;
5061 uint32_t shdr_status, shdr_add_status;
5062 union lpfc_sli4_cfg_shdr *shdr;
5063 char cport_name = 0;
5066 /* We assume nothing at this point */
5067 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5068 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5070 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5073 /* obtain link type and link number via READ_CONFIG */
5074 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5075 lpfc_sli4_read_config(phba);
5076 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5077 goto retrieve_ppname;
5079 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5080 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5081 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5082 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5083 LPFC_SLI4_MBX_NEMBED);
5084 if (alloclen < reqlen) {
5085 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5086 "3084 Allocated DMA memory size (%d) is "
5087 "less than the requested DMA memory size "
5088 "(%d)\n", alloclen, reqlen);
5090 goto out_free_mboxq;
5092 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5093 virtaddr = mboxq->sge_array->addr[0];
5094 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5095 shdr = &mbx_cntl_attr->cfg_shdr;
5096 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5097 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5098 if (shdr_status || shdr_add_status || rc) {
5099 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5100 "3085 Mailbox x%x (x%x/x%x) failed, "
5101 "rc:x%x, status:x%x, add_status:x%x\n",
5102 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5103 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5104 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5105 rc, shdr_status, shdr_add_status);
5107 goto out_free_mboxq;
5109 cntl_attr = &mbx_cntl_attr->cntl_attr;
5110 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5111 phba->sli4_hba.lnk_info.lnk_tp =
5112 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5113 phba->sli4_hba.lnk_info.lnk_no =
5114 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5115 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5116 "3086 lnk_type:%d, lnk_numb:%d\n",
5117 phba->sli4_hba.lnk_info.lnk_tp,
5118 phba->sli4_hba.lnk_info.lnk_no);
5121 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5122 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5123 sizeof(struct lpfc_mbx_get_port_name) -
5124 sizeof(struct lpfc_sli4_cfg_mhdr),
5125 LPFC_SLI4_MBX_EMBED);
5126 get_port_name = &mboxq->u.mqe.un.get_port_name;
5127 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5128 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5129 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5130 phba->sli4_hba.lnk_info.lnk_tp);
5131 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5132 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5133 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5134 if (shdr_status || shdr_add_status || rc) {
5135 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5136 "3087 Mailbox x%x (x%x/x%x) failed: "
5137 "rc:x%x, status:x%x, add_status:x%x\n",
5138 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5139 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5140 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5141 rc, shdr_status, shdr_add_status);
5143 goto out_free_mboxq;
5145 switch (phba->sli4_hba.lnk_info.lnk_no) {
5146 case LPFC_LINK_NUMBER_0:
5147 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5148 &get_port_name->u.response);
5149 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5151 case LPFC_LINK_NUMBER_1:
5152 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5153 &get_port_name->u.response);
5154 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5156 case LPFC_LINK_NUMBER_2:
5157 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5158 &get_port_name->u.response);
5159 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5161 case LPFC_LINK_NUMBER_3:
5162 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5163 &get_port_name->u.response);
5164 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5170 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5171 phba->Port[0] = cport_name;
5172 phba->Port[1] = '\0';
5173 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5174 "3091 SLI get port name: %s\n", phba->Port);
5178 if (rc != MBX_TIMEOUT) {
5179 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5180 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5182 mempool_free(mboxq, phba->mbox_mem_pool);
5188 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5189 * @phba: pointer to lpfc hba data structure.
5191 * This routine is called to explicitly arm the SLI4 device's completion and
5192 * event queues.
5195 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5199 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5200 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5201 if (phba->sli4_hba.nvmels_cq)
5202 lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
5205 if (phba->sli4_hba.fcp_cq)
5206 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5207 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
5210 if (phba->sli4_hba.nvme_cq)
5211 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5212 lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
5216 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5218 if (phba->sli4_hba.hba_eq)
5219 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5220 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
5224 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5228 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5229 * @phba: Pointer to HBA context object.
5230 * @type: The resource extent type.
5231 * @extnt_count: buffer to hold port available extent count.
5232 * @extnt_size: buffer to hold element count per extent.
5234 * This function calls the port and retrieves the number of available
5235 * extents and their size for a particular extent type.
5237 * Returns: 0 if successful. Nonzero otherwise.
5240 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5241 uint16_t *extnt_count, uint16_t *extnt_size)
5246 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5249 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5253 /* Find out how many extents are available for this resource type */
5254 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5255 sizeof(struct lpfc_sli4_cfg_mhdr));
5256 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5257 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5258 length, LPFC_SLI4_MBX_EMBED);
5260 /* Send an extents count of 0 - the GET doesn't use it. */
5261 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5262 LPFC_SLI4_MBX_EMBED);
5268 if (!phba->sli4_hba.intr_enable)
5269 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5271 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5272 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5279 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5280 if (bf_get(lpfc_mbox_hdr_status,
5281 &rsrc_info->header.cfg_shdr.response)) {
5282 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5283 "2930 Failed to get resource extents "
5284 "Status 0x%x Add'l Status 0x%x\n",
5285 bf_get(lpfc_mbox_hdr_status,
5286 &rsrc_info->header.cfg_shdr.response),
5287 bf_get(lpfc_mbox_hdr_add_status,
5288 &rsrc_info->header.cfg_shdr.response));
5293 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5295 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5298 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5299 "3162 Retrieved extents type-%d from port: count:%d, "
5300 "size:%d\n", type, *extnt_count, *extnt_size);
5303 mempool_free(mbox, phba->mbox_mem_pool);
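/*
 * Pattern sketch (illustrative only, excluded from the build): every
 * extent mailbox in this file is issued the same way: poll when the
 * device interrupt is not yet enabled, otherwise sleep-wait with the
 * command-specific timeout. The helper name is hypothetical.
 */
#if 0
static int lpfc_example_issue_cfg_mbox(struct lpfc_hba *phba,
				       LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
	return lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
#endif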
5308 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5309 * @phba: Pointer to HBA context object.
5310 * @type: The extent type to check.
5312 * This function reads the current available extents from the port and checks
5313 * if the extent count or extent size has changed since the last access.
5314 * Callers use this routine after a port reset to determine if there is an
5315 * extent reprovisioning requirement.
5318 * -Error: a negative error value indicates a problem.
5319 * 1: Extent count or size has changed.
5323 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5325 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5326 uint16_t size_diff, rsrc_ext_size;
5328 struct lpfc_rsrc_blks *rsrc_entry;
5329 struct list_head *rsrc_blk_list = NULL;
5333 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5340 case LPFC_RSC_TYPE_FCOE_RPI:
5341 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5343 case LPFC_RSC_TYPE_FCOE_VPI:
5344 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5346 case LPFC_RSC_TYPE_FCOE_XRI:
5347 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5349 case LPFC_RSC_TYPE_FCOE_VFI:
5350 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5356 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5358 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5362 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5369 * lpfc_sli4_cfg_post_extnts -
5370 * @phba: Pointer to HBA context object.
5371 * @extnt_cnt: number of available extents.
5372 * @type: the extent type (rpi, xri, vfi, vpi).
5373 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5374 * @mbox: pointer to the caller's allocated mailbox structure.
5376 * This function executes the extents allocation request. It also
5377 * takes care of the amount of memory needed to allocate or get the
5378 * allocated extents. It is the caller's responsibility to evaluate
5379 * the response.
5382 * -Error: Error value describes the condition found.
5386 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5387 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5392 uint32_t alloc_len, mbox_tmo;
5394 /* Calculate the total requested length of the dma memory */
5395 req_len = extnt_cnt * sizeof(uint16_t);
5398 * Calculate the size of an embedded mailbox. The uint32_t
5399 * accounts for the extents-specific word.
5401 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5405 * Presume the allocation and response will fit into an embedded
5406 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5408 *emb = LPFC_SLI4_MBX_EMBED;
5409 if (req_len > emb_len) {
5410 req_len = extnt_cnt * sizeof(uint16_t) +
5411 sizeof(union lpfc_sli4_cfg_shdr) +
5413 *emb = LPFC_SLI4_MBX_NEMBED;
5416 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5417 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5419 if (alloc_len < req_len) {
5420 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5421 "2982 Allocated DMA memory size (x%x) is "
5422 "less than the requested DMA memory "
5423 "size (x%x)\n", alloc_len, req_len);
5426 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5430 if (!phba->sli4_hba.intr_enable)
5431 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5433 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5434 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
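/*
 * Sizing sketch (illustrative only, excluded from the build): whether
 * the allocation request can travel embedded in the mailbox reduces to
 * comparing the id payload against the space an embedded mailbox leaves
 * after its headers and the extents-specific word. The helper is
 * hypothetical and mirrors the computation above.
 */
#if 0
static bool lpfc_example_fits_embedded(uint16_t extnt_cnt)
{
	uint32_t req_len = extnt_cnt * sizeof(uint16_t);
	uint32_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			   sizeof(uint32_t);

	return req_len <= emb_len;
}
#endif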
5443 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5444 * @phba: Pointer to HBA context object.
5445 * @type: The resource extent type to allocate.
5447 * This function allocates the number of elements for the specified
5451 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5454 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5455 uint16_t rsrc_id, rsrc_start, j, k;
5458 unsigned long longs;
5459 unsigned long *bmask;
5460 struct lpfc_rsrc_blks *rsrc_blks;
5463 struct lpfc_id_range *id_array = NULL;
5464 void *virtaddr = NULL;
5465 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5466 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5467 struct list_head *ext_blk_list;
5469 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5475 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5476 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5477 "3009 No available Resource Extents "
5478 "for resource type 0x%x: Count: 0x%x, "
5479 "Size 0x%x\n", type, rsrc_cnt,
5484 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5485 "2903 Post resource extents type-0x%x: "
5486 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5488 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5492 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5499 * Figure out where the response is located. Then get local pointers
5500 * to the response data. The port does not guarantee a response for
5501 * every extent count requested, so update the local variable with the
5502 * allocated count from the port.
5504 if (emb == LPFC_SLI4_MBX_EMBED) {
5505 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5506 id_array = &rsrc_ext->u.rsp.id[0];
5507 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5509 virtaddr = mbox->sge_array->addr[0];
5510 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5511 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5512 id_array = &n_rsrc->id;
5515 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5516 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5519 * Based on the resource size and count, correct the base and max
5520 * resource values.
5522 length = sizeof(struct lpfc_rsrc_blks);
5524 case LPFC_RSC_TYPE_FCOE_RPI:
5525 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5526 sizeof(unsigned long),
5528 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5532 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5535 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5536 kfree(phba->sli4_hba.rpi_bmask);
5542 * The next_rpi was initialized with the maximum available
5543 * count but the port may allocate a smaller number. Catch
5544 * that case and update the next_rpi.
5546 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5548 /* Initialize local ptrs for common extent processing later. */
5549 bmask = phba->sli4_hba.rpi_bmask;
5550 ids = phba->sli4_hba.rpi_ids;
5551 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5553 case LPFC_RSC_TYPE_FCOE_VPI:
5554 phba->vpi_bmask = kzalloc(longs *
5555 sizeof(unsigned long),
5557 if (unlikely(!phba->vpi_bmask)) {
5561 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5564 if (unlikely(!phba->vpi_ids)) {
5565 kfree(phba->vpi_bmask);
5570 /* Initialize local ptrs for common extent processing later. */
5571 bmask = phba->vpi_bmask;
5572 ids = phba->vpi_ids;
5573 ext_blk_list = &phba->lpfc_vpi_blk_list;
5575 case LPFC_RSC_TYPE_FCOE_XRI:
5576 phba->sli4_hba.xri_bmask = kzalloc(longs *
5577 sizeof(unsigned long),
5579 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5583 phba->sli4_hba.max_cfg_param.xri_used = 0;
5584 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5587 if (unlikely(!phba->sli4_hba.xri_ids)) {
5588 kfree(phba->sli4_hba.xri_bmask);
5593 /* Initialize local ptrs for common extent processing later. */
5594 bmask = phba->sli4_hba.xri_bmask;
5595 ids = phba->sli4_hba.xri_ids;
5596 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5598 case LPFC_RSC_TYPE_FCOE_VFI:
5599 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5600 sizeof(unsigned long),
5602 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5606 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5609 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5610 kfree(phba->sli4_hba.vfi_bmask);
5615 /* Initialize local ptrs for common extent processing later. */
5616 bmask = phba->sli4_hba.vfi_bmask;
5617 ids = phba->sli4_hba.vfi_ids;
5618 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5621 /* Unsupported Opcode. Fail call. */
5625 ext_blk_list = NULL;
5630 * Complete initializing the extent configuration with the
5631 * allocated ids assigned to this function. The bitmask serves
5632 * as an index into the array and manages the available ids. The
5633 * array just stores the ids communicated to the port via the wqes.
5635 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5637 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5640 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5643 rsrc_blks = kzalloc(length, GFP_KERNEL);
5644 if (unlikely(!rsrc_blks)) {
5650 rsrc_blks->rsrc_start = rsrc_id;
5651 rsrc_blks->rsrc_size = rsrc_size;
5652 list_add_tail(&rsrc_blks->list, ext_blk_list);
5653 rsrc_start = rsrc_id;
5654 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5655 phba->sli4_hba.scsi_xri_start = rsrc_start +
5656 lpfc_sli4_get_iocb_cnt(phba);
5657 phba->sli4_hba.nvme_xri_start =
5658 phba->sli4_hba.scsi_xri_start +
5659 phba->sli4_hba.scsi_xri_max;
5662 while (rsrc_id < (rsrc_start + rsrc_size)) {
5667 /* Entire word processed. Get next word. */
5672 lpfc_sli4_mbox_cmd_free(phba, mbox);
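/*
 * Allocator sketch (illustrative only, excluded from the build): the
 * bitmask built above tracks which slots are in use while the ids[]
 * array holds the port-assigned values, so allocating an id is a
 * find-first-zero-bit over the mask. lpfc's real allocators also take
 * the hbalock; the helper below is hypothetical.
 */
#if 0
static int lpfc_example_alloc_id(unsigned long *bmask, uint16_t *ids,
				 uint16_t max_ids)
{
	unsigned long idx = find_first_zero_bit(bmask, max_ids);

	if (idx >= max_ids)
		return -ENOSPC;		/* all ids in use */
	set_bit(idx, bmask);
	return ids[idx];		/* port-assigned resource id */
}
#endif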
5679 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5680 * @phba: Pointer to HBA context object.
5681 * @type: the extent's type.
5683 * This function deallocates all extents of a particular resource type.
5684 * SLI4 does not allow for deallocating a particular extent range. It
5685 * is the caller's responsibility to release all kernel memory resources.
5688 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5691 uint32_t length, mbox_tmo = 0;
5693 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5694 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5696 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5701 * This function sends an embedded mailbox because it only sends
5702 * the resource type. All extents of this type are released by the
5705 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5706 sizeof(struct lpfc_sli4_cfg_mhdr));
5707 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5708 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5709 length, LPFC_SLI4_MBX_EMBED);
5711 /* Send an extents count of 0 - the dealloc doesn't use it. */
5712 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5713 LPFC_SLI4_MBX_EMBED);
5718 if (!phba->sli4_hba.intr_enable)
5719 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5721 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5722 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5729 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5730 if (bf_get(lpfc_mbox_hdr_status,
5731 &dealloc_rsrc->header.cfg_shdr.response)) {
5732 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5733 "2919 Failed to release resource extents "
5734 "for type %d - Status 0x%x Add'l Status 0x%x. "
5735 "Resource memory not released.\n",
5737 bf_get(lpfc_mbox_hdr_status,
5738 &dealloc_rsrc->header.cfg_shdr.response),
5739 bf_get(lpfc_mbox_hdr_add_status,
5740 &dealloc_rsrc->header.cfg_shdr.response));
5745 /* Release kernel memory resources for the specific type. */
5747 case LPFC_RSC_TYPE_FCOE_VPI:
5748 kfree(phba->vpi_bmask);
5749 kfree(phba->vpi_ids);
5750 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5751 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5752 &phba->lpfc_vpi_blk_list, list) {
5753 list_del_init(&rsrc_blk->list);
5756 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5758 case LPFC_RSC_TYPE_FCOE_XRI:
5759 kfree(phba->sli4_hba.xri_bmask);
5760 kfree(phba->sli4_hba.xri_ids);
5761 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5762 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5763 list_del_init(&rsrc_blk->list);
5767 case LPFC_RSC_TYPE_FCOE_VFI:
5768 kfree(phba->sli4_hba.vfi_bmask);
5769 kfree(phba->sli4_hba.vfi_ids);
5770 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5771 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5772 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5773 list_del_init(&rsrc_blk->list);
5777 case LPFC_RSC_TYPE_FCOE_RPI:
5778 /* RPI bitmask and physical id array are cleaned up earlier. */
5779 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5780 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5781 list_del_init(&rsrc_blk->list);
5789 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5792 mempool_free(mbox, phba->mbox_mem_pool);
5797 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5802 len = sizeof(struct lpfc_mbx_set_feature) -
5803 sizeof(struct lpfc_sli4_cfg_mhdr);
5804 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5805 LPFC_MBOX_OPCODE_SET_FEATURES, len,
5806 LPFC_SLI4_MBX_EMBED);
5809 case LPFC_SET_UE_RECOVERY:
5810 bf_set(lpfc_mbx_set_feature_UER,
5811 &mbox->u.mqe.un.set_feature, 1);
5812 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5813 mbox->u.mqe.un.set_feature.param_len = 8;
5815 case LPFC_SET_MDS_DIAGS:
5816 bf_set(lpfc_mbx_set_feature_mds,
5817 &mbox->u.mqe.un.set_feature, 1);
5818 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5819 &mbox->u.mqe.un.set_feature, 0);
5820 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5821 mbox->u.mqe.un.set_feature.param_len = 8;
5829 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5830 * @phba: Pointer to HBA context object.
5832 * This function allocates all SLI4 resource identifiers.
5835 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5837 int i, rc, error = 0;
5838 uint16_t count, base;
5839 unsigned long longs;
5841 if (!phba->sli4_hba.rpi_hdrs_in_use)
5842 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5843 if (phba->sli4_hba.extents_in_use) {
5845 * The port supports resource extents. The XRI, VPI, VFI, RPI
5846 * resource extent count must be read and allocated before
5847 * provisioning the resource id arrays.
5849 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5850 LPFC_IDX_RSRC_RDY) {
5852 * Extent-based resources are set - the driver could
5853 * be in a port reset. Figure out if any corrective
5854 * actions need to be taken.
5856 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5857 LPFC_RSC_TYPE_FCOE_VFI);
5860 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5861 LPFC_RSC_TYPE_FCOE_VPI);
5864 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5865 LPFC_RSC_TYPE_FCOE_XRI);
5868 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5869 LPFC_RSC_TYPE_FCOE_RPI);
5874 * It's possible that the number of resources
5875 * provided to this port instance changed between
5876 * resets. Detect this condition and reallocate
5877 * resources. Otherwise, there is no action.
5880 lpfc_printf_log(phba, KERN_INFO,
5881 LOG_MBOX | LOG_INIT,
5882 "2931 Detected extent resource "
5883 "change. Reallocating all "
5885 rc = lpfc_sli4_dealloc_extent(phba,
5886 LPFC_RSC_TYPE_FCOE_VFI);
5887 rc = lpfc_sli4_dealloc_extent(phba,
5888 LPFC_RSC_TYPE_FCOE_VPI);
5889 rc = lpfc_sli4_dealloc_extent(phba,
5890 LPFC_RSC_TYPE_FCOE_XRI);
5891 rc = lpfc_sli4_dealloc_extent(phba,
5892 LPFC_RSC_TYPE_FCOE_RPI);
5897 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5901 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5905 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5909 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5912 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5917 * The port does not support resource extents. The XRI, VPI,
5918 * VFI, RPI resource ids were determined from READ_CONFIG.
5919 * Just allocate the bitmasks and provision the resource id
5920 * arrays. If a port reset is active, the resources don't
5921 * need any action - just exit.
5923 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5924 LPFC_IDX_RSRC_RDY) {
5925 lpfc_sli4_dealloc_resource_identifiers(phba);
5926 lpfc_sli4_remove_rpis(phba);
5929 count = phba->sli4_hba.max_cfg_param.max_rpi;
5931 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5932 "3279 Invalid provisioning of "
5937 base = phba->sli4_hba.max_cfg_param.rpi_base;
5938 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5939 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5940 sizeof(unsigned long),
5942 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5946 phba->sli4_hba.rpi_ids = kzalloc(count *
5949 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5951 goto free_rpi_bmask;
5954 for (i = 0; i < count; i++)
5955 phba->sli4_hba.rpi_ids[i] = base + i;
5958 count = phba->sli4_hba.max_cfg_param.max_vpi;
5960 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5961 "3280 Invalid provisioning of "
5966 base = phba->sli4_hba.max_cfg_param.vpi_base;
5967 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5968 phba->vpi_bmask = kzalloc(longs *
5969 sizeof(unsigned long),
5971 if (unlikely(!phba->vpi_bmask)) {
5975 phba->vpi_ids = kzalloc(count *
5978 if (unlikely(!phba->vpi_ids)) {
5980 goto free_vpi_bmask;
5983 for (i = 0; i < count; i++)
5984 phba->vpi_ids[i] = base + i;
5987 count = phba->sli4_hba.max_cfg_param.max_xri;
5989 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5990 "3281 Invalid provisioning of "
5995 base = phba->sli4_hba.max_cfg_param.xri_base;
5996 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5997 phba->sli4_hba.xri_bmask = kzalloc(longs *
5998 sizeof(unsigned long),
6000 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6004 phba->sli4_hba.max_cfg_param.xri_used = 0;
6005 phba->sli4_hba.xri_ids = kzalloc(count *
6008 if (unlikely(!phba->sli4_hba.xri_ids)) {
6010 goto free_xri_bmask;
6013 for (i = 0; i < count; i++)
6014 phba->sli4_hba.xri_ids[i] = base + i;
6017 count = phba->sli4_hba.max_cfg_param.max_vfi;
6019 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6020 "3282 Invalid provisioning of "
6025 base = phba->sli4_hba.max_cfg_param.vfi_base;
6026 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6027 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6028 sizeof(unsigned long),
6030 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6034 phba->sli4_hba.vfi_ids = kzalloc(count *
6037 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6039 goto free_vfi_bmask;
6042 for (i = 0; i < count; i++)
6043 phba->sli4_hba.vfi_ids[i] = base + i;
6046 * Mark all resources ready. An HBA reset doesn't need
6047 * to redo this initialization.
6049 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6055 kfree(phba->sli4_hba.vfi_bmask);
6056 phba->sli4_hba.vfi_bmask = NULL;
6058 kfree(phba->sli4_hba.xri_ids);
6059 phba->sli4_hba.xri_ids = NULL;
6061 kfree(phba->sli4_hba.xri_bmask);
6062 phba->sli4_hba.xri_bmask = NULL;
6064 kfree(phba->vpi_ids);
6065 phba->vpi_ids = NULL;
6067 kfree(phba->vpi_bmask);
6068 phba->vpi_bmask = NULL;
6070 kfree(phba->sli4_hba.rpi_ids);
6071 phba->sli4_hba.rpi_ids = NULL;
6073 kfree(phba->sli4_hba.rpi_bmask);
6074 phba->sli4_hba.rpi_bmask = NULL;
6080 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6081 * @phba: Pointer to HBA context object.
6083 * This function deallocates all previously allocated SLI4 resource identifiers.
6087 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6089 if (phba->sli4_hba.extents_in_use) {
6090 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6091 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6092 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6093 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6095 kfree(phba->vpi_bmask);
6096 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6097 kfree(phba->vpi_ids);
6098 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6099 kfree(phba->sli4_hba.xri_bmask);
6100 kfree(phba->sli4_hba.xri_ids);
6101 kfree(phba->sli4_hba.vfi_bmask);
6102 kfree(phba->sli4_hba.vfi_ids);
6103 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6104 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6111 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6112 * @phba: Pointer to HBA context object.
6113 * @type: The resource extent type.
6114 * @extnt_cnt: buffer to hold the port's extent count response.
6115 * @extnt_size: buffer to hold the port's extent size response.
6117 * This function calls the port to read the host allocated extents
6118 * for a particular type.
6121 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6122 uint16_t *extnt_cnt, uint16_t *extnt_size)
6126 uint16_t curr_blks = 0;
6127 uint32_t req_len, emb_len;
6128 uint32_t alloc_len, mbox_tmo;
6129 struct list_head *blk_list_head;
6130 struct lpfc_rsrc_blks *rsrc_blk;
6132 void *virtaddr = NULL;
6133 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6134 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6135 union lpfc_sli4_cfg_shdr *shdr;
6138 case LPFC_RSC_TYPE_FCOE_VPI:
6139 blk_list_head = &phba->lpfc_vpi_blk_list;
6141 case LPFC_RSC_TYPE_FCOE_XRI:
6142 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6144 case LPFC_RSC_TYPE_FCOE_VFI:
6145 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6147 case LPFC_RSC_TYPE_FCOE_RPI:
6148 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6154 /* Count the number of extents currently allocated for this type. */
6155 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6156 if (curr_blks == 0) {
6158 * The GET_ALLOCATED mailbox does not return the size,
6159 * just the count. The size should be just the size
6160 * stored in the current allocated block and all sizes
6161 * for an extent type are the same, so set the return value now.
6164 *extnt_size = rsrc_blk->rsrc_size;
6170 * Calculate the size of an embedded mailbox. The uint32_t
6171 * accounts for the extent-specific word.
6173 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6177 * Presume the allocation and response will fit into an embedded
6178 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6180 emb = LPFC_SLI4_MBX_EMBED;
6182 if (req_len > emb_len) {
6183 req_len = curr_blks * sizeof(uint16_t) +
6184 sizeof(union lpfc_sli4_cfg_shdr) +
6186 emb = LPFC_SLI4_MBX_NEMBED;
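/*
 * Sizing note: req_len counts one uint16_t extent id per allocated
 * block plus the config header and the extent-specific word; once it
 * exceeds the embedded payload computed above, the request is issued
 * as a non-embedded (SGE-based) mailbox instead.
 */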
6189 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6192 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6194 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6195 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6197 if (alloc_len < req_len) {
6198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6199 "2983 Allocated DMA memory size (x%x) is "
6200 "less than the requested DMA memory "
6201 "size (x%x)\n", alloc_len, req_len);
6205 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6211 if (!phba->sli4_hba.intr_enable)
6212 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6214 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6215 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
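/*
 * Polling is used while interrupts are disabled (e.g. during early
 * initialization or error recovery); otherwise the wait call above
 * blocks until the mailbox completes or its timeout expires.
 */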
6224 * Figure out where the response is located. Then get local pointers
6225 * to the response data. The port does not guarantee a response for
6226 * every extent count requested, so update the local variable with
6227 * the count actually allocated by the port.
6229 if (emb == LPFC_SLI4_MBX_EMBED) {
6230 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6231 shdr = &rsrc_ext->header.cfg_shdr;
6232 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6234 virtaddr = mbox->sge_array->addr[0];
6235 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6236 shdr = &n_rsrc->cfg_shdr;
6237 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6240 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6241 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6242 "2984 Failed to read allocated resources "
6243 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6245 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6246 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6251 lpfc_sli4_mbox_cmd_free(phba, mbox);
6256 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6257 * @phba: pointer to lpfc hba data structure.
6259 * @sgl_list: linked list of sgl buffers to post
6260 * @cnt: number of buffers on @sgl_list
6262 * This routine walks the list of buffers that have been allocated and
6263 * reposts them to the port using SGL block post. This is needed after a
6264 * pci_function_reset/warm_start or start. It attempts to construct blocks
6265 * of buffer sgls which contain contiguous xris and uses the non-embedded
6266 * SGL block post mailbox commands to post them to the port. For any single
6267 * buffer sgl with a non-contiguous xri, it uses the embedded SGL post
6268 * mailbox command for posting.
6270 * Returns: the number of buffer sgls actually posted to the port.
6273 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6274 struct list_head *sgl_list, int cnt)
6276 struct lpfc_sglq *sglq_entry = NULL;
6277 struct lpfc_sglq *sglq_entry_next = NULL;
6278 struct lpfc_sglq *sglq_entry_first = NULL;
6279 int status, total_cnt;
6280 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6281 int last_xritag = NO_XRI;
6282 LIST_HEAD(prep_sgl_list);
6283 LIST_HEAD(blck_sgl_list);
6284 LIST_HEAD(allc_sgl_list);
6285 LIST_HEAD(post_sgl_list);
6286 LIST_HEAD(free_sgl_list);
6288 spin_lock_irq(&phba->hbalock);
6289 spin_lock(&phba->sli4_hba.sgl_list_lock);
6290 list_splice_init(sgl_list, &allc_sgl_list);
6291 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6292 spin_unlock_irq(&phba->hbalock);
6295 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6296 &allc_sgl_list, list) {
6297 list_del_init(&sglq_entry->list);
6299 if ((last_xritag != NO_XRI) &&
6300 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6301 /* a hole in xri block, form a sgl posting block */
6302 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6303 post_cnt = block_cnt - 1;
6304 /* prepare list for next posting block */
6305 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6308 /* prepare list for next posting block */
6309 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6310 /* enough sgls for non-embed sgl mbox command */
6311 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6312 list_splice_init(&prep_sgl_list,
6314 post_cnt = block_cnt;
6320 /* keep track of last sgl's xritag */
6321 last_xritag = sglq_entry->sli4_xritag;
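/*
 * Batching rule in brief: sgls accumulate on prep_sgl_list while
 * their xritags remain contiguous; a hole in the sequence, or
 * reaching LPFC_NEMBED_MBOX_SGL_CNT entries, closes the current
 * block for a non-embedded block post.
 */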
6323 /* end of repost sgl list condition for buffers */
6324 if (num_posted == total_cnt) {
6325 if (post_cnt == 0) {
6326 list_splice_init(&prep_sgl_list,
6328 post_cnt = block_cnt;
6329 } else if (block_cnt == 1) {
6330 status = lpfc_sli4_post_sgl(phba,
6331 sglq_entry->phys, 0,
6332 sglq_entry->sli4_xritag);
6334 /* successful, put sgl to posted list */
6335 list_add_tail(&sglq_entry->list,
6338 /* Failure, put sgl to free list */
6339 lpfc_printf_log(phba, KERN_WARNING,
6341 "3159 Failed to post "
6342 "sgl, xritag:x%x\n",
6343 sglq_entry->sli4_xritag);
6344 list_add_tail(&sglq_entry->list,
6351 /* continue until a non-embedded page's worth of sgls */
6355 /* post the buffer list sgls as a block */
6356 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6360 /* success, put sgl list to posted sgl list */
6361 list_splice_init(&blck_sgl_list, &post_sgl_list);
6363 /* Failure, put sgl list to free sgl list */
6364 sglq_entry_first = list_first_entry(&blck_sgl_list,
6367 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6368 "3160 Failed to post sgl-list, "
6370 sglq_entry_first->sli4_xritag,
6371 (sglq_entry_first->sli4_xritag +
6373 list_splice_init(&blck_sgl_list, &free_sgl_list);
6374 total_cnt -= post_cnt;
6377 /* don't reset xritag due to hole in xri block */
6379 last_xritag = NO_XRI;
6381 /* reset sgl post count for next round of posting */
6385 /* free the sgls that failed to post */
6386 lpfc_free_sgl_list(phba, &free_sgl_list);
6388 /* push sgls posted to the available list */
6389 if (!list_empty(&post_sgl_list)) {
6390 spin_lock_irq(&phba->hbalock);
6391 spin_lock(&phba->sli4_hba.sgl_list_lock);
6392 list_splice_init(&post_sgl_list, sgl_list);
6393 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6394 spin_unlock_irq(&phba->hbalock);
6396 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6397 "3161 Failure to post sgl to port.\n");
6401 /* return the number of XRIs actually posted */
6406 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6410 len = sizeof(struct lpfc_mbx_set_host_data) -
6411 sizeof(struct lpfc_sli4_cfg_mhdr);
6412 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6413 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6414 LPFC_SLI4_MBX_EMBED);
6416 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6417 mbox->u.mqe.un.set_host_data.param_len =
6418 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
6419 snprintf(mbox->u.mqe.un.set_host_data.data,
6420 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6421 "Linux %s v"LPFC_DRIVER_VERSION,
6422 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
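/*
 * The resulting host data string looks like, for example,
 * "Linux FC v11.4.0.0" (or "Linux FCoE v..." in FCoE mode); the
 * version text comes from LPFC_DRIVER_VERSION.
 */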
6426 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6427 * @phba: Pointer to HBA context object.
6429 * This function is the main SLI4 device initialization PCI function. It is
6430 * called by the HBA initialization code, the HBA reset code and the HBA
6431 * error attention handler code. The caller is not required to hold any locks.
6435 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6438 LPFC_MBOXQ_t *mboxq;
6439 struct lpfc_mqe *mqe;
6442 uint32_t ftr_rsp = 0;
6443 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6444 struct lpfc_vport *vport = phba->pport;
6445 struct lpfc_dmabuf *mp;
6447 /* Perform a PCI function reset to start from clean */
6448 rc = lpfc_pci_function_reset(phba);
6452 /* Check the HBA Host Status Register for readiness */
6453 rc = lpfc_sli4_post_status_check(phba);
6457 spin_lock_irq(&phba->hbalock);
6458 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6459 spin_unlock_irq(&phba->hbalock);
6463 * Allocate a single mailbox container for initializing the port.
6466 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6470 /* Issue READ_REV to collect vpd and FW information. */
6471 vpd_size = SLI4_PAGE_SIZE;
6472 vpd = kzalloc(vpd_size, GFP_KERNEL);
6478 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6484 mqe = &mboxq->u.mqe;
6485 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6486 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6487 phba->hba_flag |= HBA_FCOE_MODE;
6488 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6490 phba->hba_flag &= ~HBA_FCOE_MODE;
6493 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6495 phba->hba_flag |= HBA_FIP_SUPPORT;
6497 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6499 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6501 if (phba->sli_rev != LPFC_SLI_REV4) {
6502 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6503 "0376 READ_REV Error. SLI Level %d "
6504 "FCoE enabled %d\n",
6505 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6512 * Continue initialization with default values even if the driver failed
6513 * to read the FCoE param config regions; only read the parameters if the
6514 * board is in FCoE mode.
6516 if (phba->hba_flag & HBA_FCOE_MODE &&
6517 lpfc_sli4_read_fcoe_params(phba))
6518 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6519 "2570 Failed to read FCoE parameters\n");
6522 * Retrieve the sli4 device physical port name; failure to do so is
6523 * considered non-fatal.
6525 rc = lpfc_sli4_retrieve_pport_name(phba);
6527 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6528 "3080 Successful retrieving SLI4 device "
6529 "physical port name: %s.\n", phba->Port);
6532 * Evaluate the read rev and vpd data. Populate the driver
6533 * state with the results. If this routine fails, the failure
6534 * is not fatal as the driver will use generic values.
6536 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6537 if (unlikely(!rc)) {
6538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6539 "0377 Error %d parsing vpd. "
6540 "Using defaults.\n", rc);
6545 /* Save information as VPD data */
6546 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6547 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6548 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6549 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6551 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6553 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6555 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6557 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6558 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6559 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6560 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6561 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6562 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6563 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6564 "(%d):0380 READ_REV Status x%x "
6565 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6566 mboxq->vport ? mboxq->vport->vpi : 0,
6567 bf_get(lpfc_mqe_status, mqe),
6568 phba->vpd.rev.opFwName,
6569 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6570 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6572 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6573 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6574 if (phba->pport->cfg_lun_queue_depth > rc) {
6575 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6576 "3362 LUN queue depth changed from %d to %d\n",
6577 phba->pport->cfg_lun_queue_depth, rc);
6578 phba->pport->cfg_lun_queue_depth = rc;
6581 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6582 LPFC_SLI_INTF_IF_TYPE_0) {
6583 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6584 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6585 if (rc == MBX_SUCCESS) {
6586 phba->hba_flag |= HBA_RECOVERABLE_UE;
6587 /* Set 1Sec interval to detect UE */
6588 phba->eratt_poll_interval = 1;
6589 phba->sli4_hba.ue_to_sr = bf_get(
6590 lpfc_mbx_set_feature_UESR,
6591 &mboxq->u.mqe.un.set_feature);
6592 phba->sli4_hba.ue_to_rp = bf_get(
6593 lpfc_mbx_set_feature_UERP,
6594 &mboxq->u.mqe.un.set_feature);
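/*
 * ue_to_sr and ue_to_rp hold port-supplied values consumed later by
 * the HBA_RECOVERABLE_UE handling; reading them as "UE to slot reset"
 * and "UE to port reset" timeouts is an assumption, not spelled out
 * here.
 */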
6598 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6599 /* Enable MDS Diagnostics only if the SLI Port supports it */
6600 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6601 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6602 if (rc != MBX_SUCCESS)
6603 phba->mds_diags_support = 0;
6607 * Discover the port's supported feature set and match it against the host's requests.
6610 lpfc_request_features(phba, mboxq);
6611 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6618 * The port must support FCP initiator mode as this is the
6619 * only mode running in the host.
6621 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6622 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6623 "0378 No support for fcpi mode.\n");
6626 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6627 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6629 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6631 * If the port cannot support the host's requested features
6632 * then turn off the global config parameters to disable the
6633 * feature in the driver. This is not a fatal error.
6635 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6636 if (phba->cfg_enable_bg) {
6637 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6638 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6643 if (phba->max_vpi && phba->cfg_enable_npiv &&
6644 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6648 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6649 "0379 Feature Mismatch Data: x%08x %08x "
6650 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6651 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6652 phba->cfg_enable_npiv, phba->max_vpi);
6653 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6654 phba->cfg_enable_bg = 0;
6655 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6656 phba->cfg_enable_npiv = 0;
6659 /* These SLI3 features are assumed in SLI4 */
6660 spin_lock_irq(&phba->hbalock);
6661 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6662 spin_unlock_irq(&phba->hbalock);
6665 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
6666 * calls depend on these resources to complete port setup.
6668 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6670 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6671 "2920 Failed to alloc Resource IDs "
6676 lpfc_set_host_data(phba, mboxq);
6678 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6680 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6681 "2134 Failed to set host os driver version %x",
6685 /* Read the port's service parameters. */
6686 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6688 phba->link_state = LPFC_HBA_ERROR;
6693 mboxq->vport = vport;
6694 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6695 mp = (struct lpfc_dmabuf *) mboxq->context1;
6696 if (rc == MBX_SUCCESS) {
6697 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6702 * This memory was allocated by the lpfc_read_sparam routine. Release
6703 * it to the mbuf pool.
6705 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6707 mboxq->context1 = NULL;
6709 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6710 "0382 READ_SPARAM command failed "
6711 "status %d, mbxStatus x%x\n",
6712 rc, bf_get(lpfc_mqe_status, mqe));
6713 phba->link_state = LPFC_HBA_ERROR;
6718 lpfc_update_vport_wwn(vport);
6720 /* Update the fc_host data structures with new wwn. */
6721 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6722 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6724 /* Create all the SLI4 queues */
6725 rc = lpfc_sli4_queue_create(phba);
6727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6728 "3089 Failed to allocate queues\n");
6732 /* Set up all the queues to the device */
6733 rc = lpfc_sli4_queue_setup(phba);
6735 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6736 "0381 Error %d during queue setup.\n ", rc);
6737 goto out_stop_timers;
6739 /* Initialize the driver internal SLI layer lists. */
6740 lpfc_sli4_setup(phba);
6741 lpfc_sli4_queue_init(phba);
6743 /* update host els xri-sgl sizes and mappings */
6744 rc = lpfc_sli4_els_sgl_update(phba);
6746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6747 "1400 Failed to update xri-sgl size and "
6748 "mapping: %d\n", rc);
6749 goto out_destroy_queue;
6752 /* register the els sgl pool to the port */
6753 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
6754 phba->sli4_hba.els_xri_cnt);
6755 if (unlikely(rc < 0)) {
6756 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6757 "0582 Error %d during els sgl post "
6760 goto out_destroy_queue;
6762 phba->sli4_hba.els_xri_cnt = rc;
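/*
 * lpfc_sli4_repost_sgl_list() returns the number of XRIs it actually
 * posted, which can be lower than the count requested, so the els
 * xri count is refreshed from the return value.
 */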
6764 if (phba->nvmet_support == 0) {
6765 /* update host scsi xri-sgl sizes and mappings */
6766 rc = lpfc_sli4_scsi_sgl_update(phba);
6768 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6769 "6309 Failed to update scsi-sgl size "
6770 "and mapping: %d\n", rc);
6771 goto out_destroy_queue;
6774 /* update host nvme xri-sgl sizes and mappings */
6775 rc = lpfc_sli4_nvme_sgl_update(phba);
6777 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6778 "6082 Failed to update nvme-sgl size "
6779 "and mapping: %d\n", rc);
6780 goto out_destroy_queue;
6784 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6785 /* register the allocated scsi sgl pool to the port */
6786 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6788 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6789 "0383 Error %d during scsi sgl post "
6791 /* Some Scsi buffers were moved to abort scsi list */
6792 /* A pci function reset will repost them */
6794 goto out_destroy_queue;
6798 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
6799 (phba->nvmet_support == 0)) {
6801 /* register the allocated nvme sgl pool to the port */
6802 rc = lpfc_repost_nvme_sgl_list(phba);
6804 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6805 "6116 Error %d during nvme sgl post "
6807 /* Some NVME buffers were moved to abort nvme list */
6808 /* A pci function reset will repost them */
6810 goto out_destroy_queue;
6814 /* Post the rpi header region to the device. */
6815 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6817 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6818 "0393 Error %d during rpi post operation\n",
6821 goto out_destroy_queue;
6823 lpfc_sli4_node_prep(phba);
6825 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6826 if (phba->nvmet_support == 0) {
6828 * The FC Port needs to register FCFI (index 0)
6830 lpfc_reg_fcfi(phba, mboxq);
6831 mboxq->vport = phba->pport;
6832 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6833 if (rc != MBX_SUCCESS)
6834 goto out_unset_queue;
6836 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6837 &mboxq->u.mqe.un.reg_fcfi);
6839 /* Check if the port is configured to be disabled */
6840 lpfc_sli_read_link_ste(phba);
6843 /* Arm the CQs and then EQs on device */
6844 lpfc_sli4_arm_cqeq_intr(phba);
6846 /* Indicate device interrupt mode */
6847 phba->sli4_hba.intr_enable = 1;
6849 /* Allow asynchronous mailbox command to go through */
6850 spin_lock_irq(&phba->hbalock);
6851 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6852 spin_unlock_irq(&phba->hbalock);
6854 /* Post receive buffers to the device */
6855 lpfc_sli4_rb_setup(phba);
6857 /* Reset HBA FCF states after HBA reset */
6858 phba->fcf.fcf_flag = 0;
6859 phba->fcf.current_rec.flag = 0;
6861 /* Start the ELS watchdog timer */
6862 mod_timer(&vport->els_tmofunc,
6863 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6865 /* Start heart beat timer */
6866 mod_timer(&phba->hb_tmofunc,
6867 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6868 phba->hb_outstanding = 0;
6869 phba->last_completion_time = jiffies;
6871 /* Start error attention (ERATT) polling timer */
6872 mod_timer(&phba->eratt_poll,
6873 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
6875 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6876 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6877 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6879 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6880 "2829 This device supports "
6881 "Advanced Error Reporting (AER)\n");
6882 spin_lock_irq(&phba->hbalock);
6883 phba->hba_flag |= HBA_AER_ENABLED;
6884 spin_unlock_irq(&phba->hbalock);
6886 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6887 "2830 This device does not support "
6888 "Advanced Error Reporting (AER)\n");
6889 phba->cfg_aer_support = 0;
6895 * The port is ready, set the host's link state to LINK_DOWN
6896 * in preparation for link interrupts.
6898 spin_lock_irq(&phba->hbalock);
6899 phba->link_state = LPFC_LINK_DOWN;
6900 spin_unlock_irq(&phba->hbalock);
6901 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6902 (phba->hba_flag & LINK_DISABLED)) {
6903 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6904 "3103 Adapter Link is disabled.\n");
6905 lpfc_down_link(phba, mboxq);
6906 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6907 if (rc != MBX_SUCCESS) {
6908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6909 "3104 Adapter failed to issue "
6910 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6911 goto out_unset_queue;
6913 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6914 /* don't perform init_link on SLI4 FC port loopback test */
6915 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6916 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6918 goto out_unset_queue;
6921 mempool_free(mboxq, phba->mbox_mem_pool);
6924 /* Unset all the queues set up in this routine when error out */
6925 lpfc_sli4_queue_unset(phba);
6927 lpfc_sli4_queue_destroy(phba);
6929 lpfc_stop_hba_timers(phba);
6931 mempool_free(mboxq, phba->mbox_mem_pool);
6936 * lpfc_mbox_timeout - Timeout call back function for mbox timer
6937 * @ptr: context object - pointer to hba structure.
6939 * This is the callback function for the mailbox timer. The mailbox
6940 * timer is armed when a new mailbox command is issued and the timer
6941 * is deleted when the mailbox completes. The function is called by
6942 * the kernel timer code when a mailbox does not complete within the
6943 * expected time. This function wakes up the worker thread to
6944 * process the mailbox timeout and returns. All the processing is
6945 * done by the worker thread function lpfc_mbox_timeout_handler.
6948 lpfc_mbox_timeout(unsigned long ptr)
6950 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6951 unsigned long iflag;
6952 uint32_t tmo_posted;
6954 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6955 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6957 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6958 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6961 lpfc_worker_wake_up(phba);
6966 * lpfc_sli4_mbox_completions_pending - check if any mailbox completions are pending
6968 * @phba: Pointer to HBA context object.
6970 * This function checks if any mailbox completions are present on the mailbox completion queue.
6974 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
6978 struct lpfc_queue *mcq;
6979 struct lpfc_mcqe *mcqe;
6980 bool pending_completions = false;
6982 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
6985 /* Check for completions on mailbox completion queue */
6987 mcq = phba->sli4_hba.mbx_cq;
6988 idx = mcq->hba_index;
6989 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
6990 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
6991 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
6992 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
6993 pending_completions = true;
6996 idx = (idx + 1) % mcq->entry_count;
6997 if (mcq->hba_index == idx)
7000 return pending_completions;
7005 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7007 * @phba: Pointer to HBA context object.
7009 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7010 * may be missed, causing erroneous mailbox timeouts to occur. This function
7011 * checks to see if mbox completions are on the mailbox completion queue
7012 * and will process all the completions associated with the eq for the
7013 * mailbox completion queue.
7016 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7020 struct lpfc_queue *fpeq = NULL;
7021 struct lpfc_eqe *eqe;
7024 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7027 /* Find the eq associated with the mcq */
7029 if (phba->sli4_hba.hba_eq)
7030 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7031 if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
7032 phba->sli4_hba.mbx_cq->assoc_qid) {
7033 fpeq = phba->sli4_hba.hba_eq[eqidx];
7039 /* Turn off interrupts from this EQ */
7041 lpfc_sli4_eq_clr_intr(fpeq);
7043 /* Check to see if a mbox completion is pending */
7045 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7048 * If a mbox completion is pending, process all the events on EQ
7049 * associated with the mbox completion queue (this could include
7050 * mailbox commands, async events, els commands, receive queue data
7055 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7056 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7057 fpeq->EQ_processed++;
7060 /* Always clear and re-arm the EQ */
7062 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
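/*
 * The EQ is released with rearm even when no mailbox completion was
 * pending: its interrupts were turned off above and must be
 * re-enabled before returning.
 */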
7064 return mbox_pending;
7069 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7070 * @phba: Pointer to HBA context object.
7072 * This function is called from worker thread when a mailbox command times out.
7073 * The caller is not required to hold any locks. This function will reset the
7074 * HBA and recover all the pending commands.
7077 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7079 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7080 MAILBOX_t *mb = NULL;
7082 struct lpfc_sli *psli = &phba->sli;
7084 /* If the mailbox completed, process the completion and return */
7085 if (lpfc_sli4_process_missed_mbox_completions(phba))
7090 /* Check the pmbox pointer first. There is a race condition
7091 * between the mbox timeout handler getting executed in the
7092 * worklist and the mailbox actually completing. When this
7093 * race condition occurs, the mbox_active will be NULL.
7095 spin_lock_irq(&phba->hbalock);
7096 if (pmbox == NULL) {
7097 lpfc_printf_log(phba, KERN_WARNING,
7099 "0353 Active Mailbox cleared - mailbox timeout "
7101 spin_unlock_irq(&phba->hbalock);
7105 /* Mbox cmd <mbxCommand> timeout */
7106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7107 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7109 phba->pport->port_state,
7111 phba->sli.mbox_active);
7112 spin_unlock_irq(&phba->hbalock);
7114 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7115 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7116 * it to fail all outstanding SCSI IO.
7118 spin_lock_irq(&phba->pport->work_port_lock);
7119 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7120 spin_unlock_irq(&phba->pport->work_port_lock);
7121 spin_lock_irq(&phba->hbalock);
7122 phba->link_state = LPFC_LINK_UNKNOWN;
7123 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7124 spin_unlock_irq(&phba->hbalock);
7126 lpfc_sli_abort_fcp_rings(phba);
7128 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7129 "0345 Resetting board due to mailbox timeout\n");
7131 /* Reset the HBA device */
7132 lpfc_reset_hba(phba);
7136 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7137 * @phba: Pointer to HBA context object.
7138 * @pmbox: Pointer to mailbox object.
7139 * @flag: Flag indicating how the mailbox needs to be processed.
7141 * This function is called by discovery code and HBA management code
7142 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7143 * function gets the hbalock to protect the data structures.
7144 * The mailbox command can be submitted in polling mode, in which case
7145 * this function will wait in a polling loop for the completion of the
7146 * mailbox command.
7147 * If the mailbox is submitted in no_wait mode (not polling) the
7148 * function will submit the command and return immediately without waiting
7149 * for the mailbox completion. The no_wait mode is supported only when the
7150 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
7151 * The SLI interface allows only one mailbox pending at a time. If the
7152 * mailbox is issued in polling mode and there is already a mailbox
7153 * pending, then the function will return an error. If the mailbox is issued
7154 * in NO_WAIT mode and there is a mailbox pending already, the function
7155 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
7156 * The sli layer owns the mailbox object until the completion of the mailbox
7157 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
7158 * return codes the caller owns the mailbox command after the return of the function.
7162 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7166 struct lpfc_sli *psli = &phba->sli;
7167 uint32_t status, evtctr;
7168 uint32_t ha_copy, hc_copy;
7170 unsigned long timeout;
7171 unsigned long drvr_flag = 0;
7172 uint32_t word0, ldata;
7173 void __iomem *to_slim;
7174 int processing_queue = 0;
7176 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7178 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7179 /* processing mbox queue from intr_handler */
7180 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7181 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7184 processing_queue = 1;
7185 pmbox = lpfc_mbox_get(phba);
7187 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7192 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
7193 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
7195 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7196 lpfc_printf_log(phba, KERN_ERR,
7197 LOG_MBOX | LOG_VPORT,
7198 "1806 Mbox x%x failed. No vport\n",
7199 pmbox->u.mb.mbxCommand);
7201 goto out_not_finished;
7205 /* If the PCI channel is in offline state, do not post mbox. */
7206 if (unlikely(pci_channel_offline(phba->pcidev))) {
7207 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7208 goto out_not_finished;
7211 /* If HBA has a deferred error attention, fail the iocb. */
7212 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7213 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7214 goto out_not_finished;
7220 status = MBX_SUCCESS;
7222 if (phba->link_state == LPFC_HBA_ERROR) {
7223 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7225 /* Mbox command <mbxCommand> cannot issue */
7226 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7227 "(%d):0311 Mailbox command x%x cannot "
7228 "issue Data: x%x x%x\n",
7229 pmbox->vport ? pmbox->vport->vpi : 0,
7230 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7231 goto out_not_finished;
7234 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7235 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7236 !(hc_copy & HC_MBINT_ENA)) {
7237 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7238 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7239 "(%d):2528 Mailbox command x%x cannot "
7240 "issue Data: x%x x%x\n",
7241 pmbox->vport ? pmbox->vport->vpi : 0,
7242 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7243 goto out_not_finished;
7247 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7248 /* Polling for a mbox command when another one is already active
7249 * is not allowed in SLI. Also, the driver must have established
7250 * SLI2 mode to queue and process multiple mbox commands.
7253 if (flag & MBX_POLL) {
7254 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7256 /* Mbox command <mbxCommand> cannot issue */
7257 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7258 "(%d):2529 Mailbox command x%x "
7259 "cannot issue Data: x%x x%x\n",
7260 pmbox->vport ? pmbox->vport->vpi : 0,
7261 pmbox->u.mb.mbxCommand,
7262 psli->sli_flag, flag);
7263 goto out_not_finished;
7266 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7267 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7268 /* Mbox command <mbxCommand> cannot issue */
7269 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7270 "(%d):2530 Mailbox command x%x "
7271 "cannot issue Data: x%x x%x\n",
7272 pmbox->vport ? pmbox->vport->vpi : 0,
7273 pmbox->u.mb.mbxCommand,
7274 psli->sli_flag, flag);
7275 goto out_not_finished;
7278 /* Another mailbox command is still being processed, queue this
7279 * command to be processed later.
7281 lpfc_mbox_put(phba, pmbox);
7283 /* Mbox cmd issue - BUSY */
7284 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7285 "(%d):0308 Mbox cmd issue - BUSY Data: "
7286 "x%x x%x x%x x%x\n",
7287 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7288 mbx->mbxCommand, phba->pport->port_state,
7289 psli->sli_flag, flag);
7291 psli->slistat.mbox_busy++;
7292 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7295 lpfc_debugfs_disc_trc(pmbox->vport,
7296 LPFC_DISC_TRC_MBOX_VPORT,
7297 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
7298 (uint32_t)mbx->mbxCommand,
7299 mbx->un.varWords[0], mbx->un.varWords[1]);
7302 lpfc_debugfs_disc_trc(phba->pport,
7304 "MBOX Bsy: cmd:x%x mb:x%x x%x",
7305 (uint32_t)mbx->mbxCommand,
7306 mbx->un.varWords[0], mbx->un.varWords[1]);
7312 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7314 /* If we are not polling, we MUST be in SLI2 mode */
7315 if (flag != MBX_POLL) {
7316 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7317 (mbx->mbxCommand != MBX_KILL_BOARD)) {
7318 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7319 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7320 /* Mbox command <mbxCommand> cannot issue */
7321 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7322 "(%d):2531 Mailbox command x%x "
7323 "cannot issue Data: x%x x%x\n",
7324 pmbox->vport ? pmbox->vport->vpi : 0,
7325 pmbox->u.mb.mbxCommand,
7326 psli->sli_flag, flag);
7327 goto out_not_finished;
7329 /* timeout active mbox command */
7330 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7332 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7335 /* Mailbox cmd <cmd> issue */
7336 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7337 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7339 pmbox->vport ? pmbox->vport->vpi : 0,
7340 mbx->mbxCommand, phba->pport->port_state,
7341 psli->sli_flag, flag);
7343 if (mbx->mbxCommand != MBX_HEARTBEAT) {
7345 lpfc_debugfs_disc_trc(pmbox->vport,
7346 LPFC_DISC_TRC_MBOX_VPORT,
7347 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7348 (uint32_t)mbx->mbxCommand,
7349 mbx->un.varWords[0], mbx->un.varWords[1]);
7352 lpfc_debugfs_disc_trc(phba->pport,
7354 "MBOX Send: cmd:x%x mb:x%x x%x",
7355 (uint32_t)mbx->mbxCommand,
7356 mbx->un.varWords[0], mbx->un.varWords[1]);
7360 psli->slistat.mbox_cmd++;
7361 evtctr = psli->slistat.mbox_event;
7363 /* next set own bit for the adapter and copy over command word */
7364 mbx->mbxOwner = OWN_CHIP;
7366 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7367 /* Populate mbox extension offset word. */
7368 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7369 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7370 = (uint8_t *)phba->mbox_ext
7371 - (uint8_t *)phba->mbox;
7374 /* Copy the mailbox extension data */
7375 if (pmbox->in_ext_byte_len && pmbox->context2) {
7376 lpfc_sli_pcimem_bcopy(pmbox->context2,
7377 (uint8_t *)phba->mbox_ext,
7378 pmbox->in_ext_byte_len);
7380 /* Copy command data to host SLIM area */
7381 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7383 /* Populate mbox extension offset word. */
7384 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7385 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7386 = MAILBOX_HBA_EXT_OFFSET;
7388 /* Copy the mailbox extension data */
7389 if (pmbox->in_ext_byte_len && pmbox->context2)
7390 lpfc_memcpy_to_slim(phba->MBslimaddr +
7391 MAILBOX_HBA_EXT_OFFSET,
7392 pmbox->context2, pmbox->in_ext_byte_len);
7394 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7395 /* copy command data into host mbox for cmpl */
7396 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7399 /* First copy mbox command data to HBA SLIM, skip past first word */
7401 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7402 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7403 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7405 /* Next copy over first word, with mbxOwner set */
7406 ldata = *((uint32_t *)mbx);
7407 to_slim = phba->MBslimaddr;
7408 writel(ldata, to_slim);
7409 readl(to_slim); /* flush */
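/*
 * The readl() of the register just written acts as a posted-write
 * flush, forcing the writel() out to the adapter before the driver
 * proceeds; the same idiom recurs after each doorbell write below.
 */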
7411 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7412 /* switch over to host mailbox */
7413 psli->sli_flag |= LPFC_SLI_ACTIVE;
7420 /* Set up reference to mailbox command */
7421 psli->mbox_active = pmbox;
7422 /* Interrupt board to do it */
7423 writel(CA_MBATT, phba->CAregaddr);
7424 readl(phba->CAregaddr); /* flush */
7425 /* Don't wait for it to finish, just return */
7429 /* Set up null reference to mailbox command */
7430 psli->mbox_active = NULL;
7431 /* Interrupt board to do it */
7432 writel(CA_MBATT, phba->CAregaddr);
7433 readl(phba->CAregaddr); /* flush */
7435 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7436 /* First read mbox status word */
7437 word0 = *((uint32_t *)phba->mbox);
7438 word0 = le32_to_cpu(word0);
7440 /* First read mbox status word */
7441 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7442 spin_unlock_irqrestore(&phba->hbalock,
7444 goto out_not_finished;
7448 /* Read the HBA Host Attention Register */
7449 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7450 spin_unlock_irqrestore(&phba->hbalock,
7452 goto out_not_finished;
7454 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7457 /* Wait for command to complete */
7458 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7459 (!(ha_copy & HA_MBATT) &&
7460 (phba->link_state > LPFC_WARM_START))) {
7461 if (time_after(jiffies, timeout)) {
7462 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7463 spin_unlock_irqrestore(&phba->hbalock,
7465 goto out_not_finished;
7468 /* Check if we took a mbox interrupt while we were polling */
7470 if (((word0 & OWN_CHIP) != OWN_CHIP)
7471 && (evtctr != psli->slistat.mbox_event))
7475 spin_unlock_irqrestore(&phba->hbalock,
7478 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7481 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7482 /* First copy command data */
7483 word0 = *((uint32_t *)phba->mbox);
7484 word0 = le32_to_cpu(word0);
7485 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7488 /* Check real SLIM for any errors */
7489 slimword0 = readl(phba->MBslimaddr);
7490 slimmb = (MAILBOX_t *) & slimword0;
7491 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7492 && slimmb->mbxStatus) {
7499 /* First copy command data */
7500 word0 = readl(phba->MBslimaddr);
7502 /* Read the HBA Host Attention Register */
7503 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7504 spin_unlock_irqrestore(&phba->hbalock,
7506 goto out_not_finished;
7510 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7511 /* copy results back to user */
7512 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7514 /* Copy the mailbox extension data */
7515 if (pmbox->out_ext_byte_len && pmbox->context2) {
7516 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7518 pmbox->out_ext_byte_len);
7521 /* First copy command data */
7522 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7524 /* Copy the mailbox extension data */
7525 if (pmbox->out_ext_byte_len && pmbox->context2) {
7526 lpfc_memcpy_from_slim(pmbox->context2,
7528 MAILBOX_HBA_EXT_OFFSET,
7529 pmbox->out_ext_byte_len);
7533 writel(HA_MBATT, phba->HAregaddr);
7534 readl(phba->HAregaddr); /* flush */
7536 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7537 status = mbx->mbxStatus;
7540 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7544 if (processing_queue) {
7545 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7546 lpfc_mbox_cmpl_put(phba, pmbox);
7548 return MBX_NOT_FINISHED;
7552 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7553 * @phba: Pointer to HBA context object.
7555 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7556 * the driver internal pending mailbox queue. It will then try to wait out the
7557 * possible outstanding mailbox command before returning.
7560 * 0 - the outstanding mailbox command completed; otherwise, the wait for
7561 * the outstanding mailbox command timed out.
7564 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7566 struct lpfc_sli *psli = &phba->sli;
7568 unsigned long timeout = 0;
7570 /* Mark the asynchronous mailbox command posting as blocked */
7571 spin_lock_irq(&phba->hbalock);
7572 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7573 /* Determine how long we might wait for the active mailbox
7574 * command to be gracefully completed by firmware.
7576 if (phba->sli.mbox_active)
7577 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7578 phba->sli.mbox_active) *
7580 spin_unlock_irq(&phba->hbalock);
7582 /* Make sure the mailbox is really active */
7584 lpfc_sli4_process_missed_mbox_completions(phba);
7586 /* Wait for the outstanding mailbox command to complete */
7587 while (phba->sli.mbox_active) {
7588 /* Check active mailbox complete status every 2ms */
7590 if (time_after(jiffies, timeout)) {
7591 /* Timeout, mark the outstanding cmd not complete */
7597 /* Cannot cleanly block the async mailbox command, fail it */
7599 spin_lock_irq(&phba->hbalock);
7600 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7601 spin_unlock_irq(&phba->hbalock);
7607 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7608 * @phba: Pointer to HBA context object.
7610 * The function unblocks and resumes the posting of SLI4 asynchronous mailbox
7611 * commands from the driver internal pending mailbox queue. It makes sure
7612 * that there is no outstanding mailbox command before resuming posting of
7613 * asynchronous mailbox commands. If, for any reason, there is an outstanding
7614 * mailbox command, it will try to wait it out before resuming asynchronous
7615 * mailbox command posting.
7618 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7620 struct lpfc_sli *psli = &phba->sli;
7622 spin_lock_irq(&phba->hbalock);
7623 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7624 /* Asynchronous mailbox posting is not blocked, do nothing */
7625 spin_unlock_irq(&phba->hbalock);
7629 /* The outstanding synchronous mailbox command is guaranteed to be done,
7630 * either successfully or by timeout; after timing out, the outstanding
7631 * command is always removed. So just unblock the posting of async
7632 * mailbox commands and resume.
7634 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7635 spin_unlock_irq(&phba->hbalock);
7637 /* wake up worker thread to post asynchronous mailbox command */
7638 lpfc_worker_wake_up(phba);
7642 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7643 * @phba: Pointer to HBA context object.
7644 * @mboxq: Pointer to mailbox object.
7646 * The function waits for the bootstrap mailbox register ready bit from the
7647 * port for twice the regular mailbox command timeout value.
7649 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7650 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7653 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7656 unsigned long timeout;
7657 struct lpfc_register bmbx_reg;
7659 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7663 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7664 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7668 if (time_after(jiffies, timeout))
7669 return MBXERR_ERROR;
7670 } while (!db_ready);
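/*
 * The loop above is a simple readiness poll: it rereads the BMBX
 * register until the port sets the ready bit, giving up once twice
 * the normal mailbox timeout has elapsed.
 */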
7676 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7677 * @phba: Pointer to HBA context object.
7678 * @mboxq: Pointer to mailbox object.
7680 * The function posts a mailbox to the port. The mailbox is expected
7681 * to be completely filled in and ready for the port to operate on it.
7682 * This routine executes a synchronous completion operation on the
7683 * mailbox by polling for its completion.
7685 * The caller must not be holding any locks when calling this routine.
7688 * MBX_SUCCESS - mailbox posted successfully
7689 * Any of the MBX error values.
7692 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7694 int rc = MBX_SUCCESS;
7695 unsigned long iflag;
7696 uint32_t mcqe_status;
7698 struct lpfc_sli *psli = &phba->sli;
7699 struct lpfc_mqe *mb = &mboxq->u.mqe;
7700 struct lpfc_bmbx_create *mbox_rgn;
7701 struct dma_address *dma_address;
7704 * Only one mailbox can be active to the bootstrap mailbox region
7705 * at a time and there is no queueing provided.
7707 spin_lock_irqsave(&phba->hbalock, iflag);
7708 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7709 spin_unlock_irqrestore(&phba->hbalock, iflag);
7710 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7711 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7712 "cannot issue Data: x%x x%x\n",
7713 mboxq->vport ? mboxq->vport->vpi : 0,
7714 mboxq->u.mb.mbxCommand,
7715 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7716 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7717 psli->sli_flag, MBX_POLL);
7718 return MBXERR_ERROR;
7720 /* The driver grabs the token and owns it until release */
7721 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7722 phba->sli.mbox_active = mboxq;
7723 spin_unlock_irqrestore(&phba->hbalock, iflag);
7725 /* wait for the bootstrap mbox register to become ready */
7726 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7731 * Initialize the bootstrap memory region to avoid stale data areas
7732 * in the mailbox post. Then copy the caller's mailbox contents to
7733 * the bmbx mailbox region.
7735 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7736 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7737 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7738 sizeof(struct lpfc_mqe));
7740 /* Post the high mailbox dma address to the port and wait for ready. */
7741 dma_address = &phba->sli4_hba.bmbx.dma_address;
7742 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7744 /* wait for the bootstrap mbox register hi-address write to complete */
7745 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7749 /* Post the low mailbox dma address to the port. */
7750 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7752 /* wait for the bootstrap mbox register low-address write to complete */
7753 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7758 * Read the CQ to ensure the mailbox has completed.
7759 * If so, update the mailbox status so that the upper layers
7760 * can complete the request normally.
7762 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7763 sizeof(struct lpfc_mqe));
7764 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7765 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7766 sizeof(struct lpfc_mcqe));
7767 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7769 * When the CQE status indicates a failure and the mailbox status
7770 * indicates success then copy the CQE status into the mailbox status
7771 * (and prefix it with x4000).
7773 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7774 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7775 bf_set(lpfc_mqe_status, mb,
7776 (LPFC_MBX_ERROR_RANGE | mcqe_status));
7779 lpfc_sli4_swap_str(phba, mboxq);
7781 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7782 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7783 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7784 " x%x x%x CQ: x%x x%x x%x x%x\n",
7785 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7786 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7787 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7788 bf_get(lpfc_mqe_status, mb),
7789 mb->un.mb_words[0], mb->un.mb_words[1],
7790 mb->un.mb_words[2], mb->un.mb_words[3],
7791 mb->un.mb_words[4], mb->un.mb_words[5],
7792 mb->un.mb_words[6], mb->un.mb_words[7],
7793 mb->un.mb_words[8], mb->un.mb_words[9],
7794 mb->un.mb_words[10], mb->un.mb_words[11],
7795 mb->un.mb_words[12], mboxq->mcqe.word0,
7796 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7797 mboxq->mcqe.trailer);
7799 /* We are holding the token, no lock needed when releasing it */
7800 spin_lock_irqsave(&phba->hbalock, iflag);
7801 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7802 phba->sli.mbox_active = NULL;
7803 spin_unlock_irqrestore(&phba->hbalock, iflag);
7808 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7809 * @phba: Pointer to HBA context object.
7810 * @pmbox: Pointer to mailbox object.
7811 * @flag: Flag indicating how the mailbox needs to be processed.
7813 * This function is called by discovery code and HBA management code to submit
7814 * a mailbox command to firmware with SLI-4 interface spec.
7816 * Return codes: the caller owns the mailbox command after the return of the function.
7820 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7823 struct lpfc_sli *psli = &phba->sli;
7824 unsigned long iflags;
7827 /* dump from issue mailbox command if setup */
7828 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7830 rc = lpfc_mbox_dev_check(phba);
7832 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7833 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7834 "cannot issue Data: x%x x%x\n",
7835 mboxq->vport ? mboxq->vport->vpi : 0,
7836 mboxq->u.mb.mbxCommand,
7837 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7838 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7839 psli->sli_flag, flag);
7840 goto out_not_finished;
7843 /* Detect polling mode and jump to a handler */
7844 if (!phba->sli4_hba.intr_enable) {
7845 if (flag == MBX_POLL)
7846 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7849 if (rc != MBX_SUCCESS)
7850 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7851 "(%d):2541 Mailbox command x%x "
7852 "(x%x/x%x) failure: "
7853 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7855 mboxq->vport ? mboxq->vport->vpi : 0,
7856 mboxq->u.mb.mbxCommand,
7857 lpfc_sli_config_mbox_subsys_get(phba,
7859 lpfc_sli_config_mbox_opcode_get(phba,
7861 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7862 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7863 bf_get(lpfc_mcqe_ext_status,
7865 psli->sli_flag, flag);
7867 } else if (flag == MBX_POLL) {
7868 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7869 "(%d):2542 Try to issue mailbox command "
7870 "x%x (x%x/x%x) synchronously ahead of async"
7871 "mailbox command queue: x%x x%x\n",
7872 mboxq->vport ? mboxq->vport->vpi : 0,
7873 mboxq->u.mb.mbxCommand,
7874 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7875 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7876 psli->sli_flag, flag);
7877 /* Try to block the asynchronous mailbox posting */
7878 rc = lpfc_sli4_async_mbox_block(phba);
7880 /* Successfully blocked, now issue sync mbox cmd */
7881 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7882 if (rc != MBX_SUCCESS)
7883 lpfc_printf_log(phba, KERN_WARNING,
7885 "(%d):2597 Sync Mailbox command "
7886 "x%x (x%x/x%x) failure: "
7887 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7889 mboxq->vport ? mboxq->vport->vpi : 0,
7890 mboxq->u.mb.mbxCommand,
7891 lpfc_sli_config_mbox_subsys_get(phba,
7893 lpfc_sli_config_mbox_opcode_get(phba,
7895 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7896 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7897 bf_get(lpfc_mcqe_ext_status,
7899 psli->sli_flag, flag);
7900 /* Unblock the async mailbox posting afterward */
7901 lpfc_sli4_async_mbox_unblock(phba);
7906 /* Now, interrupt mode asynchronous mailbox command */
7907 rc = lpfc_mbox_cmd_check(phba, mboxq);
7909 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7910 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7911 "cannot issue Data: x%x x%x\n",
7912 mboxq->vport ? mboxq->vport->vpi : 0,
7913 mboxq->u.mb.mbxCommand,
7914 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7915 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7916 psli->sli_flag, flag);
7917 goto out_not_finished;
7920 /* Put the mailbox command to the driver internal FIFO */
7921 psli->slistat.mbox_busy++;
7922 spin_lock_irqsave(&phba->hbalock, iflags);
7923 lpfc_mbox_put(phba, mboxq);
7924 spin_unlock_irqrestore(&phba->hbalock, iflags);
7925 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7926 "(%d):0354 Mbox cmd issue - Enqueue Data: "
7927 "x%x (x%x/x%x) x%x x%x x%x\n",
7928 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7929 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7930 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7931 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7932 phba->pport->port_state,
7933 psli->sli_flag, MBX_NOWAIT);
7934 /* Wake up worker thread to transport mailbox command from head */
7935 lpfc_worker_wake_up(phba);
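/*
 * The enqueued command now completes asynchronously: the worker
 * thread dequeues it and posts it to the port via
 * lpfc_sli4_post_async_mbox().
 */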
7940 return MBX_NOT_FINISHED;
7944 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7945 * @phba: Pointer to HBA context object.
7947 * This function is called by the worker thread to send a mailbox command to
7948 * the SLI4 HBA firmware.
7952 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7954 struct lpfc_sli *psli = &phba->sli;
7955 LPFC_MBOXQ_t *mboxq;
7956 int rc = MBX_SUCCESS;
7957 unsigned long iflags;
7958 struct lpfc_mqe *mqe;
7961 /* Check interrupt mode before posting async mailbox command */
7962 if (unlikely(!phba->sli4_hba.intr_enable))
7963 return MBX_NOT_FINISHED;
7965 /* Check for mailbox command service token */
7966 spin_lock_irqsave(&phba->hbalock, iflags);
7967 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7968 spin_unlock_irqrestore(&phba->hbalock, iflags);
7969 return MBX_NOT_FINISHED;
7971 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7972 spin_unlock_irqrestore(&phba->hbalock, iflags);
7973 return MBX_NOT_FINISHED;
7975 if (unlikely(phba->sli.mbox_active)) {
7976 spin_unlock_irqrestore(&phba->hbalock, iflags);
7977 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7978 "0384 There is pending active mailbox cmd\n");
7979 return MBX_NOT_FINISHED;
7981 /* Take the mailbox command service token */
7982 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
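/*
 * LPFC_SLI_MBOX_ACTIVE acts as a single service token: at most one
 * mailbox command may be outstanding to the port at a time, which is
 * why the three checks above each bail out with MBX_NOT_FINISHED.
 */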
7984 /* Get the next mailbox command from head of queue */
7985 mboxq = lpfc_mbox_get(phba);
7987 /* If no more mailbox commands are waiting to post, we're done */
7989 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7990 spin_unlock_irqrestore(&phba->hbalock, iflags);
7993 phba->sli.mbox_active = mboxq;
7994 spin_unlock_irqrestore(&phba->hbalock, iflags);
7996 /* Check device readiness for posting mailbox command */
7997 rc = lpfc_mbox_dev_check(phba);
7999 /* Driver clean routine will clean up pending mailbox */
8000 goto out_not_finished;
8002 /* Prepare the mbox command to be posted */
8003 mqe = &mboxq->u.mqe;
8004 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8006 /* Start timer for the mbox_tmo and log some mailbox post messages */
8007 mod_timer(&psli->mbox_tmo, (jiffies +
8008 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8010 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8011 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8013 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8014 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8015 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8016 phba->pport->port_state, psli->sli_flag);
8018 if (mbx_cmnd != MBX_HEARTBEAT) {
8020 lpfc_debugfs_disc_trc(mboxq->vport,
8021 LPFC_DISC_TRC_MBOX_VPORT,
8022 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8023 mbx_cmnd, mqe->un.mb_words[0],
8024 mqe->un.mb_words[1]);
8026 lpfc_debugfs_disc_trc(phba->pport,
8028 "MBOX Send: cmd:x%x mb:x%x x%x",
8029 mbx_cmnd, mqe->un.mb_words[0],
8030 mqe->un.mb_words[1]);
8033 psli->slistat.mbox_cmd++;
8035 /* Post the mailbox command to the port */
8036 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8037 if (rc != MBX_SUCCESS) {
8038 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8039 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8040 "cannot issue Data: x%x x%x\n",
8041 mboxq->vport ? mboxq->vport->vpi : 0,
8042 mboxq->u.mb.mbxCommand,
8043 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8044 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8045 psli->sli_flag, MBX_NOWAIT);
8046 goto out_not_finished;
8052 spin_lock_irqsave(&phba->hbalock, iflags);
8053 if (phba->sli.mbox_active) {
8054 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8055 __lpfc_mbox_cmpl_put(phba, mboxq);
8056 /* Release the token */
8057 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8058 phba->sli.mbox_active = NULL;
8060 spin_unlock_irqrestore(&phba->hbalock, iflags);
8062 return MBX_NOT_FINISHED;
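/*
 * Implementation note: LPFC_SLI_MBOX_ACTIVE is the single service token for
 * the SLI4 mailbox sub-system.  lpfc_sli_issue_mbox_s4() only enqueues and
 * wakes the worker thread; lpfc_sli4_post_async_mbox() takes the token,
 * posts exactly one command to the mailbox queue, and the completion path
 * releases the token before the next queued command is dequeued.
 */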
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of
 * the function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}
/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. It checks the txq and
 * flushes any iocbs queued there to the firmware before
 * submitting new iocbs.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from the txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. The caller needs to check
 * *piocb to find out if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * the next available slot in the command ring and posts the command to the
 * available slot and writes the port attention register to request HBA start
 * processing new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
				FC_RCTL_DD_UNSOL_CMD) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
				MENLO_TRANSPORT_TYPE))
				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
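/*
 * Usage sketch (illustrative, not part of the driver): a caller that wants
 * IOCB_BUSY back instead of having the iocb parked on the txq passes
 * SLI_IOCB_RET_IOCB; hbalock must already be held for the lockless version:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb,
 *				      SLI_IOCB_RET_IOCB);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == IOCB_BUSY)
 *		handle_ring_full(piocb);	// hypothetical caller policy
 */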
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDEs is
 * converted to sli4_sges. If the IOCB contains a single
 * BDE then it is converted to a single sli4_sge.
 * The IOCB is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
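/*
 * Worked example for lpfc_sli4_bpl2sgl() (illustrative): a CMD_GEN_REQUEST64_CR
 * whose BPL holds two outbound BDEs of 64 and 2048 bytes yields two data SGEs
 * with offsets 0 and 64 respectively, and only the second SGE has the "last"
 * bit set.  The first inbound (BUFF_TYPE_BDE_64I) BDE resets the accumulated
 * offset to 0, so the reply list starts its own offset sequence.
 */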
/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		union lpfc_wqe *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;
	struct lpfc_nodelist *ndlp;
	uint32_t *pcmd;
	uint32_t if_type;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	if (phba->fcp_embed_io)
		memset(wqe, 0, sizeof(union lpfc_wqe128));
	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
	wqe->generic.wqe_com.word10 = 0;

	abort_tag = (uint32_t) iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	/* words0-2 bpl convert bde */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;
	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
			ndlp = iocbq->context_un.ndlp;
		else
			ndlp = (struct lpfc_nodelist *)iocbq->context1;
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2007 Only Limited Edition cmd Format"
				" supported 0x%x\n",
				iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}

		wqe->els_req.payload_len = xmit_len;
		/* Els_request64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4 set the vf bit*/
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP)
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				*pcmd == ELS_CMD_SCR ||
				*pcmd == ELS_CMD_FDISC ||
				*pcmd == ELS_CMD_LOGO ||
				*pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
					iocbq->vport->fc_myDID);
				if ((*pcmd == ELS_CMD_FLOGI) &&
					!(phba->fc_topology ==
						LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->vpi_ids[iocbq->vport->vpi]);
			} else if (pcmd && iocbq->context1) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}
		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		wqe->els_req.max_response_payload_len = total_len - xmit_len;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iwrite,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		if (phba->fcp_embed_io) {
			struct lpfc_scsi_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			union lpfc_wqe128 *wqe128;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */
			wqe128 = (union lpfc_wqe128 *)wqe;

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe128->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe128->generic.bde.addrHigh = 0;
			wqe128->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe128->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
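		/*
		 * Note on the embedded-I/O layout above (the IREAD and
		 * ICMND cases below use the same pattern): with
		 * phba->fcp_embed_io set, the FCP_CMND payload is copied
		 * into words 22-29 of the 128-byte WQE, so the BDE is
		 * typed BUFF_TYPE_BDE_IMMED and its addrLow carries the
		 * byte offset of word 22 (22 * 4 = 88) instead of a DMA
		 * address.
		 */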
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iread,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iread,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		if (phba->fcp_embed_io) {
			struct lpfc_scsi_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			union lpfc_wqe128 *wqe128;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */
			wqe128 = (union lpfc_wqe128 *)wqe;

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe128->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe128->generic.bde.addrHigh = 0;
			wqe128->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe128->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_icmd,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_icmd,
		       0);
		/* word3 iocb=IO_TAG wqe=reserved */
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		if (phba->fcp_embed_io) {
			struct lpfc_scsi_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			union lpfc_wqe128 *wqe128;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */
			wqe128 = (union lpfc_wqe128 *)wqe;

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe128->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe128->generic.bde.addrHigh = 0;
			wqe128->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe128->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
		    sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
			iocbq->iocb.un.xseq64.xmit_els_remoteID);

		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			if (iocbq->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
					iocbq->vport->fc_myDID);
				if (iocbq->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
						&wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[iocbq->vport->vpi]);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				iocbq->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				phba->vpi_ids[phba->pport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* port will send abts */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The link is down, or the command was ELS_FIP
			 * so the fw does not need to send abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		xritag = 0;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* As BLS ABTS RSP WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* OX_ID is invariable to who sent ABTS to CT exchange */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);

		/* Use CT=VPI */
		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
			ndlp->nlp_DID);
		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
			iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
			phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
		}

		break;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
			      LPFC_IO_DIF_INSERT);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}
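/*
 * All switch cases in lpfc_sli4_iocb2wqe() funnel into the common trailer
 * above: the XRI and request tags bind the WQE to its exchange and driver
 * iotag, abort_tag lets a later abort WQE reference this I/O, and the CQ ID
 * stays at LPFC_WQE_CQ_ID_DEFAULT because, per the function header, this
 * routine deliberately leaves CQ_ID and WQEC selection to its caller.
 */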
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe *wqe;
	union lpfc_wqe128 wqe128;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
			wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
		else
			wq = phba->sli4_hba.oas_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes,
	 * so allocate space on the stack assuming the largest.
	 */
	wqe = (union lpfc_wqe *)&wqe128;

	lockdep_assert_held(&phba->hbalock);

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring, piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	else {
		/*
		 * This is a continuation of a command (CX) so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, wqe))
		return IOCB_ERROR;
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing an IOCB through
 * the function pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}
/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (!(phba->cfg_fof) ||
		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
			if (unlikely(!phba->sli4_hba.fcp_wq))
				return NULL;
			/*
			 * for abort iocb hba_wqidx should already
			 * be setup based on what work queue we used.
			 */
			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
				piocb->hba_wqidx =
					lpfc_sli4_scmd_to_wqidx_distr(phba,
							      piocb->context1);
			return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
		} else {
			if (unlikely(!phba->sli4_hba.oas_wq))
				return NULL;
			piocb->hba_wqidx = 0;
			return phba->sli4_hba.oas_wq->pring;
		}
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}
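/*
 * Affinity example (illustrative): an abort for an FCP command that was
 * issued with hba_wqidx == 3 is marked LPFC_USE_FCPWQIDX and carries
 * hba_wqidx == 3 itself, so the path above returns the same pring and the
 * command and its abort complete on the same WQ/CQ pair.
 */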
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
 * function. This function takes the appropriate lock, calls
 * __lpfc_sli_issue_iocb, and returns the error returned by
 * __lpfc_sli_issue_iocb. This wrapper is used by
 * functions which do not hold hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflags;
	int rc, idx;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
			idx = piocb->hba_wqidx;
			hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];

			if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {

				/* Get associated EQ with this index */
				fpeq = phba->sli4_hba.hba_eq[idx];

				/* Turn off interrupts from this EQ */
				lpfc_sli4_eq_clr_intr(fpeq);

				/*
				 * Process all the events on FCP EQ
				 */
				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
					lpfc_sli4_hba_handle_eqe(phba,
						eqe, idx);
					fpeq->EQ_processed++;
				}

				/* Always clear and re-arm the EQ */
				lpfc_sli4_eq_release(fpeq,
					LPFC_QUEUE_REARM);
			}
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
		}
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
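/*
 * Usage sketch (illustrative): ELS traffic goes through this wrapper with no
 * SLI lock held by the caller, e.g. the pattern used by the ELS layer:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */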
/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode or
 * IP over FC functionality.
 *
 * This function is called with no lock held. SLI3 only.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->sli3_ring[LPFC_FCP_RING];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->sli3_ring[LPFC_EXTRA_RING];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
			vpi, rpi);
}
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort could be originated
 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case. Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}
/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
		break;
	}
}
/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 */
int
lpfc_sli4_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;

	pring = phba->sli4_hba.els_wq->pring;
	pring->num_mask = LPFC_MAX_RING_MASK;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[1].profile = 0;	/* Mask 1 */
	pring->prt[1].rctl = FC_RCTL_ELS_REP;
	pring->prt[1].type = FC_TYPE_ELS;
	pring->prt[1].lpfc_sli_rcv_unsol_event =
	    lpfc_els_unsol_event;
	pring->prt[2].profile = 0;	/* Mask 2 */
	/* NameServer Inquiry */
	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
	/* NameServer */
	pring->prt[2].type = FC_TYPE_CT;
	pring->prt[2].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	pring->prt[3].profile = 0;	/* Mask 3 */
	/* NameServer response */
	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
	/* NameServer */
	pring->prt[3].type = FC_TYPE_CT;
	pring->prt[3].lpfc_sli_rcv_unsol_event =
	    lpfc_ct_unsol_event;
	return 0;
}
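/*
 * Routing example for the masks above (illustrative): an unsolicited ELS
 * PLOGI arrives with R_CTL = FC_RCTL_ELS_REQ (0x22) and TYPE = FC_TYPE_ELS
 * (0x01) and matches mask 0, so it is handed to lpfc_els_unsol_event();
 * a CT NameServer response (R_CTL = FC_RCTL_DD_SOL_CTL, TYPE = FC_TYPE_CT)
 * matches mask 3 and is routed to lpfc_ct_unsol_event() instead.
 */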
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0. SLI3 only.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
		pring = phba->sli4_hba.fcp_wq[i]->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
		pring = phba->sli4_hba.nvme_wq[i]->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_nvme_io_channel) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	if (phba->cfg_fof) {
		pring = phba->sli4_hba.oas_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command sub-system. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue; the outstanding
 * mailbox command; and the completed mailbox command queue. It is the caller's
 * responsibility to make sure that the driver is in the proper state to flush
 * the mailbox command sub-system. Namely, the posting of mailbox commands into
 * the pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
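/*
 * Note: the flushed completion handlers above run after hbalock has been
 * dropped and see mbxStatus == MBX_NOT_FINISHED, which is how mailbox
 * clients tell a flushed command apart from one the port actually completed.
 */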
9797 * lpfc_sli_host_down - Vport cleanup function
9798 * @vport: Pointer to virtual port object.
9800 * lpfc_sli_host_down is called to clean up the resources
9801 * associated with a vport before destroying virtual
9802 * port data structures.
9803 * This function does the following operations:
9804 * - Free discovery resources associated with this virtual port.
9806 * - Free iocbs associated with this virtual port in the txq.
9808 * - Send abort for all iocb commands associated with this vport
9809 *   in the txcmplq.
9811 * This function is called with no lock held and always returns 1.
9814 lpfc_sli_host_down(struct lpfc_vport *vport)
9816 LIST_HEAD(completions);
9817 struct lpfc_hba *phba = vport->phba;
9818 struct lpfc_sli *psli = &phba->sli;
9819 struct lpfc_queue *qp = NULL;
9820 struct lpfc_sli_ring *pring;
9821 struct lpfc_iocbq *iocb, *next_iocb;
9822 int i;
9823 unsigned long flags = 0;
9824 uint16_t prev_pring_flag;
9826 lpfc_cleanup_discovery_resources(vport);
9828 spin_lock_irqsave(&phba->hbalock, flags);
9830 /*
9831 * Error everything on the txq since these iocbs
9832 * have not been given to the FW yet.
9833 * Also issue ABTS for everything on the txcmplq
9834 */
9835 if (phba->sli_rev != LPFC_SLI_REV4) {
9836 for (i = 0; i < psli->num_rings; i++) {
9837 pring = &psli->sli3_ring[i];
9838 prev_pring_flag = pring->flag;
9839 /* Only slow rings */
9840 if (pring->ringno == LPFC_ELS_RING) {
9841 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9842 /* Set the lpfc data pending flag */
9843 set_bit(LPFC_DATA_READY, &phba->data_flags);
9845 list_for_each_entry_safe(iocb, next_iocb,
9846 &pring->txq, list) {
9847 if (iocb->vport != vport)
9848 continue;
9849 list_move_tail(&iocb->list, &completions);
9851 list_for_each_entry_safe(iocb, next_iocb,
9852 &pring->txcmplq, list) {
9853 if (iocb->vport != vport)
9854 continue;
9855 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9857 pring->flag = prev_pring_flag;
9860 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
9861 pring = qp->pring;
9862 if (!pring)
9863 continue;
9864 if (pring == phba->sli4_hba.els_wq->pring) {
9865 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9866 /* Set the lpfc data pending flag */
9867 set_bit(LPFC_DATA_READY, &phba->data_flags);
9869 prev_pring_flag = pring->flag;
9870 spin_lock_irq(&pring->ring_lock);
9871 list_for_each_entry_safe(iocb, next_iocb,
9872 &pring->txq, list) {
9873 if (iocb->vport != vport)
9874 continue;
9875 list_move_tail(&iocb->list, &completions);
9877 spin_unlock_irq(&pring->ring_lock);
9878 list_for_each_entry_safe(iocb, next_iocb,
9879 &pring->txcmplq, list) {
9880 if (iocb->vport != vport)
9881 continue;
9882 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9884 pring->flag = prev_pring_flag;
9887 spin_unlock_irqrestore(&phba->hbalock, flags);
9889 /* Cancel all the IOCBs from the completions list */
9890 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9891 IOERR_SLI_DOWN);
9892 return 1;
9896 * lpfc_sli_hba_down - Resource cleanup function for the HBA
9897 * @phba: Pointer to HBA context object.
9899 * This function cleans up all iocb, buffers, mailbox commands
9900 * while shutting down the HBA. This function is called with no
9901 * lock held and always returns 1.
9902 * This function does the following to cleanup driver resources:
9903 * - Free discovery resources for each virtual port
9904 * - Cleanup any pending fabric iocbs
9905 * - Iterate through the iocb txq and free each entry
9906 *   in the list.
9907 * - Free up any buffer posted to the HBA
9908 * - Free mailbox commands in the mailbox queue.
9911 lpfc_sli_hba_down(struct lpfc_hba *phba)
9913 LIST_HEAD(completions);
9914 struct lpfc_sli *psli = &phba->sli;
9915 struct lpfc_queue *qp = NULL;
9916 struct lpfc_sli_ring *pring;
9917 struct lpfc_dmabuf *buf_ptr;
9918 unsigned long flags = 0;
9919 int i;
9921 /* Shutdown the mailbox command sub-system */
9922 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
9924 lpfc_hba_down_prep(phba);
9926 lpfc_fabric_abort_hba(phba);
9928 spin_lock_irqsave(&phba->hbalock, flags);
9930 /*
9931 * Error everything on the txq since these iocbs
9932 * have not been given to the FW yet.
9933 */
9934 if (phba->sli_rev != LPFC_SLI_REV4) {
9935 for (i = 0; i < psli->num_rings; i++) {
9936 pring = &psli->sli3_ring[i];
9937 /* Only slow rings */
9938 if (pring->ringno == LPFC_ELS_RING) {
9939 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9940 /* Set the lpfc data pending flag */
9941 set_bit(LPFC_DATA_READY, &phba->data_flags);
9943 list_splice_init(&pring->txq, &completions);
9946 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
9947 pring = qp->pring;
9948 if (!pring)
9949 continue;
9950 spin_lock_irq(&pring->ring_lock);
9951 list_splice_init(&pring->txq, &completions);
9952 spin_unlock_irq(&pring->ring_lock);
9953 if (pring == phba->sli4_hba.els_wq->pring) {
9954 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9955 /* Set the lpfc data pending flag */
9956 set_bit(LPFC_DATA_READY, &phba->data_flags);
9960 spin_unlock_irqrestore(&phba->hbalock, flags);
9962 /* Cancel all the IOCBs from the completions list */
9963 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9964 IOERR_SLI_DOWN);
9966 spin_lock_irqsave(&phba->hbalock, flags);
9967 list_splice_init(&phba->elsbuf, &completions);
9968 phba->elsbuf_cnt = 0;
9969 phba->elsbuf_prev_cnt = 0;
9970 spin_unlock_irqrestore(&phba->hbalock, flags);
9972 while (!list_empty(&completions)) {
9973 list_remove_head(&completions, buf_ptr,
9974 struct lpfc_dmabuf, list);
9975 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9976 kfree(buf_ptr);
9977 }
9979 /* Return any active mbox cmds */
9980 del_timer_sync(&psli->mbox_tmo);
9982 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
9983 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9984 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
9986 return 1;
9990 * lpfc_sli_pcimem_bcopy - SLI memory copy function
9991 * @srcp: Source memory pointer.
9992 * @destp: Destination memory pointer.
9993 * @cnt: Number of bytes to copy (a multiple of the word size).
9995 * This function is used for copying data between driver memory
9996 * and the SLI memory. This function also changes the endianness
9997 * of each word if native endianness is different from SLI
9998 * endianness. This function can be called with or without
9999 * lock.
10002 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10004 uint32_t *src = srcp;
10005 uint32_t *dest = destp;
10006 uint32_t ldata;
10007 int i;
10009 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10010 ldata = *src;
10011 ldata = le32_to_cpu(ldata);
10012 *dest = ldata;
10013 src++;
10014 dest++;
10015 }
10020 * lpfc_sli_bemem_bcopy - SLI memory copy function
10021 * @srcp: Source memory pointer.
10022 * @destp: Destination memory pointer.
10023 * @cnt: Number of bytes to copy (a multiple of the word size).
10025 * This function is used for copying data from a big endian
10026 * data structure to native (local) endianness.
10027 * This function can be called with or without lock.
10030 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10032 uint32_t *src = srcp;
10033 uint32_t *dest = destp;
10034 uint32_t ldata;
10035 int i;
10037 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10038 ldata = *src;
10039 ldata = be32_to_cpu(ldata);
10040 *dest = ldata;
10041 src++;
10042 dest++;
10043 }
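/*
 * Usage sketch (illustrative only): copying a four-word little endian
 * SLI image into host-endian memory. Note that cnt is a byte count,
 * not a word count; lpfc_sli_bemem_bcopy() is the big endian
 * counterpart and is called the same way. Here sli_mem is a
 * hypothetical pointer to SLI-owned memory.
 *
 *	uint32_t host_copy[4];
 *
 *	lpfc_sli_pcimem_bcopy(sli_mem, host_copy, sizeof(host_copy));
 */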
10047 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10048 * @phba: Pointer to HBA context object.
10049 * @pring: Pointer to driver SLI ring object.
10050 * @mp: Pointer to driver buffer object.
10052 * This function is called with no lock held.
10053 * It always returns zero after adding the buffer to the postbufq ring.
10057 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10058 struct lpfc_dmabuf *mp)
10060 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10061 later */
10062 spin_lock_irq(&phba->hbalock);
10063 list_add_tail(&mp->list, &pring->postbufq);
10064 pring->postbufq_cnt++;
10065 spin_unlock_irq(&phba->hbalock);
10066 return 0;
10070 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10071 * @phba: Pointer to HBA context object.
10073 * When HBQ is enabled, buffers are searched based on tags. This function
10074 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10075 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10076 * does not conflict with tags of buffer posted for unsolicited events.
10077 * The function returns the allocated tag. The function is called with
10078 * no lock held.
10081 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10083 spin_lock_irq(&phba->hbalock);
10084 phba->buffer_tag_count++;
10085 /*
10086 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
10087 * a tag assigned by HBQ.
10088 */
10089 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10090 spin_unlock_irq(&phba->hbalock);
10091 return phba->buffer_tag_count;
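/*
 * Usage sketch (illustrative only): a driver-allocated buffer tag
 * always has QUE_BUFTAG_BIT set, so it can never collide with a tag
 * assigned by HBQ. The mp variable is hypothetical.
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	... carry mp->buffer_tag in the CMD_QUE_XRI64_CX iocb ...
 */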
10095 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10096 * @phba: Pointer to HBA context object.
10097 * @pring: Pointer to driver SLI ring object.
10098 * @tag: Buffer tag.
10100 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10101 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10102 * iocb is posted to the response ring with the tag of the buffer.
10103 * This function searches the pring->postbufq list using the tag
10104 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10105 * iocb. If the buffer is found then lpfc_dmabuf object of the
10106 * buffer is returned to the caller else NULL is returned.
10107 * This function is called with no lock held.
10109 struct lpfc_dmabuf *
10110 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10111 uint32_t tag)
10113 struct lpfc_dmabuf *mp, *next_mp;
10114 struct list_head *slp = &pring->postbufq;
10116 /* Search postbufq, from the beginning, looking for a match on tag */
10117 spin_lock_irq(&phba->hbalock);
10118 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10119 if (mp->buffer_tag == tag) {
10120 list_del_init(&mp->list);
10121 pring->postbufq_cnt--;
10122 spin_unlock_irq(&phba->hbalock);
10123 return mp;
10124 }
10125 }
10127 spin_unlock_irq(&phba->hbalock);
10128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10129 "0402 Cannot find virtual addr for buffer tag on "
10130 "ring %d Data x%lx x%p x%p x%x\n",
10131 pring->ringno, (unsigned long) tag,
10132 slp->next, slp->prev, pring->postbufq_cnt);
10134 return NULL;
10138 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10139 * @phba: Pointer to HBA context object.
10140 * @pring: Pointer to driver SLI ring object.
10141 * @phys: DMA address of the buffer.
10143 * This function searches the buffer list using the dma_address
10144 * of unsolicited event to find the driver's lpfc_dmabuf object
10145 * corresponding to the dma_address. The function returns the
10146 * lpfc_dmabuf object if a buffer is found else it returns NULL.
10147 * This function is called by the ct and els unsolicited event
10148 * handlers to get the buffer associated with the unsolicited
10149 * event.
10151 * This function is called with no lock held.
10153 struct lpfc_dmabuf *
10154 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10155 dma_addr_t phys)
10157 struct lpfc_dmabuf *mp, *next_mp;
10158 struct list_head *slp = &pring->postbufq;
10160 /* Search postbufq, from the beginning, looking for a match on phys */
10161 spin_lock_irq(&phba->hbalock);
10162 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10163 if (mp->phys == phys) {
10164 list_del_init(&mp->list);
10165 pring->postbufq_cnt--;
10166 spin_unlock_irq(&phba->hbalock);
10167 return mp;
10168 }
10169 }
10171 spin_unlock_irq(&phba->hbalock);
10172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10173 "0410 Cannot find virtual addr for mapped buf on "
10174 "ring %d Data x%llx x%p x%p x%x\n",
10175 pring->ringno, (unsigned long long)phys,
10176 slp->next, slp->prev, pring->postbufq_cnt);
10178 return NULL;
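/*
 * Usage sketch (illustrative only): posting a DMA buffer on the
 * postbufq and later recovering it, either by tag or by DMA address.
 * Error handling is omitted and the variables are hypothetical.
 *
 *	struct lpfc_dmabuf *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
 *
 *	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *
 *	... in the CMD_IOCB_RET_XRI64_CX path, match by tag ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *
 *	... in the unsolicited CT/ELS path, match by DMA address ...
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
 */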
10181 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
10182 * @phba: Pointer to HBA context object.
10183 * @cmdiocb: Pointer to driver command iocb object.
10184 * @rspiocb: Pointer to driver response iocb object.
10186 * This function is the completion handler for the abort iocbs for
10187 * ELS commands. This function is called from the ELS ring event
10188 * handler with no lock held. This function frees memory resources
10189 * associated with the abort iocb.
10192 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10193 struct lpfc_iocbq *rspiocb)
10195 IOCB_t *irsp = &rspiocb->iocb;
10196 uint16_t abort_iotag, abort_context;
10197 struct lpfc_iocbq *abort_iocb = NULL;
10199 if (irsp->ulpStatus) {
10201 /*
10202 * Assume that the port already completed and returned, or
10203 * will return the iocb. Just log the message.
10204 */
10205 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
10206 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
10208 spin_lock_irq(&phba->hbalock);
10209 if (phba->sli_rev < LPFC_SLI_REV4) {
10210 if (abort_iotag != 0 &&
10211 abort_iotag <= phba->sli.last_iotag)
10212 abort_iocb =
10213 phba->sli.iocbq_lookup[abort_iotag];
10214 } else {
10215 /* For sli4 the abort_tag is the XRI,
10216 * so the abort routine puts the iotag of the iocb
10217 * being aborted in the context field of the abort
10218 * IOCB.
10219 */
10220 abort_iocb = phba->sli.iocbq_lookup[abort_context];
10221 }
10222 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10223 "0327 Cannot abort els iocb %p "
10224 "with tag %x context %x, abort status %x, "
10226 abort_iocb, abort_iotag, abort_context,
10227 irsp->ulpStatus, irsp->un.ulpWord[4]);
10229 spin_unlock_irq(&phba->hbalock);
10231 lpfc_sli_release_iocbq(phba, cmdiocb);
10236 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
10237 * @phba: Pointer to HBA context object.
10238 * @cmdiocb: Pointer to driver command iocb object.
10239 * @rspiocb: Pointer to driver response iocb object.
10241 * The function is called from SLI ring event handler with no
10242 * lock held. This function is the completion handler for ELS commands
10243 * which are aborted. The function frees memory resources used for
10244 * the aborted ELS commands.
10247 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10248 struct lpfc_iocbq *rspiocb)
10250 IOCB_t *irsp = &rspiocb->iocb;
10252 /* ELS cmd tag <ulpIoTag> completes */
10253 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10254 "0139 Ignoring ELS cmd tag x%x completion Data: "
10256 irsp->ulpIoTag, irsp->ulpStatus,
10257 irsp->un.ulpWord[4], irsp->ulpTimeout);
10258 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10259 lpfc_ct_free_iocb(phba, cmdiocb);
10260 else
10261 lpfc_els_free_iocb(phba, cmdiocb);
10266 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
10267 * @phba: Pointer to HBA context object.
10268 * @pring: Pointer to driver SLI ring object.
10269 * @cmdiocb: Pointer to driver command iocb object.
10271 * This function issues an abort iocb for the provided command iocb down to
10272 * the port. Other than the case the outstanding command iocb is an abort
10273 * request, this function issues the abort unconditionally. This function is
10274 * called with hbalock held. The function returns 0 when it fails due to
10275 * memory allocation failure or when the command iocb is an abort request.
10278 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10279 struct lpfc_iocbq *cmdiocb)
10281 struct lpfc_vport *vport = cmdiocb->vport;
10282 struct lpfc_iocbq *abtsiocbp;
10283 IOCB_t *icmd = NULL;
10284 IOCB_t *iabt = NULL;
10285 int retval;
10286 unsigned long iflags;
10288 lockdep_assert_held(&phba->hbalock);
10290 /*
10291 * There are certain command types we don't want to abort. And we
10292 * don't want to abort commands that are already in the process of
10293 * being aborted.
10294 */
10295 icmd = &cmdiocb->iocb;
10296 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10297 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10298 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10299 return 0;
10301 /* issue ABTS for this IOCB based on iotag */
10302 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10303 if (abtsiocbp == NULL)
10304 return 0;
10306 /* This signals the response to set the correct status
10307 * before calling the completion handler.
10308 */
10309 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10311 iabt = &abtsiocbp->iocb;
10312 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10313 iabt->un.acxri.abortContextTag = icmd->ulpContext;
10314 if (phba->sli_rev == LPFC_SLI_REV4) {
10315 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10316 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10317 } else
10318 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10319 iabt->ulpLe = 1;
10321 iabt->ulpClass = icmd->ulpClass;
10323 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10324 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10325 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10326 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10327 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10328 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10330 if (phba->link_state >= LPFC_LINK_UP)
10331 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10332 else
10333 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10335 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10336 abtsiocbp->vport = vport;
10338 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10339 "0339 Abort xri x%x, original iotag x%x, "
10340 "abort cmd iotag x%x\n",
10341 iabt->un.acxri.abortIoTag,
10342 iabt->un.acxri.abortContextTag,
10343 abtsiocbp->iotag);
10345 if (phba->sli_rev == LPFC_SLI_REV4) {
10346 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10347 if (unlikely(pring == NULL))
10348 return 0;
10349 /* Note: both hbalock and ring_lock need to be set here */
10350 spin_lock_irqsave(&pring->ring_lock, iflags);
10351 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10352 abtsiocbp, 0);
10353 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10355 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10356 abtsiocbp, 0);
10359 if (retval)
10360 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10362 /*
10363 * Caller to this routine should check for IOCB_ERROR
10364 * and handle it properly. This routine no longer removes
10365 * the iocb off the txcmplq nor calls compl in case of IOCB_ERROR.
10366 */
10367 return retval;
10371 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10372 * @phba: Pointer to HBA context object.
10373 * @pring: Pointer to driver SLI ring object.
10374 * @cmdiocb: Pointer to driver command iocb object.
10376 * This function issues an abort iocb for the provided command iocb. In case
10377 * of unloading, the abort iocb will not be issued to commands on the ELS
10378 * ring. Instead, the completion callback of those commands shall be
10379 * changed so that nothing happens when they finish. This function is
10380 * called with hbalock held. The function returns 0 when the command
10381 * iocb is an abort request.
10384 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10385 struct lpfc_iocbq *cmdiocb)
10387 struct lpfc_vport *vport = cmdiocb->vport;
10388 int retval = IOCB_ERROR;
10389 IOCB_t *icmd = NULL;
10391 lockdep_assert_held(&phba->hbalock);
10393 /*
10394 * There are certain command types we don't want to abort. And we
10395 * don't want to abort commands that are already in the process of
10396 * being aborted.
10397 */
10398 icmd = &cmdiocb->iocb;
10399 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10400 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10401 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10402 return 0;
10404 /*
10405 * If we're unloading, don't abort iocb on the ELS ring, but change
10406 * the callback so that nothing happens when it finishes.
10407 */
10408 if ((vport->load_flag & FC_UNLOADING) &&
10409 (pring->ringno == LPFC_ELS_RING)) {
10410 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10411 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10412 else
10413 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10414 goto abort_iotag_exit;
10417 /* Now, we try to issue the abort to the cmdiocb out */
10418 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10420 abort_iotag_exit:
10422 /*
10423 * Caller to this routine should check for IOCB_ERROR
10424 * and handle it properly. This routine no longer removes
10425 * the iocb off the txcmplq nor calls compl in case of IOCB_ERROR.
10426 */
10427 return retval;
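/*
 * Usage sketch (illustrative only): callers hold the hbalock, issue
 * the abort, and check the return themselves; the aborted iocb stays
 * on the txcmplq until its own completion (or the ABTS completion)
 * arrives.
 *
 *	spin_lock_irq(&phba->hbalock);
 *	retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 *	spin_unlock_irq(&phba->hbalock);
 *	if (retval == IOCB_ERROR)
 *		... abort not issued; recover by other means ...
 */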
10430 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10431 * @phba: Pointer to HBA context object.
10432 * @pring: Pointer to driver SLI ring object.
10433 * @cmdiocb: Pointer to driver command iocb object.
10435 * This function issues an abort iocb for the provided command iocb down to
10436 * the port. Other than the case the outstanding command iocb is an abort
10437 * request, this function issues the abort unconditionally. This function is
10438 * called with hbalock held. The function returns 0 when it fails due to
10439 * memory allocation failure or when the command iocb is an abort request.
10442 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10443 struct lpfc_iocbq *cmdiocb)
10445 struct lpfc_vport *vport = cmdiocb->vport;
10446 struct lpfc_iocbq *abtsiocbp;
10447 union lpfc_wqe *abts_wqe;
10448 int retval;
10450 /*
10451 * There are certain command types we don't want to abort. And we
10452 * don't want to abort commands that are already in the process of
10453 * being aborted.
10454 */
10455 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10456 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10457 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10458 return 0;
10460 /* issue ABTS for this io based on iotag */
10461 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10462 if (abtsiocbp == NULL)
10463 return 0;
10465 /* This signals the response to set the correct status
10466 * before calling the completion handler.
10467 */
10468 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10470 /* Complete prepping the abort wqe and issue to the FW. */
10471 abts_wqe = &abtsiocbp->wqe;
10472 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
10473 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
10475 /* Explicitly set reserved fields to zero.*/
10476 abts_wqe->abort_cmd.rsrvd4 = 0;
10477 abts_wqe->abort_cmd.rsrvd5 = 0;
10479 /* WQE Common - word 6. Context is XRI tag. Set 0. */
10480 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10481 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10484 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
10485 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10486 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
10487 cmdiocb->iocb.ulpClass);
10489 /* word 8 - tell the FW to abort the IO associated with this
10490 * outstanding exchange ID.
10492 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
10494 /* word 9 - this is the iotag for the abts_wqe completion. */
10495 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
10496 abtsiocbp->iotag);
10499 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
10500 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
10501 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
10504 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10505 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
10506 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10508 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10509 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
10510 abtsiocbp->vport = vport;
10511 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
10512 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
10513 if (retval == IOCB_ERROR) {
10514 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10515 "6147 Failed abts issue_wqe with status x%x "
10517 retval, cmdiocb->sli4_xritag);
10518 lpfc_sli_release_iocbq(phba, abtsiocbp);
10519 return retval;
10520 }
10522 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10523 "6148 Drv Abort NVME Request Issued for "
10524 "ox_id x%x on reqtag x%x\n",
10525 cmdiocb->sli4_xritag,
10526 abtsiocbp->iotag);
10528 return retval;
10532 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10533 * @phba: pointer to lpfc HBA data structure.
10535 * This routine will abort all pending and outstanding iocbs to an HBA.
10538 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
10540 struct lpfc_sli *psli = &phba->sli;
10541 struct lpfc_sli_ring *pring;
10542 struct lpfc_queue *qp = NULL;
10543 int i;
10545 if (phba->sli_rev != LPFC_SLI_REV4) {
10546 for (i = 0; i < psli->num_rings; i++) {
10547 pring = &psli->sli3_ring[i];
10548 lpfc_sli_abort_iocb_ring(phba, pring);
10552 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10553 pring = qp->pring;
10554 if (!pring)
10555 continue;
10556 lpfc_sli_abort_iocb_ring(phba, pring);
10561 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
10562 * @iocbq: Pointer to driver iocb object.
10563 * @vport: Pointer to driver virtual port object.
10564 * @tgt_id: SCSI ID of the target.
10565 * @lun_id: LUN ID of the scsi device.
10566 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
10568 * This function acts as an iocb filter for functions which abort or count
10569 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
10570 * 0 if the filtering criteria are met for the given iocb and will return
10571 * 1 if the filtering criteria are not met.
10572 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
10573 * given iocb is for the SCSI device specified by vport, tgt_id and
10574 * lun_id parameter.
10575 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
10576 * given iocb is for the SCSI target specified by vport and tgt_id
10577 * parameters.
10578 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
10579 * given iocb is for the SCSI host associated with the given vport.
10580 * This function is called with no locks held.
10583 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
10584 uint16_t tgt_id, uint64_t lun_id,
10585 lpfc_ctx_cmd ctx_cmd)
10587 struct lpfc_scsi_buf *lpfc_cmd;
10588 int rc = 1;
10590 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
10591 return rc;
10593 if (iocbq->vport != vport)
10594 return rc;
10596 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10598 if (lpfc_cmd->pCmd == NULL)
10599 return rc;
10601 switch (ctx_cmd) {
10602 case LPFC_CTX_LUN:
10603 if ((lpfc_cmd->rdata->pnode) &&
10604 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10605 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
10606 rc = 0;
10607 break;
10608 case LPFC_CTX_TGT:
10609 if ((lpfc_cmd->rdata->pnode) &&
10610 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
10611 rc = 0;
10612 break;
10613 case LPFC_CTX_HOST:
10614 rc = 0;
10615 break;
10616 default:
10617 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
10618 __func__, ctx_cmd);
10619 break;
10620 }
10622 return rc;
10626 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
10627 * @vport: Pointer to virtual port.
10628 * @tgt_id: SCSI ID of the target.
10629 * @lun_id: LUN ID of the scsi device.
10630 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10632 * This function returns number of FCP commands pending for the vport.
10633 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
10634 * commands pending on the vport associated with SCSI device specified
10635 * by tgt_id and lun_id parameters.
10636 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
10637 * commands pending on the vport associated with SCSI target specified
10638 * by tgt_id parameter.
10639 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
10640 * commands pending on the vport.
10641 * This function returns the number of iocbs which satisfy the filter.
10642 * This function is called without any lock held.
10645 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10646 lpfc_ctx_cmd ctx_cmd)
10648 struct lpfc_hba *phba = vport->phba;
10649 struct lpfc_iocbq *iocbq;
10650 int sum, i;
10652 spin_lock_irq(&phba->hbalock);
10653 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10654 iocbq = phba->sli.iocbq_lookup[i];
10656 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10657 ctx_cmd) == 0)
10658 sum++;
10659 }
10660 spin_unlock_irq(&phba->hbalock);
10661 return sum;
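/*
 * Usage sketch (illustrative only): counting the FCP commands still
 * outstanding on one LUN, for example while waiting for aborts to
 * drain. tgt_id and lun_id are hypothetical.
 *
 *	int cnt;
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	if (cnt)
 *		... outstanding I/O remains; wait and recount ...
 */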
10666 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
10667 * @phba: Pointer to HBA context object
10668 * @cmdiocb: Pointer to command iocb object.
10669 * @rspiocb: Pointer to response iocb object.
10671 * This function is called when an aborted FCP iocb completes. This
10672 * function is called by the ring event handler with no lock held.
10673 * This function frees the iocb.
10676 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10677 struct lpfc_iocbq *rspiocb)
10679 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10680 "3096 ABORT_XRI_CN completing on rpi x%x "
10681 "original iotag x%x, abort cmd iotag x%x "
10682 "status 0x%x, reason 0x%x\n",
10683 cmdiocb->iocb.un.acxri.abortContextTag,
10684 cmdiocb->iocb.un.acxri.abortIoTag,
10685 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
10686 rspiocb->iocb.un.ulpWord[4]);
10687 lpfc_sli_release_iocbq(phba, cmdiocb);
10692 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
10693 * @vport: Pointer to virtual port.
10694 * @pring: Pointer to driver SLI ring object.
10695 * @tgt_id: SCSI ID of the target.
10696 * @lun_id: LUN ID of the scsi device.
10697 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10699 * This function sends an abort command for every SCSI command
10700 * associated with the given virtual port pending on the ring
10701 * filtered by lpfc_sli_validate_fcp_iocb function.
10702 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
10703 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
10704 * parameters.
10705 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
10706 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10707 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
10708 * FCP iocbs associated with virtual port.
10709 * This function returns number of iocbs it failed to abort.
10710 * This function is called with no locks held.
10713 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10714 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
10716 struct lpfc_hba *phba = vport->phba;
10717 struct lpfc_iocbq *iocbq;
10718 struct lpfc_iocbq *abtsiocb;
10719 IOCB_t *cmd = NULL;
10720 int errcnt = 0, ret_val = 0;
10723 for (i = 1; i <= phba->sli.last_iotag; i++) {
10724 iocbq = phba->sli.iocbq_lookup[i];
10726 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10727 abort_cmd) != 0)
10728 continue;
10730 /*
10731 * If the iocbq is already being aborted, don't take a second
10732 * action, but do count it.
10733 */
10734 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10735 continue;
10737 /* issue ABTS for this IOCB based on iotag */
10738 abtsiocb = lpfc_sli_get_iocbq(phba);
10739 if (abtsiocb == NULL) {
10740 errcnt++;
10741 continue;
10742 }
10744 /* indicate the IO is being aborted by the driver. */
10745 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10747 cmd = &iocbq->iocb;
10748 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10749 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
10750 if (phba->sli_rev == LPFC_SLI_REV4)
10751 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10752 else
10753 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
10754 abtsiocb->iocb.ulpLe = 1;
10755 abtsiocb->iocb.ulpClass = cmd->ulpClass;
10756 abtsiocb->vport = vport;
10758 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10759 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
10760 if (iocbq->iocb_flag & LPFC_IO_FCP)
10761 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
10762 if (iocbq->iocb_flag & LPFC_IO_FOF)
10763 abtsiocb->iocb_flag |= LPFC_IO_FOF;
10765 if (lpfc_is_link_up(phba))
10766 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10767 else
10768 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10770 /* Setup callback routine and issue the command. */
10771 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10772 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
10773 abtsiocb, 0);
10774 if (ret_val == IOCB_ERROR) {
10775 lpfc_sli_release_iocbq(phba, abtsiocb);
10776 errcnt++;
10777 continue;
10778 }
10779 }
10781 return errcnt;
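/*
 * Usage sketch (illustrative only): a LUN recovery path can abort
 * everything pending for the LUN and then poll lpfc_sli_sum_iocb()
 * until the count drains. The retry bound and delay are hypothetical.
 *
 *	int tries = 0;
 *
 *	lpfc_sli_abort_iocb(vport, pring, tgt_id, lun_id, LPFC_CTX_LUN);
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       tries++ < 100)
 *		msleep(10);
 */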
10785 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
10786 * @vport: Pointer to virtual port.
10787 * @pring: Pointer to driver SLI ring object.
10788 * @tgt_id: SCSI ID of the target.
10789 * @lun_id: LUN ID of the scsi device.
10790 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10792 * This function sends an abort command for every SCSI command
10793 * associated with the given virtual port pending on the ring
10794 * filtered by lpfc_sli_validate_fcp_iocb function.
10795 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
10796 * FCP iocbs associated with lun specified by tgt_id and lun_id
10798 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
10799 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10800 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
10801 * FCP iocbs associated with virtual port.
10802 * This function returns the number of iocbs it aborted.
10803 * This function is called with no locks held right after a task
10804 * management command is sent.
10807 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10808 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10810 struct lpfc_hba *phba = vport->phba;
10811 struct lpfc_scsi_buf *lpfc_cmd;
10812 struct lpfc_iocbq *abtsiocbq;
10813 struct lpfc_nodelist *ndlp;
10814 struct lpfc_iocbq *iocbq;
10815 IOCB_t *icmd;
10816 int sum, i, ret_val;
10817 unsigned long iflags;
10818 struct lpfc_sli_ring *pring_s4;
10820 spin_lock_irq(&phba->hbalock);
10822 /* all I/Os are in process of being flushed */
10823 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10824 spin_unlock_irq(&phba->hbalock);
10825 return 0;
10826 }
10827 sum = 0;
10829 for (i = 1; i <= phba->sli.last_iotag; i++) {
10830 iocbq = phba->sli.iocbq_lookup[i];
10832 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10833 cmd) != 0)
10834 continue;
10836 /*
10837 * If the iocbq is already being aborted, don't take a second
10838 * action, but do count it.
10839 */
10840 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10841 continue;
10843 /* issue ABTS for this IOCB based on iotag */
10844 abtsiocbq = __lpfc_sli_get_iocbq(phba);
10845 if (abtsiocbq == NULL)
10846 continue;
10848 icmd = &iocbq->iocb;
10849 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10850 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
10851 if (phba->sli_rev == LPFC_SLI_REV4)
10852 abtsiocbq->iocb.un.acxri.abortIoTag =
10853 iocbq->sli4_xritag;
10854 else
10855 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
10856 abtsiocbq->iocb.ulpLe = 1;
10857 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
10858 abtsiocbq->vport = vport;
10860 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10861 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
10862 if (iocbq->iocb_flag & LPFC_IO_FCP)
10863 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
10864 if (iocbq->iocb_flag & LPFC_IO_FOF)
10865 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
10867 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10868 ndlp = lpfc_cmd->rdata->pnode;
10870 if (lpfc_is_link_up(phba) &&
10871 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
10872 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10873 else
10874 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10876 /* Setup callback routine and issue the command. */
10877 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10879 /*
10880 * Indicate the IO is being aborted by the driver and set
10881 * the caller's flag into the aborted IO.
10882 */
10883 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10885 if (phba->sli_rev == LPFC_SLI_REV4) {
10886 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
10887 if (pring_s4 == NULL)
10888 continue;
10889 /* Note: both hbalock and ring_lock must be set here */
10890 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
10891 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
10892 abtsiocbq, 0);
10893 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
10895 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
10896 abtsiocbq, 0);
10900 if (ret_val == IOCB_ERROR)
10901 __lpfc_sli_release_iocbq(phba, abtsiocbq);
10902 else
10903 sum++;
10904 }
10905 spin_unlock_irq(&phba->hbalock);
10906 return sum;
10910 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
10911 * @phba: Pointer to HBA context object.
10912 * @cmdiocbq: Pointer to command iocb.
10913 * @rspiocbq: Pointer to response iocb.
10915 * This function is the completion handler for iocbs issued using
10916 * lpfc_sli_issue_iocb_wait function. This function is called by the
10917 * ring event handler function without any lock held. This function
10918 * can be called from both worker thread context and interrupt
10919 * context. This function can also be called from another thread which
10920 * cleans up the SLI layer objects.
10921 * This function copies the contents of the response iocb to the
10922 * response iocb memory object provided by the caller of
10923 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
10924 * sleeps for the iocb completion.
10927 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
10928 struct lpfc_iocbq *cmdiocbq,
10929 struct lpfc_iocbq *rspiocbq)
10931 wait_queue_head_t *pdone_q;
10932 unsigned long iflags;
10933 struct lpfc_scsi_buf *lpfc_cmd;
10935 spin_lock_irqsave(&phba->hbalock, iflags);
10936 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
10938 /*
10939 * A time out has occurred for the iocb. If a time out
10940 * completion handler has been supplied, call it. Otherwise,
10941 * just free the iocbq.
10942 */
10944 spin_unlock_irqrestore(&phba->hbalock, iflags);
10945 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
10946 cmdiocbq->wait_iocb_cmpl = NULL;
10947 if (cmdiocbq->iocb_cmpl)
10948 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
10949 else
10950 lpfc_sli_release_iocbq(phba, cmdiocbq);
10951 return;
10952 }
10954 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
10955 if (cmdiocbq->context2 && rspiocbq)
10956 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
10957 &rspiocbq->iocb, sizeof(IOCB_t));
10959 /* Set the exchange busy flag for task management commands */
10960 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
10961 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
10962 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
10963 cur_iocbq);
10964 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
10965 }
10967 pdone_q = cmdiocbq->context_un.wait_queue;
10968 if (pdone_q)
10969 wake_up(pdone_q);
10970 spin_unlock_irqrestore(&phba->hbalock, iflags);
10971 return;
10975 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
10976 * @phba: Pointer to HBA context object..
10977 * @piocbq: Pointer to command iocb.
10978 * @flag: Flag to test.
10980 * This routine grabs the hbalock and then tests the iocb_flag to
10981 * see if the passed in flag is set.
10982 * Returns:
10983 * 1 if flag is set.
10984 * 0 if flag is not set.
10987 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
10988 struct lpfc_iocbq *piocbq, uint32_t flag)
10990 unsigned long iflags;
10991 int ret;
10993 spin_lock_irqsave(&phba->hbalock, iflags);
10994 ret = piocbq->iocb_flag & flag;
10995 spin_unlock_irqrestore(&phba->hbalock, iflags);
10996 return ret;
11001 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11002 * @phba: Pointer to HBA context object..
11003 * @pring: Pointer to sli ring.
11004 * @piocb: Pointer to command iocb.
11005 * @prspiocbq: Pointer to response iocb.
11006 * @timeout: Timeout in number of seconds.
11008 * This function issues the iocb to firmware and waits for the
11009 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11010 * to handle iocbs which time out. If the field is NULL, the
11011 * function shall free the iocbq structure. If more clean up is
11012 * needed, the caller is expected to provide a completion function
11013 * that will provide the needed clean up. If the iocb command is
11014 * not completed within timeout seconds, the function will either
11015 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11016 * completion function set in the iocb_cmpl field and then return
11017 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11018 * resources if this function returns IOCB_TIMEDOUT.
11019 * The function waits for the iocb completion using a
11020 * non-interruptible wait.
11021 * This function will sleep while waiting for iocb completion.
11022 * So, this function should not be called from any context which
11023 * does not allow sleeping. Due to the same reason, this function
11024 * cannot be called with interrupt disabled.
11025 * This function assumes that the iocb completions occur while
11026 * this function sleeps. So, this function cannot be called from
11027 * the thread which processes iocb completion for this ring.
11028 * This function clears the iocb_flag of the iocb object before
11029 * issuing the iocb and the iocb completion handler sets this
11030 * flag and wakes this thread when the iocb completes.
11031 * The contents of the response iocb will be copied to prspiocbq
11032 * by the completion handler when the command completes.
11033 * This function returns IOCB_SUCCESS when success.
11034 * This function is called with no lock held.
11037 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11038 uint32_t ring_number,
11039 struct lpfc_iocbq *piocb,
11040 struct lpfc_iocbq *prspiocbq,
11041 uint32_t timeout)
11043 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11044 long timeleft, timeout_req = 0;
11045 int retval = IOCB_SUCCESS;
11046 uint32_t creg_val;
11047 struct lpfc_iocbq *iocb;
11048 int txq_cnt = 0;
11049 int txcmplq_cnt = 0;
11050 struct lpfc_sli_ring *pring;
11051 unsigned long iflags;
11052 bool iocb_completed = true;
11054 if (phba->sli_rev >= LPFC_SLI_REV4)
11055 pring = lpfc_sli4_calc_ring(phba, piocb);
11056 else
11057 pring = &phba->sli.sli3_ring[ring_number];
11058 /*
11059 * If the caller has provided a response iocbq buffer, then context2
11060 * is NULL or it's an error.
11061 */
11062 if (prspiocbq) {
11063 if (piocb->context2)
11064 return IOCB_ERROR;
11065 piocb->context2 = prspiocbq;
11066 }
11068 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11069 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11070 piocb->context_un.wait_queue = &done_q;
11071 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11073 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11074 if (lpfc_readl(phba->HCregaddr, &creg_val))
11075 return IOCB_ERROR;
11076 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11077 writel(creg_val, phba->HCregaddr);
11078 readl(phba->HCregaddr); /* flush */
11079 }
11081 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11082 SLI_IOCB_RET_IOCB);
11083 if (retval == IOCB_SUCCESS) {
11084 timeout_req = msecs_to_jiffies(timeout * 1000);
11085 timeleft = wait_event_timeout(done_q,
11086 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11087 timeout_req);
11088 spin_lock_irqsave(&phba->hbalock, iflags);
11089 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11091 /*
11092 * IOCB timed out. Inform the wake iocb wait
11093 * completion function and set local status
11094 */
11096 iocb_completed = false;
11097 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11098 }
11099 spin_unlock_irqrestore(&phba->hbalock, iflags);
11100 if (iocb_completed) {
11101 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11102 "0331 IOCB wake signaled\n");
11103 /* Note: we are not indicating if the IOCB has a success
11104 * status or not - that's for the caller to check.
11105 * IOCB_SUCCESS means just that the command was sent and
11106 * completed. Not that it completed successfully.
11107 */
11108 } else if (timeleft == 0) {
11109 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11110 "0338 IOCB wait timeout error - no "
11111 "wake response Data x%x\n", timeout);
11112 retval = IOCB_TIMEDOUT;
11113 } else {
11114 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11115 "0330 IOCB wake NOT set, "
11116 "Data x%x x%lx\n",
11117 timeout, (timeleft / jiffies));
11118 retval = IOCB_TIMEDOUT;
11119 }
11120 } else if (retval == IOCB_BUSY) {
11121 if (phba->cfg_log_verbose & LOG_SLI) {
11122 list_for_each_entry(iocb, &pring->txq, list) {
11123 txq_cnt++;
11124 }
11125 list_for_each_entry(iocb, &pring->txcmplq, list) {
11126 txcmplq_cnt++;
11127 }
11128 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11129 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11130 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11131 }
11132 return retval;
11133 } else {
11134 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11135 "0332 IOCB wait issue failed, Data x%x\n",
11136 retval);
11137 retval = IOCB_ERROR;
11138 }
11140 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11141 if (lpfc_readl(phba->HCregaddr, &creg_val))
11142 return IOCB_ERROR;
11143 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11144 writel(creg_val, phba->HCregaddr);
11145 readl(phba->HCregaddr); /* flush */
11146 }
11148 if (prspiocbq)
11149 piocb->context2 = NULL;
11151 piocb->context_un.wait_queue = NULL;
11152 piocb->iocb_cmpl = NULL;
11153 return retval;
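/*
 * Usage sketch (illustrative only): issuing a pre-built ELS iocb
 * synchronously with a 30 second timeout and a caller-owned response
 * iocb. On IOCB_TIMEDOUT the command iocb must not be freed by the
 * caller; it now belongs to the timeout completion path.
 *
 *	struct lpfc_iocbq *rspiocbq = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_SUCCESS)
 *		... check rspiocbq->iocb.ulpStatus for the real result ...
 *	lpfc_sli_release_iocbq(phba, rspiocbq);
 */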
11157 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11158 * @phba: Pointer to HBA context object.
11159 * @pmboxq: Pointer to driver mailbox object.
11160 * @timeout: Timeout in number of seconds.
11162 * This function issues the mailbox to firmware and waits for the
11163 * mailbox command to complete. If the mailbox command is not
11164 * completed within timeout seconds, it returns MBX_TIMEOUT.
11165 * The function waits for the mailbox completion using an
11166 * interruptible wait. If the thread is woken up due to a
11167 * signal, MBX_TIMEOUT error is returned to the caller. Caller
11168 * should not free the mailbox resources if this function returns
11169 * MBX_TIMEOUT.
11170 * This function will sleep while waiting for mailbox completion.
11171 * So, this function should not be called from any context which
11172 * does not allow sleeping. Due to the same reason, this function
11173 * cannot be called with interrupt disabled.
11174 * This function assumes that the mailbox completion occurs while
11175 * this function sleeps. So, this function cannot be called from
11176 * the worker thread which processes mailbox completion.
11177 * This function is called in the context of HBA management
11178 * applications.
11179 * This function returns MBX_SUCCESS when successful.
11180 * This function is called with no lock held.
11183 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11184 uint32_t timeout)
11186 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11187 MAILBOX_t *mb = NULL;
11188 int retval;
11189 unsigned long flag;
11191 /* The caller might set context1 for extended buffer */
11192 if (pmboxq->context1)
11193 mb = (MAILBOX_t *)pmboxq->context1;
11195 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
11196 /* setup wake call as IOCB callback */
11197 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11198 /* setup context field to pass wait_queue pointer to wake function */
11199 pmboxq->context1 = &done_q;
11201 /* now issue the command */
11202 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
11203 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
11204 wait_event_interruptible_timeout(done_q,
11205 pmboxq->mbox_flag & LPFC_MBX_WAKE,
11206 msecs_to_jiffies(timeout * 1000));
11208 spin_lock_irqsave(&phba->hbalock, flag);
11209 /* restore the possible extended buffer for free resource */
11210 pmboxq->context1 = (uint8_t *)mb;
11211 /*
11212 * if LPFC_MBX_WAKE flag is set the mailbox is completed
11213 * else do not free the resources.
11215 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
11216 retval = MBX_SUCCESS;
11217 } else {
11218 retval = MBX_TIMEOUT;
11219 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11220 }
11221 spin_unlock_irqrestore(&phba->hbalock, flag);
11222 } else {
11223 /* restore the possible extended buffer for free resource */
11224 pmboxq->context1 = (uint8_t *)mb;
11225 }
11227 return retval;
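/*
 * Usage sketch (illustrative only): issuing a mailbox command
 * synchronously. On MBX_TIMEOUT the mailbox must not be freed here;
 * lpfc_sli_def_mbox_cmpl releases it when it finally completes.
 * lpfc_read_config() stands in for any mailbox setup helper.
 *
 *	LPFC_MBOXQ_t *mboxq;
 *	int rc;
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_config(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */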
11231 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
11232 * @phba: Pointer to HBA context.
11234 * This function is called to shutdown the driver's mailbox sub-system.
11235 * It first marks the mailbox sub-system is in a block state to prevent
11236 * the asynchronous mailbox command from being issued off the pending mailbox
11237 * command queue. If the mailbox command sub-system shutdown is due to
11238 * HBA error conditions such as EEH or ERATT, this routine shall invoke
11239 * the mailbox sub-system flush routine to forcefully bring down the
11240 * mailbox sub-system. Otherwise, if it is due to normal condition (such
11241 * as with offline or HBA function reset), this routine will wait for the
11242 * outstanding mailbox command to complete before invoking the mailbox
11243 * sub-system flush routine to gracefully bring down mailbox sub-system.
11246 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11248 struct lpfc_sli *psli = &phba->sli;
11249 unsigned long timeout;
11251 if (mbx_action == LPFC_MBX_NO_WAIT) {
11252 /* delay 100ms for port state */
11253 msleep(100);
11254 lpfc_sli_mbox_sys_flush(phba);
11255 return;
11256 }
11257 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
11259 spin_lock_irq(&phba->hbalock);
11260 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11262 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
11263 /* Determine how long we might wait for the active mailbox
11264 * command to be gracefully completed by firmware.
11266 if (phba->sli.mbox_active)
11267 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
11268 phba->sli.mbox_active) *
11269 1000) + jiffies;
11270 spin_unlock_irq(&phba->hbalock);
11272 while (phba->sli.mbox_active) {
11273 /* Check active mailbox complete status every 2ms */
11274 msleep(2);
11275 if (time_after(jiffies, timeout))
11276 /* Timeout, let the mailbox flush routine
11277 * forcefully release the active mailbox command
11278 */
11279 break;
11280 }
11281 } else
11282 spin_unlock_irq(&phba->hbalock);
11284 lpfc_sli_mbox_sys_flush(phba);
11288 * lpfc_sli_eratt_read - read sli-3 error attention events
11289 * @phba: Pointer to HBA context.
11291 * This function is called to read the SLI3 device error attention registers
11292 * for possible error attention events. The caller must hold the hbalock
11293 * with spin_lock_irq().
11295 * This function returns 1 when there is Error Attention in the Host Attention
11296 * Register and returns 0 otherwise.
11299 lpfc_sli_eratt_read(struct lpfc_hba *phba)
11301 uint32_t ha_copy;
11303 /* Read chip Host Attention (HA) register */
11304 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11305 goto unplug_err;
11307 if (ha_copy & HA_ERATT) {
11308 /* Read host status register to retrieve error event */
11309 if (lpfc_sli_read_hs(phba))
11310 goto unplug_err;
11312 /* Check if a deferred error condition is active */
11313 if ((HS_FFER1 & phba->work_hs) &&
11314 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11315 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
11316 phba->hba_flag |= DEFER_ERATT;
11317 /* Clear all interrupt enable conditions */
11318 writel(0, phba->HCregaddr);
11319 readl(phba->HCregaddr);
11320 }
11322 /* Set the driver HA work bitmap */
11323 phba->work_ha |= HA_ERATT;
11324 /* Indicate polling handles this ERATT */
11325 phba->hba_flag |= HBA_ERATT_HANDLED;
11326 return 1;
11327 }
11328 return 0;
11330 unplug_err:
11331 /* Set the driver HS work bitmap */
11332 phba->work_hs |= UNPLUG_ERR;
11333 /* Set the driver HA work bitmap */
11334 phba->work_ha |= HA_ERATT;
11335 /* Indicate polling handles this ERATT */
11336 phba->hba_flag |= HBA_ERATT_HANDLED;
11337 return 1;
11341 * lpfc_sli4_eratt_read - read sli-4 error attention events
11342 * @phba: Pointer to HBA context.
11344 * This function is called to read the SLI4 device error attention registers
11345 * for possible error attention events. The caller must hold the hbalock
11346 * with spin_lock_irq().
11348 * This function returns 1 when there is Error Attention in the Host Attention
11349 * Register and returns 0 otherwise.
11352 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
11354 uint32_t uerr_sta_hi, uerr_sta_lo;
11355 uint32_t if_type, portsmphr;
11356 struct lpfc_register portstat_reg;
11358 /*
11359 * For now, use the SLI4 device internal unrecoverable error
11360 * registers for error attention. This can be changed later.
11361 */
11362 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11363 switch (if_type) {
11364 case LPFC_SLI_INTF_IF_TYPE_0:
11365 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
11366 &uerr_sta_lo) ||
11367 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
11368 &uerr_sta_hi)) {
11369 phba->work_hs |= UNPLUG_ERR;
11370 phba->work_ha |= HA_ERATT;
11371 phba->hba_flag |= HBA_ERATT_HANDLED;
11372 return 1;
11373 }
11374 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
11375 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11376 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11377 "1423 HBA Unrecoverable error: "
11378 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
11379 "ue_mask_lo_reg=0x%x, "
11380 "ue_mask_hi_reg=0x%x\n",
11381 uerr_sta_lo, uerr_sta_hi,
11382 phba->sli4_hba.ue_mask_lo,
11383 phba->sli4_hba.ue_mask_hi);
11384 phba->work_status[0] = uerr_sta_lo;
11385 phba->work_status[1] = uerr_sta_hi;
11386 phba->work_ha |= HA_ERATT;
11387 phba->hba_flag |= HBA_ERATT_HANDLED;
11388 return 1;
11389 }
11390 break;
11391 case LPFC_SLI_INTF_IF_TYPE_2:
11392 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
11393 &portstat_reg.word0) ||
11394 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
11395 &portsmphr)) {
11396 phba->work_hs |= UNPLUG_ERR;
11397 phba->work_ha |= HA_ERATT;
11398 phba->hba_flag |= HBA_ERATT_HANDLED;
11399 return 1;
11400 }
11401 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
11402 phba->work_status[0] =
11403 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
11404 phba->work_status[1] =
11405 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11406 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11407 "2885 Port Status Event: "
11408 "port status reg 0x%x, "
11409 "port smphr reg 0x%x, "
11410 "error 1=0x%x, error 2=0x%x\n",
11411 portstat_reg.word0,
11412 portsmphr,
11413 phba->work_status[0],
11414 phba->work_status[1]);
11415 phba->work_ha |= HA_ERATT;
11416 phba->hba_flag |= HBA_ERATT_HANDLED;
11417 return 1;
11418 }
11419 break;
11420 case LPFC_SLI_INTF_IF_TYPE_1:
11421 default:
11422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11423 "2886 HBA Error Attention on unsupported "
11424 "if type %d.", if_type);
11432 * lpfc_sli_check_eratt - check error attention events
11433 * @phba: Pointer to HBA context.
11435 * This function is called from timer soft interrupt context to check HBA's
11436 * error attention register bit for error attention events.
11438 * This function returns 1 when there is Error Attention in the Host Attention
11439 * Register and returns 0 otherwise.
11442 lpfc_sli_check_eratt(struct lpfc_hba *phba)
11444 uint32_t ha_copy;
11446 /* If somebody is waiting to handle an eratt, don't process it
11447 * here. The brdkill function will do this.
11449 if (phba->link_flag & LS_IGNORE_ERATT)
11450 return 0;
11452 /* Check if interrupt handler handles this ERATT */
11453 spin_lock_irq(&phba->hbalock);
11454 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11455 /* Interrupt handler has handled ERATT */
11456 spin_unlock_irq(&phba->hbalock);
11457 return 0;
11458 }
11460 /*
11461 * If there is deferred error attention, do not check for error
11462 * attention events.
11463 */
11464 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11465 spin_unlock_irq(&phba->hbalock);
11466 return 0;
11467 }
11469 /* If PCI channel is offline, don't process it */
11470 if (unlikely(pci_channel_offline(phba->pcidev))) {
11471 spin_unlock_irq(&phba->hbalock);
11472 return 0;
11473 }
11475 switch (phba->sli_rev) {
11476 case LPFC_SLI_REV2:
11477 case LPFC_SLI_REV3:
11478 /* Read chip Host Attention (HA) register */
11479 ha_copy = lpfc_sli_eratt_read(phba);
11480 break;
11481 case LPFC_SLI_REV4:
11482 /* Read device Unrecoverable Error (UERR) registers */
11483 ha_copy = lpfc_sli4_eratt_read(phba);
11484 break;
11485 default:
11486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11487 "0299 Invalid SLI revision (%d)\n",
11488 phba->sli_rev);
11489 ha_copy = 0;
11490 break;
11491 }
11492 spin_unlock_irq(&phba->hbalock);
11494 return ha_copy;
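/*
 * Usage sketch (illustrative only): a timer handler can poll for
 * error attention and kick the worker thread when one is latched.
 * lpfc_worker_wake_up() is assumed here; any mechanism that wakes the
 * worker thread to service phba->work_ha would do.
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */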
11498 * lpfc_intr_state_check - Check device state for interrupt handling
11499 * @phba: Pointer to HBA context.
11501 * This inline routine checks whether a device or its PCI slot is in a state
11502 * that the interrupt should be handled.
11504 * This function returns 0 if the device or the PCI slot is in a state that
11505 * interrupt should be handled, otherwise -EIO.
11508 lpfc_intr_state_check(struct lpfc_hba *phba)
11510 /* If the pci channel is offline, ignore all the interrupts */
11511 if (unlikely(pci_channel_offline(phba->pcidev)))
11512 return -EIO;
11514 /* Update device level interrupt statistics */
11515 phba->sli.slistat.sli_intr++;
11517 /* Ignore all interrupts during initialization. */
11518 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11519 return -EIO;
11521 return 0;
11525 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11526 * @irq: Interrupt number.
11527 * @dev_id: The device context pointer.
11529 * This function is directly called from the PCI layer as an interrupt
11530 * service routine when device with SLI-3 interface spec is enabled with
11531 * MSI-X multi-message interrupt mode and there are slow-path events in
11532 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11533 * interrupt mode, this function is called as part of the device-level
11534 * interrupt handler. When the PCI slot is in error recovery or the HBA
11535 * is undergoing initialization, the interrupt handler will not process
11536 * the interrupt. The link attention and ELS ring attention events are
11537 * handled by the worker thread. The interrupt handler signals the worker
11538 * thread and returns for these events. This function is called without
11539 * any lock held. It gets the hbalock to access and update SLI data
11540 * structures.
11542 * This function returns IRQ_HANDLED when interrupt is handled else it
11543 * returns IRQ_NONE.
11546 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
11548 struct lpfc_hba *phba;
11549 uint32_t ha_copy, hc_copy;
11550 uint32_t work_ha_copy;
11551 unsigned long status;
11552 unsigned long iflag;
11553 uint32_t control;
11555 MAILBOX_t *mbox, *pmbox;
11556 struct lpfc_vport *vport;
11557 struct lpfc_nodelist *ndlp;
11558 struct lpfc_dmabuf *mp;
11559 LPFC_MBOXQ_t *pmb;
11560 int rc;
11562 /*
11563 * Get the driver's phba structure from the dev_id and
11564 * assume the HBA is not interrupting.
11565 */
11566 phba = (struct lpfc_hba *)dev_id;
11568 if (unlikely(!phba))
11569 return IRQ_NONE;
11571 /*
11572 * Stuff needs to be attended to when this function is invoked as an
11573 * individual interrupt handler in MSI-X multi-message interrupt mode
11574 */
11575 if (phba->intr_type == MSIX) {
11576 /* Check device state for handling interrupt */
11577 if (lpfc_intr_state_check(phba))
11578 return IRQ_NONE;
11579 /* Need to read HA REG for slow-path events */
11580 spin_lock_irqsave(&phba->hbalock, iflag);
11581 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11582 goto unplug_error;
11583 /* If somebody is waiting to handle an eratt don't process it
11584 * here. The brdkill function will do this.
11585 */
11586 if (phba->link_flag & LS_IGNORE_ERATT)
11587 ha_copy &= ~HA_ERATT;
11588 /* Check the need for handling ERATT in interrupt handler */
11589 if (ha_copy & HA_ERATT) {
11590 if (phba->hba_flag & HBA_ERATT_HANDLED)
11591 /* ERATT polling has handled ERATT */
11592 ha_copy &= ~HA_ERATT;
11593 else
11594 /* Indicate interrupt handler handles ERATT */
11595 phba->hba_flag |= HBA_ERATT_HANDLED;
11598 /*
11599 * If there is deferred error attention, do not check for any
11600 * interrupt.
11601 */
11602 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11603 spin_unlock_irqrestore(&phba->hbalock, iflag);
11604 return IRQ_NONE;
11605 }
11607 /* Clear up only attention source related to slow-path */
11609 if (lpfc_readl(phba->HCregaddr, &hc_copy))
11610 goto unplug_error;
11611 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11612 HC_LAINT_ENA | HC_ERINT_ENA),
11614 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11616 writel(hc_copy, phba->HCregaddr);
11617 readl(phba->HAregaddr); /* flush */
11618 spin_unlock_irqrestore(&phba->hbalock, iflag);
11619 } else
11620 ha_copy = phba->ha_copy;
11622 work_ha_copy = ha_copy & phba->work_ha_mask;
11624 if (work_ha_copy) {
11625 if (work_ha_copy & HA_LATT) {
11626 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11627 /*
11628 * Turn off Link Attention interrupts
11629 * until CLEAR_LA done
11630 */
11631 spin_lock_irqsave(&phba->hbalock, iflag);
11632 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
11633 if (lpfc_readl(phba->HCregaddr, &control))
11634 goto unplug_error;
11635 control &= ~HC_LAINT_ENA;
11636 writel(control, phba->HCregaddr);
11637 readl(phba->HCregaddr); /* flush */
11638 spin_unlock_irqrestore(&phba->hbalock, iflag);
11641 work_ha_copy &= ~HA_LATT;
11644 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
11646 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
11647 * the only slow ring.
11649 status = (work_ha_copy &
11650 (HA_RXMASK << (4*LPFC_ELS_RING)));
11651 status >>= (4*LPFC_ELS_RING);
11652 if (status & HA_RXMASK) {
11653 spin_lock_irqsave(&phba->hbalock, iflag);
11654 if (lpfc_readl(phba->HCregaddr, &control))
11657 lpfc_debugfs_slow_ring_trc(phba,
11658 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
11660 (uint32_t)phba->sli.slistat.sli_intr);
11662 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
11663 lpfc_debugfs_slow_ring_trc(phba,
11664 "ISR Disable ring:"
11665 "pwork:x%x hawork:x%x wait:x%x",
11666 phba->work_ha, work_ha_copy,
11667 (uint32_t)((unsigned long)
11668 &phba->work_waitq));
11671 ~(HC_R0INT_ENA << LPFC_ELS_RING);
11672 writel(control, phba->HCregaddr);
11673 readl(phba->HCregaddr); /* flush */
11676 lpfc_debugfs_slow_ring_trc(phba,
11677 "ISR slow ring: pwork:"
11678 "x%x hawork:x%x wait:x%x",
11679 phba->work_ha, work_ha_copy,
11680 (uint32_t)((unsigned long)
11681 &phba->work_waitq));
11683 spin_unlock_irqrestore(&phba->hbalock, iflag);
11686 spin_lock_irqsave(&phba->hbalock, iflag);
11687 if (work_ha_copy & HA_ERATT) {
11688 if (lpfc_sli_read_hs(phba))
11691 * Check if there is a deferred error condition
11694 if ((HS_FFER1 & phba->work_hs) &&
11695 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11696 HS_FFER6 | HS_FFER7 | HS_FFER8) &
11698 phba->hba_flag |= DEFER_ERATT;
11699 /* Clear all interrupt enable conditions */
11700 writel(0, phba->HCregaddr);
11701 readl(phba->HCregaddr);
11705 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
11706 pmb = phba->sli.mbox_active;
11707 pmbox = &pmb->u.mb;
11709 vport = pmb->vport;
11711 /* First check out the status word */
11712 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11713 if (pmbox->mbxOwner != OWN_HOST) {
11714 spin_unlock_irqrestore(&phba->hbalock, iflag);
11716 * Stray Mailbox Interrupt, mbxCommand <cmd>
11717 * mbxStatus <status>
11719 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11721 "(%d):0304 Stray Mailbox "
11722 "Interrupt mbxCommand x%x "
11724 (vport ? vport->vpi : 0),
11727 /* clear mailbox attention bit */
11728 work_ha_copy &= ~HA_MBATT;
11730 phba->sli.mbox_active = NULL;
11731 spin_unlock_irqrestore(&phba->hbalock, iflag);
11732 phba->last_completion_time = jiffies;
11733 del_timer(&phba->sli.mbox_tmo);
11734 if (pmb->mbox_cmpl) {
11735 lpfc_sli_pcimem_bcopy(mbox, pmbox,
11737 if (pmb->out_ext_byte_len &&
11739 lpfc_sli_pcimem_bcopy(
11742 pmb->out_ext_byte_len);
11744 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11745 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11747 lpfc_debugfs_disc_trc(vport,
11748 LPFC_DISC_TRC_MBOX_VPORT,
11749 "MBOX dflt rpi: : "
11750 "status:x%x rpi:x%x",
11751 (uint32_t)pmbox->mbxStatus,
11752 pmbox->un.varWords[0], 0);
11754 if (!pmbox->mbxStatus) {
11755 mp = (struct lpfc_dmabuf *)
11757 ndlp = (struct lpfc_nodelist *)
11760 /* Reg_LOGIN of dflt RPI was
11761 * successful. Now let's get
11762 * rid of the RPI using the
11763 * same mbox buffer.
11765 lpfc_unreg_login(phba,
11767 pmbox->un.varWords[0],
11770 lpfc_mbx_cmpl_dflt_rpi;
11771 pmb->context1 = mp;
11772 pmb->context2 = ndlp;
11773 pmb->vport = vport;
11774 rc = lpfc_sli_issue_mbox(phba,
11777 if (rc != MBX_BUSY)
11778 lpfc_printf_log(phba,
11780 LOG_MBOX | LOG_SLI,
11781 "0350 rc should have"
11782 "been MBX_BUSY\n");
11783 if (rc != MBX_NOT_FINISHED)
11784 goto send_current_mbox;
11788 &phba->pport->work_port_lock,
11790 phba->pport->work_port_events &=
11792 spin_unlock_irqrestore(
11793 &phba->pport->work_port_lock,
11795 lpfc_mbox_cmpl_put(phba, pmb);
11798 spin_unlock_irqrestore(&phba->hbalock, iflag);
11800 if ((work_ha_copy & HA_MBATT) &&
11801 (phba->sli.mbox_active == NULL)) {
11803 /* Process next mailbox command if there is one */
11805 rc = lpfc_sli_issue_mbox(phba, NULL,
11807 } while (rc == MBX_NOT_FINISHED);
11808 if (rc != MBX_SUCCESS)
11809 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11810 LOG_SLI, "0349 rc should be "
11814 spin_lock_irqsave(&phba->hbalock, iflag);
11815 phba->work_ha |= work_ha_copy;
11816 spin_unlock_irqrestore(&phba->hbalock, iflag);
11817 lpfc_worker_wake_up(phba);
11819 return IRQ_HANDLED;
11821 spin_unlock_irqrestore(&phba->hbalock, iflag);
11822 return IRQ_HANDLED;
11824 } /* lpfc_sli_sp_intr_handler */
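/*
 * Illustrative sketch (not part of the driver): in MSI-X mode the
 * slow-path handler above and the fast-path handler below are each
 * wired to their own vector with request_irq(); the vector indices and
 * name strings here are hypothetical and error unwinding is omitted.
 *
 *	rc = request_irq(phba->msix_entries[0].vector,
 *			 lpfc_sli_sp_intr_handler, 0, "lpfc:sp", phba);
 *	if (!rc)
 *		rc = request_irq(phba->msix_entries[1].vector,
 *				 lpfc_sli_fp_intr_handler, 0, "lpfc:fp", phba);
 */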
11827 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
11828 * @irq: Interrupt number.
11829 * @dev_id: The device context pointer.
11831 * This function is directly called from the PCI layer as an interrupt
11832 * service routine when device with SLI-3 interface spec is enabled with
11833 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11834 * ring event in the HBA. However, when the device is enabled with either
11835 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11836 * device-level interrupt handler. When the PCI slot is in error recovery
11837 * or the HBA is undergoing initialization, the interrupt handler will not
11838 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11839 * the interrupt context. This function is called without any lock held.
11840 * It gets the hbalock to access and update SLI data structures.
11842 * This function returns IRQ_HANDLED when interrupt is handled else it
11843 * returns IRQ_NONE.
11846 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
11848 struct lpfc_hba *phba;
11850 unsigned long status;
11851 unsigned long iflag;
11852 struct lpfc_sli_ring *pring;
11854 /* Get the driver's phba structure from the dev_id and
11855 * assume the HBA is not interrupting.
11857 phba = (struct lpfc_hba *) dev_id;
11859 if (unlikely(!phba))
11863 * Stuff needs to be attended to when this function is invoked as an
11864 * individual interrupt handler in MSI-X multi-message interrupt mode
11866 if (phba->intr_type == MSIX) {
11867 /* Check device state for handling interrupt */
11868 if (lpfc_intr_state_check(phba))
11870 /* Need to read HA REG for FCP ring and other ring events */
11871 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11872 return IRQ_HANDLED;
11873 /* Clear up only attention source related to fast-path */
11874 spin_lock_irqsave(&phba->hbalock, iflag);
11876 * If there is deferred error attention, do not check for
11879 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11880 spin_unlock_irqrestore(&phba->hbalock, iflag);
11883 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
11885 readl(phba->HAregaddr); /* flush */
11886 spin_unlock_irqrestore(&phba->hbalock, iflag);
11888 ha_copy = phba->ha_copy;
11891 * Process all events on FCP ring. Take the optimized path for FCP IO.
11893 ha_copy &= ~(phba->work_ha_mask);
11895 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
11896 status >>= (4*LPFC_FCP_RING);
11897 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
11898 if (status & HA_RXMASK)
11899 lpfc_sli_handle_fast_ring_event(phba, pring, status);
11901 if (phba->cfg_multi_ring_support == 2) {
11903 * Process all events on extra ring. Take the optimized path
11904 * for extra ring IO.
11906 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
11907 status >>= (4*LPFC_EXTRA_RING);
11908 if (status & HA_RXMASK) {
11909 lpfc_sli_handle_fast_ring_event(phba,
11910 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
11914 return IRQ_HANDLED;
11915 } /* lpfc_sli_fp_intr_handler */
11918 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
11919 * @irq: Interrupt number.
11920 * @dev_id: The device context pointer.
11922 * This function is the HBA device-level interrupt handler to device with
11923 * SLI-3 interface spec, called from the PCI layer when either MSI or
11924 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
11925 * requires driver attention. This function invokes the slow-path interrupt
11926 * attention handling function and fast-path interrupt attention handling
11927 * function in turn to process the relevant HBA attention events. This
11928 * function is called without any lock held. It gets the hbalock to access
11929 * and update SLI data structures.
11931 * This function returns IRQ_HANDLED when interrupt is handled, else it
11932 * returns IRQ_NONE.
11935 lpfc_sli_intr_handler(int irq, void *dev_id)
11937 struct lpfc_hba *phba;
11938 irqreturn_t sp_irq_rc, fp_irq_rc;
11939 unsigned long status1, status2;
11943 * Get the driver's phba structure from the dev_id and
11944 * assume the HBA is not interrupting.
11946 phba = (struct lpfc_hba *) dev_id;
11948 if (unlikely(!phba))
11951 /* Check device state for handling interrupt */
11952 if (lpfc_intr_state_check(phba))
11955 spin_lock(&phba->hbalock);
11956 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
11957 spin_unlock(&phba->hbalock);
11958 return IRQ_HANDLED;
11961 if (unlikely(!phba->ha_copy)) {
11962 spin_unlock(&phba->hbalock);
11964 } else if (phba->ha_copy & HA_ERATT) {
11965 if (phba->hba_flag & HBA_ERATT_HANDLED)
11966 /* ERATT polling has handled ERATT */
11967 phba->ha_copy &= ~HA_ERATT;
11969 /* Indicate interrupt handler handles ERATT */
11970 phba->hba_flag |= HBA_ERATT_HANDLED;
11974 * If there is deferred error attention, do not check for any interrupt.
11976 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11977 spin_unlock(&phba->hbalock);
11981 /* Clear attention sources except link and error attentions */
11982 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
11983 spin_unlock(&phba->hbalock);
11984 return IRQ_HANDLED;
11986 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
11987 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
11989 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
11990 writel(hc_copy, phba->HCregaddr);
11991 readl(phba->HAregaddr); /* flush */
11992 spin_unlock(&phba->hbalock);
11995 * Invoke slow-path host attention interrupt handling as appropriate.
11998 /* status of events with mailbox and link attention */
11999 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12001 /* status of events with ELS ring */
12002 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12003 status2 >>= (4*LPFC_ELS_RING);
12005 if (status1 || (status2 & HA_RXMASK))
12006 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12008 sp_irq_rc = IRQ_NONE;
12011 * Invoke fast-path host attention interrupt handling as appropriate.
12014 /* status of events with FCP ring */
12015 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12016 status1 >>= (4*LPFC_FCP_RING);
12018 /* status of events with extra ring */
12019 if (phba->cfg_multi_ring_support == 2) {
12020 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12021 status2 >>= (4*LPFC_EXTRA_RING);
12025 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12026 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12028 fp_irq_rc = IRQ_NONE;
12030 /* Return device-level interrupt handling status */
12031 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12032 } /* lpfc_sli_intr_handler */
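/*
 * Worked example (illustrative values) of the per-ring attention math
 * used above; a 4-bit attention field per ring is implied by the
 * HA_RXMASK << (4 * ring) idiom:
 *
 *	uint32_t ha = 0x00000210;	// hypothetical HA register value
 *	uint32_t els = (ha >> (4 * LPFC_ELS_RING)) & HA_RXMASK;
 *	// assuming LPFC_ELS_RING == 2, this isolates nibble 0x2:
 *	// the ELS ring has work pending, ring 1's bit is untouched
 */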
12035 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12036 * @phba: pointer to lpfc hba data structure.
12038 * This routine is invoked by the worker thread to process all the pending
12039 * SLI4 FCP abort XRI events.
12041 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12043 struct lpfc_cq_event *cq_event;
12045 /* First, declare the fcp xri abort event has been handled */
12046 spin_lock_irq(&phba->hbalock);
12047 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12048 spin_unlock_irq(&phba->hbalock);
12049 /* Now, handle all the fcp xri abort events */
12050 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12051 /* Get the first event from the head of the event queue */
12052 spin_lock_irq(&phba->hbalock);
12053 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12054 cq_event, struct lpfc_cq_event, list);
12055 spin_unlock_irq(&phba->hbalock);
12056 /* Notify aborted XRI for FCP work queue */
12057 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12058 /* Free the event processed back to the free pool */
12059 lpfc_sli4_cq_event_release(phba, cq_event);
12064 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12065 * @phba: pointer to lpfc hba data structure.
12067 * This routine is invoked by the worker thread to process all the pending
12068 * SLI4 els abort xri events.
12070 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12072 struct lpfc_cq_event *cq_event;
12074 /* First, declare the els xri abort event has been handled */
12075 spin_lock_irq(&phba->hbalock);
12076 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12077 spin_unlock_irq(&phba->hbalock);
12078 /* Now, handle all the els xri abort events */
12079 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12080 /* Get the first event from the head of the event queue */
12081 spin_lock_irq(&phba->hbalock);
12082 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12083 cq_event, struct lpfc_cq_event, list);
12084 spin_unlock_irq(&phba->hbalock);
12085 /* Notify aborted XRI for ELS work queue */
12086 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12087 /* Free the event processed back to the free pool */
12088 lpfc_sli4_cq_event_release(phba, cq_event);
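/*
 * Both drain routines above follow the same handoff protocol with the
 * CQ-side producer (see lpfc_sli4_sp_handle_abort_xri_wcqe() below):
 * the producer queues a struct lpfc_cq_event and sets the hba_flag bit
 * under hbalock, then wakes the worker; the consumer clears the flag
 * before draining, so an event that arrives mid-drain simply re-arms
 * the flag and triggers another pass.
 */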
12093 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12094 * @phba: pointer to lpfc hba data structure
12095 * @pIocbIn: pointer to the rspiocbq
12096 * @pIocbOut: pointer to the cmdiocbq
12097 * @wcqe: pointer to the complete wcqe
12099 * This routine transfers the fields of a command iocbq to a response iocbq
12100 * by copying all the IOCB fields from command iocbq and transferring the
12101 * completion status information from the complete wcqe.
12104 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12105 struct lpfc_iocbq *pIocbIn,
12106 struct lpfc_iocbq *pIocbOut,
12107 struct lpfc_wcqe_complete *wcqe)
12110 unsigned long iflags;
12111 uint32_t status, max_response;
12112 struct lpfc_dmabuf *dmabuf;
12113 struct ulp_bde64 *bpl, bde;
12114 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12116 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12117 sizeof(struct lpfc_iocbq) - offset);
12118 /* Map WCQE parameters into irspiocb parameters */
12119 status = bf_get(lpfc_wcqe_c_status, wcqe);
12120 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12121 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12122 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12123 pIocbIn->iocb.un.fcpi.fcpi_parm =
12124 pIocbOut->iocb.un.fcpi.fcpi_parm -
12125 wcqe->total_data_placed;
12127 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12129 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12130 switch (pIocbOut->iocb.ulpCommand) {
12131 case CMD_ELS_REQUEST64_CR:
12132 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12133 bpl = (struct ulp_bde64 *)dmabuf->virt;
12134 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12135 max_response = bde.tus.f.bdeSize;
12137 case CMD_GEN_REQUEST64_CR:
12139 if (!pIocbOut->context3)
12141 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12142 sizeof(struct ulp_bde64);
12143 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12144 bpl = (struct ulp_bde64 *)dmabuf->virt;
12145 for (i = 0; i < numBdes; i++) {
12146 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12147 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12148 max_response += bde.tus.f.bdeSize;
12152 max_response = wcqe->total_data_placed;
12155 if (max_response < wcqe->total_data_placed)
12156 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12158 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12159 wcqe->total_data_placed;
12162 /* Convert BG errors for completion status */
12163 if (status == CQE_STATUS_DI_ERROR) {
12164 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12166 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12167 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12169 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12171 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12172 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12173 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12174 BGS_GUARD_ERR_MASK;
12175 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12176 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12177 BGS_APPTAG_ERR_MASK;
12178 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12179 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12180 BGS_REFTAG_ERR_MASK;
12182 /* Check to see if there was any good data before the error */
12183 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12184 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12185 BGS_HI_WATER_MARK_PRESENT_MASK;
12186 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12187 wcqe->total_data_placed;
12191 * Set ALL the error bits to indicate we don't know what
12192 * type of error it is.
12194 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12195 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12196 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12197 BGS_GUARD_ERR_MASK);
12200 /* Pick up HBA exchange busy condition */
12201 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12202 spin_lock_irqsave(&phba->hbalock, iflags);
12203 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12204 spin_unlock_irqrestore(&phba->hbalock, iflags);
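/*
 * Worked example (illustrative numbers) of the FCP underrun residual
 * computed above: a command issued with fcpi_parm = 4096 that completes
 * with wcqe->total_data_placed = 1024 yields a pseudo response carrying
 * fcpi_parm = 4096 - 1024 = 3072, the residual reported for
 * IOSTAT_FCP_RSP_ERROR completions.
 */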
12209 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12210 * @phba: Pointer to HBA context object.
12211 * @wcqe: Pointer to work-queue completion queue entry.
12213 * This routine handles an ELS work-queue completion event and constructs
12214 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12215 * discovery engine to handle.
12217 * Return: Pointer to the receive IOCBQ, NULL otherwise.
12219 static struct lpfc_iocbq *
12220 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12221 struct lpfc_iocbq *irspiocbq)
12223 struct lpfc_sli_ring *pring;
12224 struct lpfc_iocbq *cmdiocbq;
12225 struct lpfc_wcqe_complete *wcqe;
12226 unsigned long iflags;
12228 pring = lpfc_phba_elsring(phba);
12230 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12231 spin_lock_irqsave(&pring->ring_lock, iflags);
12232 pring->stats.iocb_event++;
12233 /* Look up the ELS command IOCB and create pseudo response IOCB */
12234 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12235 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12236 /* Put the iocb back on the txcmplq */
12237 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12238 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12240 if (unlikely(!cmdiocbq)) {
12241 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12242 "0386 ELS complete with no corresponding "
12243 "cmdiocb: iotag (%d)\n",
12244 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12245 lpfc_sli_release_iocbq(phba, irspiocbq);
12249 /* Fake the irspiocbq and copy necessary response information */
12250 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
12256 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12257 * @phba: Pointer to HBA context object.
12258 * @mcqe: Pointer to mailbox completion queue entry.
12260 * This routine processes a mailbox completion queue entry with asynchronous
12263 * Return: true if work posted to worker thread, otherwise false.
12266 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12268 struct lpfc_cq_event *cq_event;
12269 unsigned long iflags;
12271 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12272 "0392 Async Event: word0:x%x, word1:x%x, "
12273 "word2:x%x, word3:x%x\n", mcqe->word0,
12274 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12276 /* Allocate a new internal CQ_EVENT entry */
12277 cq_event = lpfc_sli4_cq_event_alloc(phba);
12279 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12280 "0394 Failed to allocate CQ_EVENT entry\n");
12284 /* Move the CQE into an asynchronous event entry */
12285 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
12286 spin_lock_irqsave(&phba->hbalock, iflags);
12287 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12288 /* Set the async event flag */
12289 phba->hba_flag |= ASYNC_EVENT;
12290 spin_unlock_irqrestore(&phba->hbalock, iflags);
12296 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12297 * @phba: Pointer to HBA context object.
12298 * @mcqe: Pointer to mailbox completion queue entry.
12300 * This routine processes a mailbox completion queue entry with mailbox
12301 * completion event.
12303 * Return: true if work posted to worker thread, otherwise false.
12306 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12308 uint32_t mcqe_status;
12309 MAILBOX_t *mbox, *pmbox;
12310 struct lpfc_mqe *mqe;
12311 struct lpfc_vport *vport;
12312 struct lpfc_nodelist *ndlp;
12313 struct lpfc_dmabuf *mp;
12314 unsigned long iflags;
12316 bool workposted = false;
12319 /* If not a mailbox complete MCQE, bail out; just handle mailbox consume */
12320 if (!bf_get(lpfc_trailer_completed, mcqe))
12321 goto out_no_mqe_complete;
12323 /* Get the reference to the active mbox command */
12324 spin_lock_irqsave(&phba->hbalock, iflags);
12325 pmb = phba->sli.mbox_active;
12326 if (unlikely(!pmb)) {
12327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12328 "1832 No pending MBOX command to handle\n");
12329 spin_unlock_irqrestore(&phba->hbalock, iflags);
12330 goto out_no_mqe_complete;
12332 spin_unlock_irqrestore(&phba->hbalock, iflags);
12334 pmbox = (MAILBOX_t *)&pmb->u.mqe;
12336 vport = pmb->vport;
12338 /* Reset heartbeat timer */
12339 phba->last_completion_time = jiffies;
12340 del_timer(&phba->sli.mbox_tmo);
12342 /* Move mbox data to caller's mailbox region, do endian swapping */
12343 if (pmb->mbox_cmpl && mbox)
12344 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
12347 * For mcqe errors, conditionally move a modified error code to
12348 * the mbox so that the error will not be missed.
12350 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12351 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12352 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12353 bf_set(lpfc_mqe_status, mqe,
12354 (LPFC_MBX_ERROR_RANGE | mcqe_status));
12356 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12357 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12358 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12359 "MBOX dflt rpi: status:x%x rpi:x%x",
12361 pmbox->un.varWords[0], 0);
12362 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12363 mp = (struct lpfc_dmabuf *)(pmb->context1);
12364 ndlp = (struct lpfc_nodelist *)pmb->context2;
12365 /* Reg_LOGIN of dflt RPI was successful. Now let's get
12366 * rid of the RPI using the same mbox buffer.
12368 lpfc_unreg_login(phba, vport->vpi,
12369 pmbox->un.varWords[0], pmb);
12370 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12371 pmb->context1 = mp;
12372 pmb->context2 = ndlp;
12373 pmb->vport = vport;
12374 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12375 if (rc != MBX_BUSY)
12376 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12377 LOG_SLI, "0385 rc should "
12378 "have been MBX_BUSY\n");
12379 if (rc != MBX_NOT_FINISHED)
12380 goto send_current_mbox;
12383 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12384 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12385 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12387 /* There is mailbox completion work to do */
12388 spin_lock_irqsave(&phba->hbalock, iflags);
12389 __lpfc_mbox_cmpl_put(phba, pmb);
12390 phba->work_ha |= HA_MBATT;
12391 spin_unlock_irqrestore(&phba->hbalock, iflags);
12395 spin_lock_irqsave(&phba->hbalock, iflags);
12396 /* Release the mailbox command posting token */
12397 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12398 /* Setting the active mailbox pointer must be in sync with the flag clear */
12399 phba->sli.mbox_active = NULL;
12400 spin_unlock_irqrestore(&phba->hbalock, iflags);
12401 /* Wake up worker thread to post the next pending mailbox command */
12402 lpfc_worker_wake_up(phba);
12403 out_no_mqe_complete:
12404 if (bf_get(lpfc_trailer_consumed, mcqe))
12405 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
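/*
 * Note on the MCQE status remap above: a non-success MCQE status is
 * folded into the MQE status word as (LPFC_MBX_ERROR_RANGE | mcqe_status)
 * so callers that only check lpfc_mqe_status never see a stale
 * MBX_SUCCESS. For example, if LPFC_MBX_ERROR_RANGE were 0x4000
 * (hypothetical value) and mcqe_status were 0x2, callers would
 * observe 0x4002.
 */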
12410 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12411 * @phba: Pointer to HBA context object.
12412 * @cqe: Pointer to mailbox completion queue entry.
12414 * This routine processes a mailbox completion queue entry; it invokes the
12415 * proper mailbox completion handling or asynchronous event handling routine
12416 * according to the MCQE's async bit.
12418 * Return: true if work posted to worker thread, otherwise false.
12421 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12423 struct lpfc_mcqe mcqe;
12426 /* Copy the mailbox MCQE and convert endian order as needed */
12427 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12429 /* Invoke the proper event handling routine */
12430 if (!bf_get(lpfc_trailer_async, &mcqe))
12431 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12433 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12438 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12439 * @phba: Pointer to HBA context object.
12440 * @cq: Pointer to associated CQ
12441 * @wcqe: Pointer to work-queue completion queue entry.
12443 * This routine handles an ELS work-queue completion event.
12445 * Return: true if work posted to worker thread, otherwise false.
12448 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12449 struct lpfc_wcqe_complete *wcqe)
12451 struct lpfc_iocbq *irspiocbq;
12452 unsigned long iflags;
12453 struct lpfc_sli_ring *pring = cq->pring;
12455 int txcmplq_cnt = 0;
12456 int fcp_txcmplq_cnt = 0;
12458 /* Get an irspiocbq for later ELS response processing use */
12459 irspiocbq = lpfc_sli_get_iocbq(phba);
12461 if (!list_empty(&pring->txq))
12463 if (!list_empty(&pring->txcmplq))
12465 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12466 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12467 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12468 txq_cnt, phba->iocb_cnt,
12474 /* Save off the slow-path queue event for the worker thread to process */
12475 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12476 spin_lock_irqsave(&phba->hbalock, iflags);
12477 list_add_tail(&irspiocbq->cq_event.list,
12478 &phba->sli4_hba.sp_queue_event);
12479 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12480 spin_unlock_irqrestore(&phba->hbalock, iflags);
12486 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12487 * @phba: Pointer to HBA context object.
12488 * @wcqe: Pointer to work-queue completion queue entry.
12490 * This routine handles a slow-path WQ entry consumed event by invoking the
12491 * proper WQ release routine to the slow-path WQ.
12494 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12495 struct lpfc_wcqe_release *wcqe)
12497 /* sanity check on queue memory */
12498 if (unlikely(!phba->sli4_hba.els_wq))
12500 /* Check for the slow-path ELS work queue */
12501 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12502 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12503 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12505 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12506 "2579 Slow-path wqe consume event carries "
12507 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12508 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12509 phba->sli4_hba.els_wq->queue_id);
12513 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
12514 * @phba: Pointer to HBA context object.
12515 * @cq: Pointer to a WQ completion queue.
12516 * @wcqe: Pointer to work-queue completion queue entry.
12518 * This routine handles an XRI abort event.
12520 * Return: true if work posted to worker thread, otherwise false.
12523 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12524 struct lpfc_queue *cq,
12525 struct sli4_wcqe_xri_aborted *wcqe)
12527 bool workposted = false;
12528 struct lpfc_cq_event *cq_event;
12529 unsigned long iflags;
12531 /* Allocate a new internal CQ_EVENT entry */
12532 cq_event = lpfc_sli4_cq_event_alloc(phba);
12534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12535 "0602 Failed to allocate CQ_EVENT entry\n");
12539 /* Move the CQE into the proper xri abort event list */
12540 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12541 switch (cq->subtype) {
12543 spin_lock_irqsave(&phba->hbalock, iflags);
12544 list_add_tail(&cq_event->list,
12545 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12546 /* Set the fcp xri abort event flag */
12547 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12548 spin_unlock_irqrestore(&phba->hbalock, iflags);
12552 spin_lock_irqsave(&phba->hbalock, iflags);
12553 list_add_tail(&cq_event->list,
12554 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12555 /* Set the els xri abort event flag */
12556 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12557 spin_unlock_irqrestore(&phba->hbalock, iflags);
12561 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12562 "0603 Invalid work queue CQE subtype (x%x)\n",
12564 workposted = false;
12571 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
12572 * @phba: Pointer to HBA context object.
12573 * @rcqe: Pointer to receive-queue completion queue entry.
12575 * This routine processes a receive-queue completion queue entry.
12577 * Return: true if work posted to worker thread, otherwise false.
12580 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12582 bool workposted = false;
12583 struct fc_frame_header *fc_hdr;
12584 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12585 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12586 struct hbq_dmabuf *dma_buf;
12587 uint32_t status, rq_id;
12588 unsigned long iflags;
12590 /* sanity check on queue memory */
12591 if (unlikely(!hrq) || unlikely(!drq))
12594 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12595 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12597 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12598 if (rq_id != hrq->queue_id)
12601 status = bf_get(lpfc_rcqe_status, rcqe);
12603 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12604 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12605 "2537 Receive Frame Truncated!!\n");
12606 hrq->RQ_buf_trunc++;
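/* fall through - a truncated frame still delivered a buffer,
 * so hand it up like a normal receive
 */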
12607 case FC_STATUS_RQ_SUCCESS:
12608 lpfc_sli4_rq_release(hrq, drq);
12609 spin_lock_irqsave(&phba->hbalock, iflags);
12610 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12612 hrq->RQ_no_buf_found++;
12613 spin_unlock_irqrestore(&phba->hbalock, iflags);
12617 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12619 /* If an NVME LS event (type 0x28), treat it as fast path */
12620 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
12622 /* save off the frame for the worker thread to process */
12623 list_add_tail(&dma_buf->cq_event.list,
12624 &phba->sli4_hba.sp_queue_event);
12625 /* Frame received */
12626 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12627 spin_unlock_irqrestore(&phba->hbalock, iflags);
12630 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12631 case FC_STATUS_INSUFF_BUF_FRM_DISC:
12632 hrq->RQ_no_posted_buf++;
12633 /* Post more buffers if possible */
12634 spin_lock_irqsave(&phba->hbalock, iflags);
12635 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12636 spin_unlock_irqrestore(&phba->hbalock, iflags);
12645 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12646 * @phba: Pointer to HBA context object.
12647 * @cq: Pointer to the completion queue.
12648 * @wcqe: Pointer to a completion queue entry.
12650 * This routine processes a slow-path work-queue or receive-queue completion queue
12653 * Return: true if work posted to worker thread, otherwise false.
12656 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12657 struct lpfc_cqe *cqe)
12659 struct lpfc_cqe cqevt;
12660 bool workposted = false;
12662 /* Copy the work queue CQE and convert endian order if needed */
12663 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
12665 /* Check and process the different types of WCQE and dispatch */
12666 switch (bf_get(lpfc_cqe_code, &cqevt)) {
12667 case CQE_CODE_COMPL_WQE:
12668 /* Process the WQ/RQ complete event */
12669 phba->last_completion_time = jiffies;
12670 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
12671 (struct lpfc_wcqe_complete *)&cqevt);
12673 case CQE_CODE_RELEASE_WQE:
12674 /* Process the WQ release event */
12675 lpfc_sli4_sp_handle_rel_wcqe(phba,
12676 (struct lpfc_wcqe_release *)&cqevt);
12678 case CQE_CODE_XRI_ABORTED:
12679 /* Process the WQ XRI abort event */
12680 phba->last_completion_time = jiffies;
12681 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12682 (struct sli4_wcqe_xri_aborted *)&cqevt);
12684 case CQE_CODE_RECEIVE:
12685 case CQE_CODE_RECEIVE_V1:
12686 /* Process the RQ event */
12687 phba->last_completion_time = jiffies;
12688 workposted = lpfc_sli4_sp_handle_rcqe(phba,
12689 (struct lpfc_rcqe *)&cqevt);
12692 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12693 "0388 Not a valid WCQE code: x%x\n",
12694 bf_get(lpfc_cqe_code, &cqevt));
12701 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
12702 * @phba: Pointer to HBA context object.
12703 * @eqe: Pointer to slow-path event queue entry.
12705 * This routine processes an event queue entry from the slow-path event queue.
12706 * It checks the MajorCode and MinorCode to determine whether this is a
12707 * completion event on a completion queue; if not, an error is logged and the
12708 * routine returns. Otherwise, it gets to the corresponding completion
12709 * queue and processes all the entries on that completion queue, rearms the
12710 * completion queue, and then returns.
12714 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12715 struct lpfc_queue *speq)
12717 struct lpfc_queue *cq = NULL, *childq;
12718 struct lpfc_cqe *cqe;
12719 bool workposted = false;
12723 /* Get the reference to the corresponding CQ */
12724 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12726 list_for_each_entry(childq, &speq->child_list, list) {
12727 if (childq->queue_id == cqid) {
12732 if (unlikely(!cq)) {
12733 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12734 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12735 "0365 Slow-path CQ identifier "
12736 "(%d) does not exist\n", cqid);
12740 /* Save EQ associated with this CQ */
12741 cq->assoc_qp = speq;
12743 /* Process all the entries to the CQ */
12744 switch (cq->type) {
12746 while ((cqe = lpfc_sli4_cq_get(cq))) {
12747 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
12748 if (!(++ecount % cq->entry_repost))
12749 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12754 while ((cqe = lpfc_sli4_cq_get(cq))) {
12755 if ((cq->subtype == LPFC_FCP) ||
12756 (cq->subtype == LPFC_NVME))
12757 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
12760 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12762 if (!(++ecount % cq->entry_repost))
12763 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12766 /* Track the max number of CQEs processed in 1 EQ */
12767 if (ecount > cq->CQ_max_cqe)
12768 cq->CQ_max_cqe = ecount;
12771 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12772 "0370 Invalid completion queue type (%d)\n",
12777 /* Catch the no cq entry condition, log an error */
12778 if (unlikely(ecount == 0))
12779 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12780 "0371 No entry from the CQ: identifier "
12781 "(x%x), type (%d)\n", cq->queue_id, cq->type);
12783 /* In any case, flush and re-arm the CQ */
12784 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12786 /* wake up worker thread if there is work to be done */
12788 lpfc_worker_wake_up(phba);
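/*
 * Worked example (illustrative numbers) of the entry_repost batching in
 * the loops above: with entry_count = 256, lpfc_sli4_queue_alloc() sets
 * entry_repost = 256 >> 3 = 32, so consumed CQEs are handed back to the
 * HBA with LPFC_QUEUE_NOARM every 32 entries, and the final
 * LPFC_QUEUE_REARM release re-enables the CQ.
 */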
12792 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
12793 * @phba: Pointer to HBA context object.
12794 * @cq: Pointer to associated CQ
12795 * @wcqe: Pointer to work-queue completion queue entry.
12797 * This routine processes a fast-path work queue completion entry from the
12798 * fast-path event queue for FCP command response completion.
12801 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12802 struct lpfc_wcqe_complete *wcqe)
12804 struct lpfc_sli_ring *pring = cq->pring;
12805 struct lpfc_iocbq *cmdiocbq;
12806 struct lpfc_iocbq irspiocbq;
12807 unsigned long iflags;
12809 /* Check for response status */
12810 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
12811 /* If resource errors reported from HBA, reduce queue
12812 * depth of the SCSI device.
12814 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
12815 IOSTAT_LOCAL_REJECT)) &&
12816 ((wcqe->parameter & IOERR_PARAM_MASK) ==
12817 IOERR_NO_RESOURCES))
12818 phba->lpfc_rampdown_queue_depth(phba);
12820 /* Log the error status */
12821 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12822 "0373 FCP complete error: status=x%x, "
12823 "hw_status=x%x, total_data_specified=%d, "
12824 "parameter=x%x, word3=x%x\n",
12825 bf_get(lpfc_wcqe_c_status, wcqe),
12826 bf_get(lpfc_wcqe_c_hw_status, wcqe),
12827 wcqe->total_data_placed, wcqe->parameter,
12831 /* Look up the FCP command IOCB and create pseudo response IOCB */
12832 spin_lock_irqsave(&pring->ring_lock, iflags);
12833 pring->stats.iocb_event++;
12834 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12835 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12836 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12837 if (unlikely(!cmdiocbq)) {
12838 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12839 "0374 FCP complete with no corresponding "
12840 "cmdiocb: iotag (%d)\n",
12841 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12846 cmdiocbq->isr_timestamp =
12847 cq->assoc_qp->isr_timestamp;
12849 if (cmdiocbq->iocb_cmpl == NULL) {
12850 if (cmdiocbq->wqe_cmpl) {
12851 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
12852 spin_lock_irqsave(&phba->hbalock, iflags);
12853 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
12854 spin_unlock_irqrestore(&phba->hbalock, iflags);
12857 /* Pass the cmd_iocb and the wcqe to the upper layer */
12858 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
12861 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12862 "0375 FCP cmdiocb not callback function "
12864 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12868 /* Fake the irspiocb and copy necessary response information */
12869 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
12871 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
12872 spin_lock_irqsave(&phba->hbalock, iflags);
12873 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
12874 spin_unlock_irqrestore(&phba->hbalock, iflags);
12877 /* Pass the cmd_iocb and the rsp state to the upper layer */
12878 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
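/*
 * For reference, the two completion paths dispatched above use callback
 * shapes of the following form (as implied by the call sites; sketch
 * only):
 *
 *	void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
 *			 struct lpfc_wcqe_complete *);	// raw WCQE consumers
 *	void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
 *			  struct lpfc_iocbq *);		// pseudo-IOCB consumers
 */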
12882 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
12883 * @phba: Pointer to HBA context object.
12884 * @cq: Pointer to completion queue.
12885 * @wcqe: Pointer to work-queue completion queue entry.
12887 * This routine handles a fast-path WQ entry consumed event by invoking the
12888 * proper WQ release routine on the matching fast-path WQ.
12891 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12892 struct lpfc_wcqe_release *wcqe)
12894 struct lpfc_queue *childwq;
12895 bool wqid_matched = false;
12898 /* Check for fast-path FCP work queue release */
12899 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
12900 list_for_each_entry(childwq, &cq->child_list, list) {
12901 if (childwq->queue_id == hba_wqid) {
12902 lpfc_sli4_wq_release(childwq,
12903 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12904 wqid_matched = true;
12908 /* Report warning log message if no match found */
12909 if (!wqid_matched)
12910 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12911 "2580 Fast-path wqe consume event carries "
12912 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
12916 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
12917 * @cq: Pointer to the completion queue.
12918 * @cqe: Pointer to fast-path completion queue entry.
12920 * This routine processes a fast-path work queue completion entry from the
12921 * fast-path event queue for FCP command response completion.
12924 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12925 struct lpfc_cqe *cqe)
12927 struct lpfc_wcqe_release wcqe;
12928 bool workposted = false;
12930 /* Copy the work queue CQE and convert endian order if needed */
12931 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
12933 /* Check and process the different types of WCQE and dispatch */
12934 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
12935 case CQE_CODE_COMPL_WQE:
12936 case CQE_CODE_NVME_ERSP:
12938 /* Process the WQ complete event */
12939 phba->last_completion_time = jiffies;
12940 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
12941 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12942 (struct lpfc_wcqe_complete *)&wcqe);
12943 if (cq->subtype == LPFC_NVME_LS)
12944 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12945 (struct lpfc_wcqe_complete *)&wcqe);
12947 case CQE_CODE_RELEASE_WQE:
12948 cq->CQ_release_wqe++;
12949 /* Process the WQ release event */
12950 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
12951 (struct lpfc_wcqe_release *)&wcqe);
12953 case CQE_CODE_XRI_ABORTED:
12954 cq->CQ_xri_aborted++;
12955 /* Process the WQ XRI abort event */
12956 phba->last_completion_time = jiffies;
12957 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12958 (struct sli4_wcqe_xri_aborted *)&wcqe);
12960 case CQE_CODE_RECEIVE_V1:
12961 case CQE_CODE_RECEIVE:
12962 phba->last_completion_time = jiffies;
12965 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12966 "0144 Not a valid CQE code: x%x\n",
12967 bf_get(lpfc_wcqe_c_code, &wcqe));
12974 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
12975 * @phba: Pointer to HBA context object.
12976 * @eqe: Pointer to fast-path event queue entry.
12978 * This routine processes an event queue entry from the fast-path event queue.
12979 * It checks the MajorCode and MinorCode to determine whether this is a
12980 * completion event on a completion queue; if not, an error is logged and the
12981 * routine returns. Otherwise, it gets to the corresponding completion
12982 * queue and processes all the entries on the completion queue, rearms the
12983 * completion queue, and then returns.
12986 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12989 struct lpfc_queue *cq = NULL;
12990 struct lpfc_cqe *cqe;
12991 bool workposted = false;
12995 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12996 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12997 "0366 Not a valid completion "
12998 "event: majorcode=x%x, minorcode=x%x\n",
12999 bf_get_le32(lpfc_eqe_major_code, eqe),
13000 bf_get_le32(lpfc_eqe_minor_code, eqe));
13004 /* Get the reference to the corresponding CQ */
13005 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13007 if (phba->sli4_hba.nvme_cq_map &&
13008 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
13009 /* Process NVME command completion */
13010 cq = phba->sli4_hba.nvme_cq[qidx];
13014 if (phba->sli4_hba.fcp_cq_map &&
13015 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13016 /* Process FCP command completion */
13017 cq = phba->sli4_hba.fcp_cq[qidx];
13021 if (phba->sli4_hba.nvmels_cq &&
13022 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13023 /* Process NVME unsol rcv */
13024 cq = phba->sli4_hba.nvmels_cq;
13027 /* Otherwise this is a Slow path event */
13029 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
13034 if (unlikely(cqid != cq->queue_id)) {
13035 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13036 "0368 Miss-matched fast-path completion "
13037 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13038 cqid, cq->queue_id);
13042 /* Save EQ associated with this CQ */
13043 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13045 /* Process all the entries to the CQ */
13046 while ((cqe = lpfc_sli4_cq_get(cq))) {
13047 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13048 if (!(++ecount % cq->entry_repost))
13049 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13052 /* Track the max number of CQEs processed in 1 EQ */
13053 if (ecount > cq->CQ_max_cqe)
13054 cq->CQ_max_cqe = ecount;
13056 /* Catch the no cq entry condition */
13057 if (unlikely(ecount == 0))
13058 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13059 "0369 No entry from fast-path completion "
13060 "queue fcpcqid=%d\n", cq->queue_id);
13062 /* In any case, flush and re-arm the CQ */
13063 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13065 /* wake up worker thread if there is work to be done */
13067 lpfc_worker_wake_up(phba);
13071 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13073 struct lpfc_eqe *eqe;
13075 /* walk all the EQ entries and drop on the floor */
13076 while ((eqe = lpfc_sli4_eq_get(eq)))
13079 /* Clear and re-arm the EQ */
13080 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13085 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13087 * @phba: Pointer to HBA context object.
13088 * @eqe: Pointer to fast-path event queue entry.
13090 * This routine processes an event queue entry from the Flash Optimized Fabric
13091 * event queue. It checks the MajorCode and MinorCode to determine whether
13092 * this is a completion event on a completion queue; if not, an error is
13093 * logged and the routine returns. Otherwise, it gets to the corresponding
13094 * completion queue and processes all the entries on the completion queue,
13095 * rearms the completion queue, and then returns.
13098 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13100 struct lpfc_queue *cq;
13101 struct lpfc_cqe *cqe;
13102 bool workposted = false;
13106 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13107 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13108 "9147 Not a valid completion "
13109 "event: majorcode=x%x, minorcode=x%x\n",
13110 bf_get_le32(lpfc_eqe_major_code, eqe),
13111 bf_get_le32(lpfc_eqe_minor_code, eqe));
13115 /* Get the reference to the corresponding CQ */
13116 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13118 /* Next check for OAS */
13119 cq = phba->sli4_hba.oas_cq;
13120 if (unlikely(!cq)) {
13121 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13123 "9148 OAS completion queue "
13124 "does not exist\n");
13128 if (unlikely(cqid != cq->queue_id)) {
13129 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13130 "9149 Miss-matched fast-path compl "
13131 "queue id: eqcqid=%d, fcpcqid=%d\n",
13132 cqid, cq->queue_id);
13136 /* Process all the entries to the OAS CQ */
13137 while ((cqe = lpfc_sli4_cq_get(cq))) {
13138 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13139 if (!(++ecount % cq->entry_repost))
13140 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
13143 /* Track the max number of CQEs processed in 1 EQ */
13144 if (ecount > cq->CQ_max_cqe)
13145 cq->CQ_max_cqe = ecount;
13147 /* Catch the no cq entry condition */
13148 if (unlikely(ecount == 0))
13149 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13150 "9153 No entry from fast-path completion "
13151 "queue fcpcqid=%d\n", cq->queue_id);
13153 /* In any case, flush and re-arm the CQ */
13154 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13156 /* wake up worker thread if there is work to be done */
13158 lpfc_worker_wake_up(phba);
13162 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13163 * @irq: Interrupt number.
13164 * @dev_id: The device context pointer.
13166 * This function is directly called from the PCI layer as an interrupt
13167 * service routine when device with SLI-4 interface spec is enabled with
13168 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13169 * IOCB ring event in the HBA. However, when the device is enabled with either
13170 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13171 * device-level interrupt handler. When the PCI slot is in error recovery
13172 * or the HBA is undergoing initialization, the interrupt handler will not
13173 * process the interrupt. The Flash Optimized Fabric ring events are handled
13174 * in the interrupt context. This function is called without any lock held.
13175 * It gets the hbalock to access and update SLI data structures. Note that
13176 * the EQ to CQ mapping is one-to-one, such that the EQ index is
13177 * equal to the CQ index.
13179 * This function returns IRQ_HANDLED when interrupt is handled else it
13180 * returns IRQ_NONE.
13183 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13185 struct lpfc_hba *phba;
13186 struct lpfc_hba_eq_hdl *hba_eq_hdl;
13187 struct lpfc_queue *eq;
13188 struct lpfc_eqe *eqe;
13189 unsigned long iflag;
13192 /* Get the driver's phba structure from the dev_id */
13193 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13194 phba = hba_eq_hdl->phba;
13196 if (unlikely(!phba))
13199 /* Get to the EQ struct associated with this vector */
13200 eq = phba->sli4_hba.fof_eq;
13204 /* Check device state for handling interrupt */
13205 if (unlikely(lpfc_intr_state_check(phba))) {
13207 /* Check again for link_state with lock held */
13208 spin_lock_irqsave(&phba->hbalock, iflag);
13209 if (phba->link_state < LPFC_LINK_DOWN)
13210 /* Flush, clear interrupt, and rearm the EQ */
13211 lpfc_sli4_eq_flush(phba, eq);
13212 spin_unlock_irqrestore(&phba->hbalock, iflag);
13217 * Process all the events on the fast-path EQ
13219 while ((eqe = lpfc_sli4_eq_get(eq))) {
13220 lpfc_sli4_fof_handle_eqe(phba, eqe);
13221 if (!(++ecount % eq->entry_repost))
13222 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
13223 eq->EQ_processed++;
13226 /* Track the max number of EQEs processed in 1 intr */
13227 if (ecount > eq->EQ_max_eqe)
13228 eq->EQ_max_eqe = ecount;
13231 if (unlikely(ecount == 0)) {
13234 if (phba->intr_type == MSIX)
13235 /* MSI-X vectors are not shared, so no EQE is unexpected */
13236 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13237 "9145 MSI-X interrupt with no EQE\n");
13239 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13240 "9146 ISR interrupt with no EQE\n");
13241 /* Non MSI-X interrupts may be shared, so no EQE is expected */
13245 /* Always clear and re-arm the fast-path EQ */
13246 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13247 return IRQ_HANDLED;
13251 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
13252 * @irq: Interrupt number.
13253 * @dev_id: The device context pointer.
13255 * This function is directly called from the PCI layer as an interrupt
13256 * service routine when device with SLI-4 interface spec is enabled with
13257 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13258 * ring event in the HBA. However, when the device is enabled with either
13259 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13260 * device-level interrupt handler. When the PCI slot is in error recovery
13261 * or the HBA is undergoing initialization, the interrupt handler will not
13262 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13263 * the interrupt context. This function is called without any lock held.
13264 * It gets the hbalock to access and update SLI data structures. Note that
13265 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
13266 * equal to the FCP CQ index.
13268 * The link attention and ELS ring attention events are handled
13269 * by the worker thread. The interrupt handler signals the worker thread
13270 * and returns for these events.
13273 * This function returns IRQ_HANDLED when interrupt is handled else it
13274 * returns IRQ_NONE.
13277 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13279 struct lpfc_hba *phba;
13280 struct lpfc_hba_eq_hdl *hba_eq_hdl;
13281 struct lpfc_queue *fpeq;
13282 struct lpfc_eqe *eqe;
13283 unsigned long iflag;
13287 /* Get the driver's phba structure from the dev_id */
13288 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13289 phba = hba_eq_hdl->phba;
13290 hba_eqidx = hba_eq_hdl->idx;
13292 if (unlikely(!phba))
13294 if (unlikely(!phba->sli4_hba.hba_eq))
13297 /* Get to the EQ struct associated with this vector */
13298 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
13299 if (unlikely(!fpeq))
13302 if (lpfc_fcp_look_ahead) {
13303 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
13304 lpfc_sli4_eq_clr_intr(fpeq);
13306 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13311 /* Check device state for handling interrupt */
13312 if (unlikely(lpfc_intr_state_check(phba))) {
13313 fpeq->EQ_badstate++;
13314 /* Check again for link_state with lock held */
13315 spin_lock_irqsave(&phba->hbalock, iflag);
13316 if (phba->link_state < LPFC_LINK_DOWN)
13317 /* Flush, clear interrupt, and rearm the EQ */
13318 lpfc_sli4_eq_flush(phba, fpeq);
13319 spin_unlock_irqrestore(&phba->hbalock, iflag);
13320 if (lpfc_fcp_look_ahead)
13321 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13326 * Process all the events on the FCP fast-path EQ
13328 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
13332 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
13333 if (!(++ecount % fpeq->entry_repost))
13334 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
13335 fpeq->EQ_processed++;
13338 /* Track the max number of EQEs processed in 1 intr */
13339 if (ecount > fpeq->EQ_max_eqe)
13340 fpeq->EQ_max_eqe = ecount;
13342 /* Always clear and re-arm the fast-path EQ */
13343 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
13345 if (unlikely(ecount == 0)) {
13346 fpeq->EQ_no_entry++;
13348 if (lpfc_fcp_look_ahead) {
13349 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13353 if (phba->intr_type == MSIX)
13354 /* MSI-X vectors are not shared, so no EQE is unexpected */
13355 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13356 "0358 MSI-X interrupt with no EQE\n");
13358 /* Non MSI-X interrupts may be shared, so no EQE is expected */
13362 if (lpfc_fcp_look_ahead)
13363 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13365 return IRQ_HANDLED;
13366 } /* lpfc_sli4_hba_intr_handler */
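/*
 * Note on the lpfc_fcp_look_ahead gating above (intent inferred from
 * the atomics; treat as a sketch): hba_eq_in_use acts as a per-EQ claim
 * token. atomic_dec_and_test() claims the EQ and disables its interrupt
 * so further completions are polled ahead of the next IRQ, and every
 * exit path re-increments the counter to release the claim.
 */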
13369 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
13370 * @irq: Interrupt number.
13371 * @dev_id: The device context pointer.
13373 * This function is the device-level interrupt handler to device with SLI-4
13374 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
13375 * interrupt mode is enabled and there is an event in the HBA which requires
13376 * driver attention. This function invokes the slow-path interrupt attention
13377 * handling function and fast-path interrupt attention handling function in
13378 * turn to process the relevant HBA attention events. This function is called
13379 * without any lock held. It gets the hbalock to access and update SLI data
13382 * This function returns IRQ_HANDLED when interrupt is handled, else it
13383 * returns IRQ_NONE.
13386 lpfc_sli4_intr_handler(int irq, void *dev_id)
13388 struct lpfc_hba *phba;
13389 irqreturn_t hba_irq_rc;
13390 bool hba_handled = false;
13393 /* Get the driver's phba structure from the dev_id */
13394 phba = (struct lpfc_hba *)dev_id;
13396 if (unlikely(!phba))
13400 * Invoke fast-path host attention interrupt handling as appropriate.
13402 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
13403 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
13404 &phba->sli4_hba.hba_eq_hdl[qidx]);
13405 if (hba_irq_rc == IRQ_HANDLED)
13406 hba_handled |= true;
13409 if (phba->cfg_fof) {
13410 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
13411 &phba->sli4_hba.hba_eq_hdl[qidx]);
13412 if (hba_irq_rc == IRQ_HANDLED)
13413 hba_handled |= true;
13416 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
13417 } /* lpfc_sli4_intr_handler */
13420 * lpfc_sli4_queue_free - free a queue structure and associated memory
13421 * @queue: The queue structure to free.
13423 * This function frees a queue structure and the DMAable memory used for
13424 * the host resident queue. This function must be called after destroying the
13425 * queue on the HBA.
13428 lpfc_sli4_queue_free(struct lpfc_queue *queue)
13430 struct lpfc_dmabuf *dmabuf;
13435 while (!list_empty(&queue->page_list)) {
13436 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
13438 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
13439 dmabuf->virt, dmabuf->phys);
13443 lpfc_free_rq_buffer(queue->phba, queue);
13444 kfree(queue->rqbp);
13446 kfree(queue->pring);
13452 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
13453 * @phba: The HBA that this queue is being created on.
13454 * @entry_size: The size of each queue entry for this queue.
13455 * @entry_count: The number of entries that this queue will handle.
13457 * This function allocates a queue structure and the DMAable memory used for
13458 * the host resident queue. This function must be called before creating the
13459 * queue on the HBA.
13461 struct lpfc_queue *
13462 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13463 uint32_t entry_count)
13465 struct lpfc_queue *queue;
13466 struct lpfc_dmabuf *dmabuf;
13467 int x, total_qe_count;
13469 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13471 if (!phba->sli4_hba.pc_sli4_params.supported)
13472 hw_page_size = SLI4_PAGE_SIZE;
13474 queue = kzalloc(sizeof(struct lpfc_queue) +
13475 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13478 queue->page_count = (ALIGN(entry_size * entry_count,
13479 hw_page_size))/hw_page_size;
13481 /* If needed, adjust page count to match the max the adapter supports */
13482 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
13483 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
13485 INIT_LIST_HEAD(&queue->list);
13486 INIT_LIST_HEAD(&queue->wq_list);
13487 INIT_LIST_HEAD(&queue->page_list);
13488 INIT_LIST_HEAD(&queue->child_list);
13489 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13490 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13493 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13494 hw_page_size, &dmabuf->phys,
13496 if (!dmabuf->virt) {
13500 dmabuf->buffer_tag = x;
13501 list_add_tail(&dmabuf->list, &queue->page_list);
13502 /* initialize queue's entry array */
13503 dma_pointer = dmabuf->virt;
13504 for (; total_qe_count < entry_count &&
13505 dma_pointer < (hw_page_size + dmabuf->virt);
13506 total_qe_count++, dma_pointer += entry_size) {
13507 queue->qe[total_qe_count].address = dma_pointer;
13510 queue->entry_size = entry_size;
13511 queue->entry_count = entry_count;
13514 * entry_repost is calculated based on the number of entries in the
13515 * queue. This works out except for RQs. If buffers are NOT initially
13516 * posted for every RQE, entry_repost should be adjusted accordingly.
13518 queue->entry_repost = (entry_count >> 3);
13519 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13520 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
13521 queue->phba = phba;
13525 lpfc_sli4_queue_free(queue);
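/*
 * Lifecycle sketch (illustrative; the 4-byte entry size and 1024-entry
 * count are hypothetical values): host memory is allocated first, the
 * matching *_create() mailbox command then instantiates the queue on the
 * HBA, and teardown runs in reverse.
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, 4, 1024);
 *	if (!eq)
 *		goto out_error;
 *	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax)) {
 *		lpfc_sli4_queue_free(eq);
 *		goto out_error;
 *	}
 *	...
 *	lpfc_eq_destroy(phba, eq);
 *	lpfc_sli4_queue_free(eq);
 */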
13530 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13531 * @phba: HBA structure that indicates port to create a queue on.
13532 * @pci_barset: PCI BAR set flag.
13534 * This function returns the host memory address to which the specified
13535 * PCI BAR set has already been iomapped. The returned host memory
13536 * address can be NULL.
13538 static void __iomem *
13539 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13544 switch (pci_barset) {
13545 case WQ_PCI_BAR_0_AND_1:
13546 return phba->pci_bar0_memmap_p;
13547 case WQ_PCI_BAR_2_AND_3:
13548 return phba->pci_bar2_memmap_p;
13549 case WQ_PCI_BAR_4_AND_5:
13550 return phba->pci_bar4_memmap_p;
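/*
 * For example, a WQ_CREATE response that reports WQ_PCI_BAR_4_AND_5 as its
 * BAR set resolves here to phba->pci_bar4_memmap_p, which the caller then
 * offsets by the reported doorbell offset (see lpfc_wq_create() below):
 *
 *	bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
 *	wq->db_regaddr = bar_memmap_p + db_offset;
 */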
13558 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
13559 * @phba: HBA structure that indicates port to create a queue on.
13560 * @startq: The starting FCP EQ to modify
13562 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
13564 * The @phba struct is used to send mailbox command to HBA. The @startq
13565 * is used to get the starting FCP EQ to change.
13566 * This function is synchronous and will wait for the mailbox
13567 * command to finish before continuing.
13569 * On success this function will return a zero. If unable to allocate enough
13570 * memory this function will return -ENOMEM. If the mailbox command
13571 * fails this function will return -ENXIO.
13574 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
13576 struct lpfc_mbx_modify_eq_delay *eq_delay;
13577 LPFC_MBOXQ_t *mbox;
13578 struct lpfc_queue *eq;
13579 int cnt, rc, length, status = 0;
13580 uint32_t shdr_status, shdr_add_status;
13583 union lpfc_sli4_cfg_shdr *shdr;
13586 if (startq >= phba->io_channel_irqs)
13589 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13592 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
13593 sizeof(struct lpfc_sli4_cfg_mhdr));
13594 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13595 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
13596 length, LPFC_SLI4_MBX_EMBED);
13597 eq_delay = &mbox->u.mqe.un.eq_delay;
13599 /* Calculate delay multiplier from maximum interrupts per second */
13600 result = phba->cfg_fcp_imax / phba->io_channel_irqs;
13601 if (result > LPFC_DMULT_CONST || result == 0)
13602 dmult = 0;
13603 else
13604 dmult = LPFC_DMULT_CONST/result - 1;
13607 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
13608 eq = phba->sli4_hba.hba_eq[qidx];
13611 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
13612 eq_delay->u.request.eq[cnt].phase = 0;
13613 eq_delay->u.request.eq[cnt].delay_multi = dmult;
13615 if (cnt >= LPFC_MAX_EQ_DELAY)
13618 eq_delay->u.request.num_eq = cnt;
13620 mbox->vport = phba->pport;
13621 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13622 mbox->context1 = NULL;
13623 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13624 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
13625 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13626 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13627 if (shdr_status || shdr_add_status || rc) {
13628 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13629 "2512 MODIFY_EQ_DELAY mailbox failed with "
13630 "status x%x add_status x%x, mbx status x%x\n",
13631 shdr_status, shdr_add_status, rc);
13634 mempool_free(mbox, phba->mbox_mem_pool);
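/*
 * Worked example (hypothetical numbers): with cfg_fcp_imax = 80000
 * interrupts per second spread across 4 IO channel IRQs, result is 20000
 * per EQ and each EQ is programmed with
 *
 *	dmult = LPFC_DMULT_CONST / 20000 - 1;
 *
 * A result of 0, or one larger than LPFC_DMULT_CONST, leaves dmult at 0,
 * which disables interrupt delay entirely.
 */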
13639 * lpfc_eq_create - Create an Event Queue on the HBA
13640 * @phba: HBA structure that indicates port to create a queue on.
13641 * @eq: The queue structure to use to create the event queue.
13642 * @imax: The maximum interrupt per second limit.
13644 * This function creates an event queue, as detailed in @eq, on a port,
13645 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
13647 * The @phba struct is used to send mailbox command to HBA. The @eq struct
13648 * is used to get the entry count and entry size that are necessary to
13649 * determine the number of pages to allocate and use for this queue. This
13650 * function will send the EQ_CREATE mailbox command to the HBA to setup the
13651 * event queue. This function is synchronous and will wait for the mailbox
13652 * command to finish before continuing.
13654 * On success this function will return a zero. If unable to allocate enough
13655 * memory this function will return -ENOMEM. If the queue create mailbox command
13656 * fails this function will return -ENXIO.
13659 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
13661 struct lpfc_mbx_eq_create *eq_create;
13662 LPFC_MBOXQ_t *mbox;
13663 int rc, length, status = 0;
13664 struct lpfc_dmabuf *dmabuf;
13665 uint32_t shdr_status, shdr_add_status;
13666 union lpfc_sli4_cfg_shdr *shdr;
13668 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13670 /* sanity check on queue memory */
13673 if (!phba->sli4_hba.pc_sli4_params.supported)
13674 hw_page_size = SLI4_PAGE_SIZE;
13676 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13679 length = (sizeof(struct lpfc_mbx_eq_create) -
13680 sizeof(struct lpfc_sli4_cfg_mhdr));
13681 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13682 LPFC_MBOX_OPCODE_EQ_CREATE,
13683 length, LPFC_SLI4_MBX_EMBED);
13684 eq_create = &mbox->u.mqe.un.eq_create;
13685 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
13687 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
13689 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
13690 /* don't setup delay multiplier using EQ_CREATE */
13692 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
13694 switch (eq->entry_count) {
13696 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13697 "0360 Unsupported EQ count. (%d)\n",
13699 if (eq->entry_count < 256)
13701 /* otherwise default to smallest count (drop through) */
13703 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13707 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13711 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13715 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13719 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13723 list_for_each_entry(dmabuf, &eq->page_list, list) {
13724 memset(dmabuf->virt, 0, hw_page_size);
13725 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13726 putPaddrLow(dmabuf->phys);
13727 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13728 putPaddrHigh(dmabuf->phys);
13730 mbox->vport = phba->pport;
13731 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13732 mbox->context1 = NULL;
13733 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13734 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
13735 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13736 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13737 if (shdr_status || shdr_add_status || rc) {
13738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13739 "2500 EQ_CREATE mailbox failed with "
13740 "status x%x add_status x%x, mbx status x%x\n",
13741 shdr_status, shdr_add_status, rc);
13744 eq->type = LPFC_EQ;
13745 eq->subtype = LPFC_NONE;
13746 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
13747 if (eq->queue_id == 0xFFFF)
13749 eq->host_index = 0;
13752 mempool_free(mbox, phba->mbox_mem_pool);
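/*
 * Sketch (illustrative): callers typically create one EQ per IO channel
 * and rely on lpfc_modify_hba_eq_delay() for coalescing, since EQ_CREATE
 * deliberately leaves the delay multiplier alone (see above).
 *
 *	for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
 *		if (lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
 *				   phba->cfg_fcp_imax))
 *			goto out_error;
 */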
13757 * lpfc_cq_create - Create a Completion Queue on the HBA
13758 * @phba: HBA structure that indicates port to create a queue on.
13759 * @cq: The queue structure to use to create the completion queue.
13760 * @eq: The event queue to bind this completion queue to.
13762 * This function creates a completion queue, as detailed in @cq, on a port,
13763 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
13765 * The @phba struct is used to send mailbox command to HBA. The @cq struct
13766 * is used to get the entry count and entry size that are necessary to
13767 * determine the number of pages to allocate and use for this queue. The @eq
13768 * is used to indicate which event queue to bind this completion queue to. This
13769 * function will send the CQ_CREATE mailbox command to the HBA to setup the
13770 * completion queue. This function is synchronous and will wait for the mailbox
13771 * command to finish before continuing.
13773 * On success this function will return a zero. If unable to allocate enough
13774 * memory this function will return -ENOMEM. If the queue create mailbox command
13775 * fails this function will return -ENXIO.
13778 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
13779 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
13781 struct lpfc_mbx_cq_create *cq_create;
13782 struct lpfc_dmabuf *dmabuf;
13783 LPFC_MBOXQ_t *mbox;
13784 int rc, length, status = 0;
13785 uint32_t shdr_status, shdr_add_status;
13786 union lpfc_sli4_cfg_shdr *shdr;
13787 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13789 /* sanity check on queue memory */
13792 if (!phba->sli4_hba.pc_sli4_params.supported)
13793 hw_page_size = SLI4_PAGE_SIZE;
13795 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13798 length = (sizeof(struct lpfc_mbx_cq_create) -
13799 sizeof(struct lpfc_sli4_cfg_mhdr));
13800 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13801 LPFC_MBOX_OPCODE_CQ_CREATE,
13802 length, LPFC_SLI4_MBX_EMBED);
13803 cq_create = &mbox->u.mqe.un.cq_create;
13804 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
13805 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
13807 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
13808 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
13809 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13810 phba->sli4_hba.pc_sli4_params.cqv);
13811 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
13812 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
13813 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
13814 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
13817 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
13820 switch (cq->entry_count) {
13822 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13823 "0361 Unsupported CQ count: "
13824 "entry cnt %d sz %d pg cnt %d repost %d\n",
13825 cq->entry_count, cq->entry_size,
13826 cq->page_count, cq->entry_repost);
13827 if (cq->entry_count < 256) {
13831 /* otherwise default to smallest count (drop through) */
13833 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13837 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13841 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13845 list_for_each_entry(dmabuf, &cq->page_list, list) {
13846 memset(dmabuf->virt, 0, hw_page_size);
13847 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13848 putPaddrLow(dmabuf->phys);
13849 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13850 putPaddrHigh(dmabuf->phys);
13852 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13854 /* The IOCTL status is embedded in the mailbox subheader. */
13855 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13856 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13857 if (shdr_status || shdr_add_status || rc) {
13858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13859 "2501 CQ_CREATE mailbox failed with "
13860 "status x%x add_status x%x, mbx status x%x\n",
13861 shdr_status, shdr_add_status, rc);
13865 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13866 if (cq->queue_id == 0xFFFF) {
13870 /* link the cq onto the parent eq child list */
13871 list_add_tail(&cq->list, &eq->child_list);
13872 /* Set up completion queue's type and subtype */
13874 cq->subtype = subtype;
13875 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13876 cq->assoc_qid = eq->queue_id;
13877 cq->host_index = 0;
13881 mempool_free(mbox, phba->mbox_mem_pool);
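/*
 * Sketch (illustrative; LPFC_WCQ/LPFC_FCP are one plausible type/subtype
 * pairing, not the only one): every CQ is bound to a parent EQ at create
 * time and ends up on that EQ's child list.
 *
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *	if (rc)
 *		goto out_error;
 */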
13886 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
13887 * @phba: HBA structure that indicates port to create a queue on.
13888 * @mq: The queue structure to use to create the mailbox queue.
13889 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
13890 * @cq: The completion queue to associate with this mq.
13892 * This function provides fallback (fb) functionality when
13893 * mq_create_ext fails on older FW generations. Its purpose is identical
13894 * to mq_create_ext otherwise.
13896 * This routine cannot fail as all attributes were previously accessed and
13897 * initialized in mq_create_ext.
13900 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
13901 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
13903 struct lpfc_mbx_mq_create *mq_create;
13904 struct lpfc_dmabuf *dmabuf;
13907 length = (sizeof(struct lpfc_mbx_mq_create) -
13908 sizeof(struct lpfc_sli4_cfg_mhdr));
13909 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13910 LPFC_MBOX_OPCODE_MQ_CREATE,
13911 length, LPFC_SLI4_MBX_EMBED);
13912 mq_create = &mbox->u.mqe.un.mq_create;
13913 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
13915 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
13917 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
13918 switch (mq->entry_count) {
13920 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13921 LPFC_MQ_RING_SIZE_16);
13924 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13925 LPFC_MQ_RING_SIZE_32);
13928 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13929 LPFC_MQ_RING_SIZE_64);
13932 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13933 LPFC_MQ_RING_SIZE_128);
13936 list_for_each_entry(dmabuf, &mq->page_list, list) {
13937 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13938 putPaddrLow(dmabuf->phys);
13939 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13940 putPaddrHigh(dmabuf->phys);
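/*
 * Design note: MQ_CREATE_EXT additionally registers for async events
 * (link, FIP, group5, FC, SLI; see lpfc_mq_create() below). Firmware that
 * rejects the extended command still needs a mailbox queue, so the caller
 * reuses the same mailbox memory and retries with this plain MQ_CREATE
 * variant, roughly:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	if (rc != MBX_SUCCESS) {
 *		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	}
 */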
13945 * lpfc_mq_create - Create a mailbox Queue on the HBA
13946 * @phba: HBA structure that indicates port to create a queue on.
13947 * @mq: The queue structure to use to create the mailbox queue.
13948 * @cq: The completion queue to associate with this mq.
13949 * @subtype: The queue's subtype.
13951 * This function creates a mailbox queue, as detailed in @mq, on a port,
13952 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
13954 * The @phba struct is used to send mailbox command to HBA. The @mq struct
13955 * is used to get the entry count and entry size that are necessary to
13956 * determine the number of pages to allocate and use for this queue. This
13957 * function will send the MQ_CREATE mailbox command to the HBA to setup the
13958 * mailbox queue. This function is synchronous and will wait for the mailbox
13959 * command to finish before continuing.
13961 * On success this function will return a zero. If unable to allocate enough
13962 * memory this function will return -ENOMEM. If the queue create mailbox command
13963 * fails this function will return -ENXIO.
13966 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
13967 struct lpfc_queue *cq, uint32_t subtype)
13969 struct lpfc_mbx_mq_create *mq_create;
13970 struct lpfc_mbx_mq_create_ext *mq_create_ext;
13971 struct lpfc_dmabuf *dmabuf;
13972 LPFC_MBOXQ_t *mbox;
13973 int rc, length, status = 0;
13974 uint32_t shdr_status, shdr_add_status;
13975 union lpfc_sli4_cfg_shdr *shdr;
13976 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13978 /* sanity check on queue memory */
13981 if (!phba->sli4_hba.pc_sli4_params.supported)
13982 hw_page_size = SLI4_PAGE_SIZE;
13984 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13987 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
13988 sizeof(struct lpfc_sli4_cfg_mhdr));
13989 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13990 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
13991 length, LPFC_SLI4_MBX_EMBED);
13993 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
13994 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
13995 bf_set(lpfc_mbx_mq_create_ext_num_pages,
13996 &mq_create_ext->u.request, mq->page_count);
13997 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
13998 &mq_create_ext->u.request, 1);
13999 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
14000 &mq_create_ext->u.request, 1);
14001 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
14002 &mq_create_ext->u.request, 1);
14003 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
14004 &mq_create_ext->u.request, 1);
14005 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
14006 &mq_create_ext->u.request, 1);
14007 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
14008 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14009 phba->sli4_hba.pc_sli4_params.mqv);
14010 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
14011 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
14014 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
14016 switch (mq->entry_count) {
14018 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14019 "0362 Unsupported MQ count. (%d)\n",
14021 if (mq->entry_count < 16) {
14025 /* otherwise default to smallest count (drop through) */
14027 bf_set(lpfc_mq_context_ring_size,
14028 &mq_create_ext->u.request.context,
14029 LPFC_MQ_RING_SIZE_16);
14032 bf_set(lpfc_mq_context_ring_size,
14033 &mq_create_ext->u.request.context,
14034 LPFC_MQ_RING_SIZE_32);
14037 bf_set(lpfc_mq_context_ring_size,
14038 &mq_create_ext->u.request.context,
14039 LPFC_MQ_RING_SIZE_64);
14042 bf_set(lpfc_mq_context_ring_size,
14043 &mq_create_ext->u.request.context,
14044 LPFC_MQ_RING_SIZE_128);
14047 list_for_each_entry(dmabuf, &mq->page_list, list) {
14048 memset(dmabuf->virt, 0, hw_page_size);
14049 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
14050 putPaddrLow(dmabuf->phys);
14051 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
14052 putPaddrHigh(dmabuf->phys);
14054 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14055 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14056 &mq_create_ext->u.response);
14057 if (rc != MBX_SUCCESS) {
14058 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14059 "2795 MQ_CREATE_EXT failed with "
14060 "status x%x. Failback to MQ_CREATE.\n",
14062 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
14063 mq_create = &mbox->u.mqe.un.mq_create;
14064 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14065 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
14066 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14067 &mq_create->u.response);
14070 /* The IOCTL status is embedded in the mailbox subheader. */
14071 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14072 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14073 if (shdr_status || shdr_add_status || rc) {
14074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14075 "2502 MQ_CREATE mailbox failed with "
14076 "status x%x add_status x%x, mbx status x%x\n",
14077 shdr_status, shdr_add_status, rc);
14081 if (mq->queue_id == 0xFFFF) {
14085 mq->type = LPFC_MQ;
14086 mq->assoc_qid = cq->queue_id;
14087 mq->subtype = subtype;
14088 mq->host_index = 0;
14091 /* link the mq onto the parent cq child list */
14092 list_add_tail(&mq->list, &cq->child_list);
14094 mempool_free(mbox, phba->mbox_mem_pool);
14099 * lpfc_wq_create - Create a Work Queue on the HBA
14100 * @phba: HBA structure that indicates port to create a queue on.
14101 * @wq: The queue structure to use to create the work queue.
14102 * @cq: The completion queue to bind this work queue to.
14103 * @subtype: The subtype of the work queue indicating its functionality.
14105 * This function creates a work queue, as detailed in @wq, on a port, described
14106 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
14108 * The @phba struct is used to send mailbox command to HBA. The @wq struct
14109 * is used to get the entry count and entry size that are necessary to
14110 * determine the number of pages to allocate and use for this queue. The @cq
14111 * is used to indicate which completion queue to bind this work queue to. This
14112 * function will send the WQ_CREATE mailbox command to the HBA to setup the
14113 * work queue. This function is synchronous and will wait for the mailbox
14114 * command to finish before continuing.
14116 * On success this function will return a zero. If unable to allocate enough
14117 * memory this function will return -ENOMEM. If the queue create mailbox command
14118 * fails this function will return -ENXIO.
14121 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14122 struct lpfc_queue *cq, uint32_t subtype)
14124 struct lpfc_mbx_wq_create *wq_create;
14125 struct lpfc_dmabuf *dmabuf;
14126 LPFC_MBOXQ_t *mbox;
14127 int rc, length, status = 0;
14128 uint32_t shdr_status, shdr_add_status;
14129 union lpfc_sli4_cfg_shdr *shdr;
14130 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14131 struct dma_address *page;
14132 void __iomem *bar_memmap_p;
14133 uint32_t db_offset;
14134 uint16_t pci_barset;
14136 /* sanity check on queue memory */
14139 if (!phba->sli4_hba.pc_sli4_params.supported)
14140 hw_page_size = SLI4_PAGE_SIZE;
14142 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14145 length = (sizeof(struct lpfc_mbx_wq_create) -
14146 sizeof(struct lpfc_sli4_cfg_mhdr));
14147 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14148 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
14149 length, LPFC_SLI4_MBX_EMBED);
14150 wq_create = &mbox->u.mqe.un.wq_create;
14151 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
14152 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
14154 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
14157 /* wqv is the earliest version supported, NOT the latest */
14158 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14159 phba->sli4_hba.pc_sli4_params.wqv);
14161 switch (phba->sli4_hba.pc_sli4_params.wqv) {
14162 case LPFC_Q_CREATE_VERSION_0:
14163 switch (wq->entry_size) {
14166 /* Nothing to do, version 0 ONLY supports 64 byte */
14167 page = wq_create->u.request.page;
14170 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14171 LPFC_WQ_SZ128_SUPPORT)) {
14175 /* If we get here the HBA MUST also support V1 and we MUST use it */
14178 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14179 LPFC_Q_CREATE_VERSION_1);
14181 bf_set(lpfc_mbx_wq_create_wqe_count,
14182 &wq_create->u.request_1, wq->entry_count);
14183 bf_set(lpfc_mbx_wq_create_wqe_size,
14184 &wq_create->u.request_1,
14185 LPFC_WQ_WQE_SIZE_128);
14186 bf_set(lpfc_mbx_wq_create_page_size,
14187 &wq_create->u.request_1,
14188 LPFC_WQ_PAGE_SIZE_4096);
14189 page = wq_create->u.request_1.page;
14193 case LPFC_Q_CREATE_VERSION_1:
14194 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
14196 switch (wq->entry_size) {
14199 bf_set(lpfc_mbx_wq_create_wqe_size,
14200 &wq_create->u.request_1,
14201 LPFC_WQ_WQE_SIZE_64);
14204 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14205 LPFC_WQ_SZ128_SUPPORT)) {
14209 bf_set(lpfc_mbx_wq_create_wqe_size,
14210 &wq_create->u.request_1,
14211 LPFC_WQ_WQE_SIZE_128);
14214 bf_set(lpfc_mbx_wq_create_page_size,
14215 &wq_create->u.request_1,
14216 LPFC_WQ_PAGE_SIZE_4096);
14217 page = wq_create->u.request_1.page;
14224 list_for_each_entry(dmabuf, &wq->page_list, list) {
14225 memset(dmabuf->virt, 0, hw_page_size);
14226 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
14227 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
14230 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14231 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
14233 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14234 /* The IOCTL status is embedded in the mailbox subheader. */
14235 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14236 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14237 if (shdr_status || shdr_add_status || rc) {
14238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14239 "2503 WQ_CREATE mailbox failed with "
14240 "status x%x add_status x%x, mbx status x%x\n",
14241 shdr_status, shdr_add_status, rc);
14245 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
14246 if (wq->queue_id == 0xFFFF) {
14250 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14251 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
14252 &wq_create->u.response);
14253 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
14254 (wq->db_format != LPFC_DB_RING_FORMAT)) {
14255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14256 "3265 WQ[%d] doorbell format not "
14257 "supported: x%x\n", wq->queue_id,
14262 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
14263 &wq_create->u.response);
14264 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14265 if (!bar_memmap_p) {
14266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14267 "3263 WQ[%d] failed to memmap pci "
14268 "barset:x%x\n", wq->queue_id,
14273 db_offset = wq_create->u.response.doorbell_offset;
14274 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
14275 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
14276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14277 "3252 WQ[%d] doorbell offset not "
14278 "supported: x%x\n", wq->queue_id,
14283 wq->db_regaddr = bar_memmap_p + db_offset;
14284 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14285 "3264 WQ[%d]: barset:x%x, offset:x%x, "
14286 "format:x%x\n", wq->queue_id, pci_barset,
14287 db_offset, wq->db_format);
14289 wq->db_format = LPFC_DB_LIST_FORMAT;
14290 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
14292 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
14293 if (wq->pring == NULL) {
14297 wq->type = LPFC_WQ;
14298 wq->assoc_qid = cq->queue_id;
14299 wq->subtype = subtype;
14300 wq->host_index = 0;
14302 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
14304 /* link the wq onto the parent cq child list */
14305 list_add_tail(&wq->list, &cq->child_list);
14307 mempool_free(mbox, phba->mbox_mem_pool);
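/*
 * Sketch (illustrative; LPFC_FCP is one plausible subtype): once created,
 * the wq->db_regaddr computed above is the doorbell the driver rings each
 * time a WQE is copied into the queue.
 *
 *	rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
 *	if (rc)
 *		goto out_error;
 */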
14312 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
14313 * @phba: HBA structure that indicates port to create a queue on.
14314 * @rq: The queue structure to use for the receive queue.
14315 * @qno: The associated HBQ number
14318 * For SLI4 we need to adjust the RQ repost value based on
14319 * the number of buffers that are initially posted to the RQ.
14322 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
14326 /* sanity check on queue memory */
14329 cnt = lpfc_hbq_defs[qno]->entry_count;
14331 /* Recalc repost for RQs based on buffers initially posted */
14332 cnt = (cnt >> 3);
14333 if (cnt < LPFC_QUEUE_MIN_REPOST)
14334 cnt = LPFC_QUEUE_MIN_REPOST;
14336 rq->entry_repost = cnt;
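/*
 * Worked example (hypothetical HBQ definition): with an entry_count of
 * 256, the repost trigger becomes 256 >> 3 = 32; had the result fallen
 * below LPFC_QUEUE_MIN_REPOST it would have been raised to that floor.
 */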
14340 * lpfc_rq_create - Create a Receive Queue on the HBA
14341 * @phba: HBA structure that indicates port to create a queue on.
14342 * @hrq: The queue structure to use to create the header receive queue.
14343 * @drq: The queue structure to use to create the data receive queue.
14344 * @cq: The completion queue to bind this work queue to.
14346 * This function creates a receive buffer queue pair, as detailed in @hrq and
14347 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
14350 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
14351 * struct is used to get the entry count that is necessary to determine the
14352 * number of pages to use for this queue. The @cq is used to indicate which
14353 * completion queue the buffers posted to these queues are bound to.
14354 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
14355 * receive queue pair. This function is synchronous and will wait for the
14356 * mailbox command to finish before continuing.
14358 * On success this function will return a zero. If unable to allocate enough
14359 * memory this function will return -ENOMEM. If the queue create mailbox command
14360 * fails this function will return -ENXIO.
14363 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14364 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
14366 struct lpfc_mbx_rq_create *rq_create;
14367 struct lpfc_dmabuf *dmabuf;
14368 LPFC_MBOXQ_t *mbox;
14369 int rc, length, status = 0;
14370 uint32_t shdr_status, shdr_add_status;
14371 union lpfc_sli4_cfg_shdr *shdr;
14372 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14373 void __iomem *bar_memmap_p;
14374 uint32_t db_offset;
14375 uint16_t pci_barset;
14377 /* sanity check on queue memory */
14378 if (!hrq || !drq || !cq)
14380 if (!phba->sli4_hba.pc_sli4_params.supported)
14381 hw_page_size = SLI4_PAGE_SIZE;
14383 if (hrq->entry_count != drq->entry_count)
14385 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14388 length = (sizeof(struct lpfc_mbx_rq_create) -
14389 sizeof(struct lpfc_sli4_cfg_mhdr));
14390 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14391 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14392 length, LPFC_SLI4_MBX_EMBED);
14393 rq_create = &mbox->u.mqe.un.rq_create;
14394 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14395 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14396 phba->sli4_hba.pc_sli4_params.rqv);
14397 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14398 bf_set(lpfc_rq_context_rqe_count_1,
14399 &rq_create->u.request.context,
14401 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
14402 bf_set(lpfc_rq_context_rqe_size,
14403 &rq_create->u.request.context,
14405 bf_set(lpfc_rq_context_page_size,
14406 &rq_create->u.request.context,
14407 LPFC_RQ_PAGE_SIZE_4096);
14409 switch (hrq->entry_count) {
14411 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14412 "2535 Unsupported RQ count. (%d)\n",
14414 if (hrq->entry_count < 512) {
14418 /* otherwise default to smallest count (drop through) */
14420 bf_set(lpfc_rq_context_rqe_count,
14421 &rq_create->u.request.context,
14422 LPFC_RQ_RING_SIZE_512);
14425 bf_set(lpfc_rq_context_rqe_count,
14426 &rq_create->u.request.context,
14427 LPFC_RQ_RING_SIZE_1024);
14430 bf_set(lpfc_rq_context_rqe_count,
14431 &rq_create->u.request.context,
14432 LPFC_RQ_RING_SIZE_2048);
14435 bf_set(lpfc_rq_context_rqe_count,
14436 &rq_create->u.request.context,
14437 LPFC_RQ_RING_SIZE_4096);
14440 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14441 LPFC_HDR_BUF_SIZE);
14443 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14445 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14447 list_for_each_entry(dmabuf, &hrq->page_list, list) {
14448 memset(dmabuf->virt, 0, hw_page_size);
14449 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14450 putPaddrLow(dmabuf->phys);
14451 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14452 putPaddrHigh(dmabuf->phys);
14454 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14455 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
14457 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14458 /* The IOCTL status is embedded in the mailbox subheader. */
14459 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14460 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14461 if (shdr_status || shdr_add_status || rc) {
14462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14463 "2504 RQ_CREATE mailbox failed with "
14464 "status x%x add_status x%x, mbx status x%x\n",
14465 shdr_status, shdr_add_status, rc);
14469 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
14470 if (hrq->queue_id == 0xFFFF) {
14475 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14476 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
14477 &rq_create->u.response);
14478 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
14479 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
14480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14481 "3262 RQ [%d] doorbell format not "
14482 "supported: x%x\n", hrq->queue_id,
14488 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
14489 &rq_create->u.response);
14490 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14491 if (!bar_memmap_p) {
14492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14493 "3269 RQ[%d] failed to memmap pci "
14494 "barset:x%x\n", hrq->queue_id,
14500 db_offset = rq_create->u.response.doorbell_offset;
14501 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
14502 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
14503 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14504 "3270 RQ[%d] doorbell offset not "
14505 "supported: x%x\n", hrq->queue_id,
14510 hrq->db_regaddr = bar_memmap_p + db_offset;
14511 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14512 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
14513 "format:x%x\n", hrq->queue_id, pci_barset,
14514 db_offset, hrq->db_format);
14516 hrq->db_format = LPFC_DB_RING_FORMAT;
14517 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
14519 hrq->type = LPFC_HRQ;
14520 hrq->assoc_qid = cq->queue_id;
14521 hrq->subtype = subtype;
14522 hrq->host_index = 0;
14523 hrq->hba_index = 0;
14525 /* now create the data queue */
14526 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14527 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14528 length, LPFC_SLI4_MBX_EMBED);
14529 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14530 phba->sli4_hba.pc_sli4_params.rqv);
14531 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14532 bf_set(lpfc_rq_context_rqe_count_1,
14533 &rq_create->u.request.context, hrq->entry_count);
14534 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
14535 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
14537 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
14538 (PAGE_SIZE/SLI4_PAGE_SIZE));
14540 switch (drq->entry_count) {
14542 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14543 "2536 Unsupported RQ count. (%d)\n",
14545 if (drq->entry_count < 512) {
14549 /* otherwise default to smallest count (drop through) */
14551 bf_set(lpfc_rq_context_rqe_count,
14552 &rq_create->u.request.context,
14553 LPFC_RQ_RING_SIZE_512);
14556 bf_set(lpfc_rq_context_rqe_count,
14557 &rq_create->u.request.context,
14558 LPFC_RQ_RING_SIZE_1024);
14561 bf_set(lpfc_rq_context_rqe_count,
14562 &rq_create->u.request.context,
14563 LPFC_RQ_RING_SIZE_2048);
14566 bf_set(lpfc_rq_context_rqe_count,
14567 &rq_create->u.request.context,
14568 LPFC_RQ_RING_SIZE_4096);
14571 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14572 LPFC_DATA_BUF_SIZE);
14574 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14576 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14578 list_for_each_entry(dmabuf, &drq->page_list, list) {
14579 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14580 putPaddrLow(dmabuf->phys);
14581 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14582 putPaddrHigh(dmabuf->phys);
14584 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14585 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
14586 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14587 /* The IOCTL status is embedded in the mailbox subheader. */
14588 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14589 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14590 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14591 if (shdr_status || shdr_add_status || rc) {
14595 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
14596 if (drq->queue_id == 0xFFFF) {
14600 drq->type = LPFC_DRQ;
14601 drq->assoc_qid = cq->queue_id;
14602 drq->subtype = subtype;
14603 drq->host_index = 0;
14604 drq->hba_index = 0;
14606 /* link the header and data RQs onto the parent cq child list */
14607 list_add_tail(&hrq->list, &cq->child_list);
14608 list_add_tail(&drq->list, &cq->child_list);
14611 mempool_free(mbox, phba->mbox_mem_pool);
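/*
 * Sketch (illustrative; LPFC_USOL is one plausible subtype): header and
 * data RQs are created as a pair with identical entry counts and share a
 * CQ for unsolicited receive traffic.
 *
 *	if (hrq->entry_count == drq->entry_count)
 *		rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 */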
14616 * lpfc_eq_destroy - Destroy an event Queue on the HBA
14617 * @eq: The queue structure associated with the queue to destroy.
14619 * This function destroys a queue, as detailed in @eq, by sending a mailbox
14620 * command, specific to the type of queue, to the HBA.
14622 * The @eq struct is used to get the queue ID of the queue to destroy.
14624 * On success this function will return a zero. If the queue destroy mailbox
14625 * command fails this function will return -ENXIO.
14628 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
14630 LPFC_MBOXQ_t *mbox;
14631 int rc, length, status = 0;
14632 uint32_t shdr_status, shdr_add_status;
14633 union lpfc_sli4_cfg_shdr *shdr;
14635 /* sanity check on queue memory */
14638 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
14641 length = (sizeof(struct lpfc_mbx_eq_destroy) -
14642 sizeof(struct lpfc_sli4_cfg_mhdr));
14643 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14644 LPFC_MBOX_OPCODE_EQ_DESTROY,
14645 length, LPFC_SLI4_MBX_EMBED);
14646 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
14648 mbox->vport = eq->phba->pport;
14649 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14651 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
14652 /* The IOCTL status is embedded in the mailbox subheader. */
14653 shdr = (union lpfc_sli4_cfg_shdr *)
14654 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
14655 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14656 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14657 if (shdr_status || shdr_add_status || rc) {
14658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14659 "2505 EQ_DESTROY mailbox failed with "
14660 "status x%x add_status x%x, mbx status x%x\n",
14661 shdr_status, shdr_add_status, rc);
14665 /* Remove eq from any list */
14666 list_del_init(&eq->list);
14667 mempool_free(mbox, eq->phba->mbox_mem_pool);
14672 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
14673 * @cq: The queue structure associated with the queue to destroy.
14675 * This function destroys a queue, as detailed in @cq, by sending a mailbox
14676 * command, specific to the type of queue, to the HBA.
14678 * The @cq struct is used to get the queue ID of the queue to destroy.
14680 * On success this function will return a zero. If the queue destroy mailbox
14681 * command fails this function will return -ENXIO.
14684 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
14686 LPFC_MBOXQ_t *mbox;
14687 int rc, length, status = 0;
14688 uint32_t shdr_status, shdr_add_status;
14689 union lpfc_sli4_cfg_shdr *shdr;
14691 /* sanity check on queue memory */
14694 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
14697 length = (sizeof(struct lpfc_mbx_cq_destroy) -
14698 sizeof(struct lpfc_sli4_cfg_mhdr));
14699 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14700 LPFC_MBOX_OPCODE_CQ_DESTROY,
14701 length, LPFC_SLI4_MBX_EMBED);
14702 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
14704 mbox->vport = cq->phba->pport;
14705 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14706 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
14707 /* The IOCTL status is embedded in the mailbox subheader. */
14708 shdr = (union lpfc_sli4_cfg_shdr *)
14709 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
14710 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14711 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14712 if (shdr_status || shdr_add_status || rc) {
14713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14714 "2506 CQ_DESTROY mailbox failed with "
14715 "status x%x add_status x%x, mbx status x%x\n",
14716 shdr_status, shdr_add_status, rc);
14719 /* Remove cq from any list */
14720 list_del_init(&cq->list);
14721 mempool_free(mbox, cq->phba->mbox_mem_pool);
14726 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
14727 * @mq: The queue structure associated with the queue to destroy.
14729 * This function destroys a queue, as detailed in @mq, by sending a mailbox
14730 * command, specific to the type of queue, to the HBA.
14732 * The @mq struct is used to get the queue ID of the queue to destroy.
14734 * On success this function will return a zero. If the queue destroy mailbox
14735 * command fails this function will return -ENXIO.
14738 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14740 LPFC_MBOXQ_t *mbox;
14741 int rc, length, status = 0;
14742 uint32_t shdr_status, shdr_add_status;
14743 union lpfc_sli4_cfg_shdr *shdr;
14745 /* sanity check on queue memory */
14748 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
14751 length = (sizeof(struct lpfc_mbx_mq_destroy) -
14752 sizeof(struct lpfc_sli4_cfg_mhdr));
14753 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14754 LPFC_MBOX_OPCODE_MQ_DESTROY,
14755 length, LPFC_SLI4_MBX_EMBED);
14756 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
14758 mbox->vport = mq->phba->pport;
14759 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14760 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
14761 /* The IOCTL status is embedded in the mailbox subheader. */
14762 shdr = (union lpfc_sli4_cfg_shdr *)
14763 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
14764 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14765 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14766 if (shdr_status || shdr_add_status || rc) {
14767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14768 "2507 MQ_DESTROY mailbox failed with "
14769 "status x%x add_status x%x, mbx status x%x\n",
14770 shdr_status, shdr_add_status, rc);
14773 /* Remove mq from any list */
14774 list_del_init(&mq->list);
14775 mempool_free(mbox, mq->phba->mbox_mem_pool);
14780 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
14781 * @wq: The queue structure associated with the queue to destroy.
14783 * This function destroys a queue, as detailed in @wq, by sending a mailbox
14784 * command, specific to the type of queue, to the HBA.
14786 * The @wq struct is used to get the queue ID of the queue to destroy.
14788 * On success this function will return a zero. If the queue destroy mailbox
14789 * command fails this function will return -ENXIO.
14792 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14794 LPFC_MBOXQ_t *mbox;
14795 int rc, length, status = 0;
14796 uint32_t shdr_status, shdr_add_status;
14797 union lpfc_sli4_cfg_shdr *shdr;
14799 /* sanity check on queue memory */
14802 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
14805 length = (sizeof(struct lpfc_mbx_wq_destroy) -
14806 sizeof(struct lpfc_sli4_cfg_mhdr));
14807 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14808 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
14809 length, LPFC_SLI4_MBX_EMBED);
14810 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
14812 mbox->vport = wq->phba->pport;
14813 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14814 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
14815 shdr = (union lpfc_sli4_cfg_shdr *)
14816 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
14817 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14818 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14819 if (shdr_status || shdr_add_status || rc) {
14820 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14821 "2508 WQ_DESTROY mailbox failed with "
14822 "status x%x add_status x%x, mbx status x%x\n",
14823 shdr_status, shdr_add_status, rc);
14826 /* Remove wq from any list */
14827 list_del_init(&wq->list);
14828 mempool_free(mbox, wq->phba->mbox_mem_pool);
14833 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
14834 * @rq: The queue structure associated with the queue to destroy.
14836 * This function destroys a queue, as detailed in @rq, by sending a mailbox
14837 * command, specific to the type of queue, to the HBA.
14839 * The @rq struct is used to get the queue ID of the queue to destroy.
14841 * On success this function will return a zero. If the queue destroy mailbox
14842 * command fails this function will return -ENXIO.
14845 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14846 struct lpfc_queue *drq)
14848 LPFC_MBOXQ_t *mbox;
14849 int rc, length, status = 0;
14850 uint32_t shdr_status, shdr_add_status;
14851 union lpfc_sli4_cfg_shdr *shdr;
14853 /* sanity check on queue memory */
14856 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
14859 length = (sizeof(struct lpfc_mbx_rq_destroy) -
14860 sizeof(struct lpfc_sli4_cfg_mhdr));
14861 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14862 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
14863 length, LPFC_SLI4_MBX_EMBED);
14864 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14866 mbox->vport = hrq->phba->pport;
14867 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14868 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
14869 /* The IOCTL status is embedded in the mailbox subheader. */
14870 shdr = (union lpfc_sli4_cfg_shdr *)
14871 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14872 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14873 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14874 if (shdr_status || shdr_add_status || rc) {
14875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14876 "2509 RQ_DESTROY mailbox failed with "
14877 "status x%x add_status x%x, mbx status x%x\n",
14878 shdr_status, shdr_add_status, rc);
14879 if (rc != MBX_TIMEOUT)
14880 mempool_free(mbox, hrq->phba->mbox_mem_pool);
14883 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14885 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
14886 shdr = (union lpfc_sli4_cfg_shdr *)
14887 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14888 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14889 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14890 if (shdr_status || shdr_add_status || rc) {
14891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14892 "2510 RQ_DESTROY mailbox failed with "
14893 "status x%x add_status x%x, mbx status x%x\n",
14894 shdr_status, shdr_add_status, rc);
14897 list_del_init(&hrq->list);
14898 list_del_init(&drq->list);
14899 mempool_free(mbox, hrq->phba->mbox_mem_pool);
14904 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
14905 * @phba: pointer to lpfc hba data structure.
14906 * @pdma_phys_addr0: Physical address of the 1st SGL page.
14907 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
14908 * @xritag: the xritag that ties this io to the SGL pages.
14910 * This routine will post the sgl pages for the IO that has the xritag
14911 * that is in the iocbq structure. The xritag is assigned during iocbq
14912 * creation and persists for as long as the driver is loaded.
14913 * If the caller has fewer than 256 scatter gather segments to map then
14914 * pdma_phys_addr1 should be 0.
14915 * If the caller needs to map more than 256 scatter gather segments then
14916 * pdma_phys_addr1 should be a valid physical address.
14917 * Physical addresses for SGLs must be 64 byte aligned.
14918 * If you are going to map 2 SGLs then the first one must have 256 entries
14919 * and the second SGL can have between 1 and 256 entries.
14923 * -ENXIO, -ENOMEM - Failure
14926 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
14927 dma_addr_t pdma_phys_addr0,
14928 dma_addr_t pdma_phys_addr1,
14931 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
14932 LPFC_MBOXQ_t *mbox;
14934 uint32_t shdr_status, shdr_add_status;
14936 union lpfc_sli4_cfg_shdr *shdr;
14938 if (xritag == NO_XRI) {
14939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14940 "0364 Invalid param:\n");
14944 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14948 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14949 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
14950 sizeof(struct lpfc_mbx_post_sgl_pages) -
14951 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14953 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
14954 &mbox->u.mqe.un.post_sgl_pages;
14955 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
14956 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
14958 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
14959 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
14960 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
14961 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
14963 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
14964 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
14965 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
14966 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
14967 if (!phba->sli4_hba.intr_enable)
14968 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14970 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14971 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14973 /* The IOCTL status is embedded in the mailbox subheader. */
14974 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
14975 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14976 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14977 if (rc != MBX_TIMEOUT)
14978 mempool_free(mbox, phba->mbox_mem_pool);
14979 if (shdr_status || shdr_add_status || rc) {
14980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14981 "2511 POST_SGL mailbox failed with "
14982 "status x%x add_status x%x, mbx status x%x\n",
14983 shdr_status, shdr_add_status, rc);
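/*
 * Usage sketch (illustrative): posting a single-page SGL for an sglq
 * entry; per the note above, the second page address is 0 when 256 or
 * fewer scatter gather segments are mapped.
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq_entry->phys, 0,
 *				sglq_entry->sli4_xritag);
 */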
14989 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
14990 * @phba: pointer to lpfc hba data structure.
14992 * This routine is invoked to allocate the next available xri from the
14993 * driver's xri bitmask, consistent with the SLI-4 interface spec.
14997 * Returns
14998 * The allocated xri (a logical index starting at 0) if successful
14999 * NO_XRI if no xris are available.
15002 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
15007 * Fetch the next logical xri. Because this index is logical,
15008 * the driver starts at 0 each time.
15010 spin_lock_irq(&phba->hbalock);
15011 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
15012 phba->sli4_hba.max_cfg_param.max_xri, 0);
15013 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
15014 spin_unlock_irq(&phba->hbalock);
15017 set_bit(xri, phba->sli4_hba.xri_bmask);
15018 phba->sli4_hba.max_cfg_param.xri_used++;
15020 spin_unlock_irq(&phba->hbalock);
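/*
 * Usage sketch: callers must treat NO_XRI as allocation failure, e.g.
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return NO_XRI;
 *
 * and release the xri via lpfc_sli4_free_xri() when the IO completes.
 */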
15025 * __lpfc_sli4_free_xri - Release an xri for reuse.
15026 * @phba: pointer to lpfc hba data structure.
15028 * This routine is invoked to release an xri to the pool of
15029 * available xris maintained by the driver. The caller is expected to hold the hbalock.
15032 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15034 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
15035 phba->sli4_hba.max_cfg_param.xri_used--;
15040 * lpfc_sli4_free_xri - Release an xri for reuse.
15041 * @phba: pointer to lpfc hba data structure.
15043 * This routine is invoked to release an xri to the pool of
15044 * available xris maintained by the driver.
15047 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15049 spin_lock_irq(&phba->hbalock);
15050 __lpfc_sli4_free_xri(phba, xri);
15051 spin_unlock_irq(&phba->hbalock);
15055 * lpfc_sli4_next_xritag - Get an xritag for the io
15056 * @phba: Pointer to HBA context object.
15058 * This function gets an xritag for the iocb. The function returns the
15059 * allocated xritag if successful, else NO_XRI (0xffff) when no unused
15060 * xritag is available; NO_XRI is not a valid xritag.
15062 * The caller is not required to hold any lock.
15065 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
15067 uint16_t xri_index;
15069 xri_index = lpfc_sli4_alloc_xri(phba);
15070 if (xri_index == NO_XRI)
15071 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15072 "2004 Failed to allocate XRI.last XRITAG is %d"
15073 " Max XRI is %d, Used XRI is %d\n",
15075 phba->sli4_hba.max_cfg_param.max_xri,
15076 phba->sli4_hba.max_cfg_param.xri_used);
15081 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
15082 * @phba: pointer to lpfc hba data structure.
15083 * @post_sgl_list: pointer to els sgl entry list.
15084 * @count: number of els sgl entries on the list.
15086 * This routine is invoked to post a block of the driver's sgl pages to the
15087 * HBA using a non-embedded mailbox command. No lock is held. This routine
15088 * is only called when the driver is loading and after all IO has been stopped.
15092 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
15093 struct list_head *post_sgl_list,
15096 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
15097 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15098 struct sgl_page_pairs *sgl_pg_pairs;
15100 LPFC_MBOXQ_t *mbox;
15101 uint32_t reqlen, alloclen, pg_pairs;
15103 uint16_t xritag_start = 0;
15105 uint32_t shdr_status, shdr_add_status;
15106 union lpfc_sli4_cfg_shdr *shdr;
15108 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
15109 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
15110 if (reqlen > SLI4_PAGE_SIZE) {
15111 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15112 "2559 Block sgl registration required DMA "
15113 "size (%d) great than a page\n", reqlen);
15117 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15121 /* Allocate DMA memory and set up the non-embedded mailbox command */
15122 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15123 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15124 LPFC_SLI4_MBX_NEMBED);
15126 if (alloclen < reqlen) {
15127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15128 "0285 Allocated DMA memory size (%d) is "
15129 "less than the requested DMA memory "
15130 "size (%d)\n", alloclen, reqlen);
15131 lpfc_sli4_mbox_cmd_free(phba, mbox);
15134 /* Set up the SGL pages in the non-embedded DMA pages */
15135 viraddr = mbox->sge_array->addr[0];
15136 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15137 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15140 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
15141 /* Set up the sge entry */
15142 sgl_pg_pairs->sgl_pg0_addr_lo =
15143 cpu_to_le32(putPaddrLow(sglq_entry->phys));
15144 sgl_pg_pairs->sgl_pg0_addr_hi =
15145 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
15146 sgl_pg_pairs->sgl_pg1_addr_lo =
15147 cpu_to_le32(putPaddrLow(0));
15148 sgl_pg_pairs->sgl_pg1_addr_hi =
15149 cpu_to_le32(putPaddrHigh(0));
15151 /* Keep the first xritag on the list */
15152 if (pg_pairs == 0)
15153 xritag_start = sglq_entry->sli4_xritag;
15158 /* Complete initialization and perform endian conversion. */
15159 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
15160 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
15161 sgl->word0 = cpu_to_le32(sgl->word0);
15163 if (!phba->sli4_hba.intr_enable)
15164 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15166 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15167 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15169 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
15170 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15171 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15172 if (rc != MBX_TIMEOUT)
15173 lpfc_sli4_mbox_cmd_free(phba, mbox);
15174 if (shdr_status || shdr_add_status || rc) {
15175 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15176 "2513 POST_SGL_BLOCK mailbox command failed "
15177 "status x%x add_status x%x mbx status x%x\n",
15178 shdr_status, shdr_add_status, rc);
15185 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
15186 * @phba: pointer to lpfc hba data structure.
15187 * @sblist: pointer to scsi buffer list.
15188 * @count: number of scsi buffers on the list.
15190 * This routine is invoked to post a block of @count scsi sgl pages from a
15191 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
15196 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
15197 struct list_head *sblist,
15200 struct lpfc_scsi_buf *psb;
15201 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15202 struct sgl_page_pairs *sgl_pg_pairs;
15204 LPFC_MBOXQ_t *mbox;
15205 uint32_t reqlen, alloclen, pg_pairs;
15207 uint16_t xritag_start = 0;
15209 uint32_t shdr_status, shdr_add_status;
15210 dma_addr_t pdma_phys_bpl1;
15211 union lpfc_sli4_cfg_shdr *shdr;
15213 /* Calculate the requested length of the dma memory */
15214 reqlen = count * sizeof(struct sgl_page_pairs) +
15215 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
15216 if (reqlen > SLI4_PAGE_SIZE) {
15217 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
15218 "0217 Block sgl registration required DMA "
15219 "size (%d) great than a page\n", reqlen);
15222 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15225 "0283 Failed to allocate mbox cmd memory\n");
15229 /* Allocate DMA memory and set up the non-embedded mailbox command */
15230 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15231 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15232 LPFC_SLI4_MBX_NEMBED);
15234 if (alloclen < reqlen) {
15235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15236 "2561 Allocated DMA memory size (%d) is "
15237 "less than the requested DMA memory "
15238 "size (%d)\n", alloclen, reqlen);
15239 lpfc_sli4_mbox_cmd_free(phba, mbox);
15243 /* Get the first SGE entry from the non-embedded DMA memory */
15244 viraddr = mbox->sge_array->addr[0];
15246 /* Set up the SGL pages in the non-embedded DMA pages */
15247 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
15248 sgl_pg_pairs = &sgl->sgl_pg_pairs;
15251 list_for_each_entry(psb, sblist, list) {
15252 /* Set up the sge entry */
15253 sgl_pg_pairs->sgl_pg0_addr_lo =
15254 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
15255 sgl_pg_pairs->sgl_pg0_addr_hi =
15256 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
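/* A DMA buffer larger than one SGL page spills into a second page;
 * post its address in the second half of the page pair, or zero
 * when a single page suffices.
 */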
15257 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
15258 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
15260 pdma_phys_bpl1 = 0;
15261 sgl_pg_pairs->sgl_pg1_addr_lo =
15262 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
15263 sgl_pg_pairs->sgl_pg1_addr_hi =
15264 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
15265 /* Keep the first xritag on the list */
15267 xritag_start = psb->cur_iocbq.sli4_xritag;
15271 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
15272 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
15273 /* Perform endian conversion if necessary */
15274 sgl->word0 = cpu_to_le32(sgl->word0);
15276 if (!phba->sli4_hba.intr_enable)
15277 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15279 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15280 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15282 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
15283 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15284 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15285 if (rc != MBX_TIMEOUT)
15286 lpfc_sli4_mbox_cmd_free(phba, mbox);
15287 if (shdr_status || shdr_add_status || rc) {
15288 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15289 "2564 POST_SGL_BLOCK mailbox command failed "
15290 "status x%x add_status x%x mbx status x%x\n",
15291 shdr_status, shdr_add_status, rc);
15297 static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
15298 static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;
15301 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
15302 * @phba: pointer to lpfc_hba struct that the frame was received on
15303 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
15305 * This function checks the fields in the @fc_hdr to see if the FC frame is a
15306 * valid type of frame that the LPFC driver will handle. This function will
15307 * return a zero if the frame is a valid frame or a non zero value when the
15308 * frame does not pass the check.
15311 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
15313 /* make rctl_names static to save stack space */
15314 struct fc_vft_header *fc_vft_hdr;
15315 uint32_t *header = (uint32_t *) fc_hdr;
15317 switch (fc_hdr->fh_r_ctl) {
15318 case FC_RCTL_DD_UNCAT: /* uncategorized information */
15319 case FC_RCTL_DD_SOL_DATA: /* solicited data */
15320 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
15321 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
15322 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
15323 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
15324 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
15325 case FC_RCTL_DD_CMD_STATUS: /* command status */
15326 case FC_RCTL_ELS_REQ: /* extended link services request */
15327 case FC_RCTL_ELS_REP: /* extended link services reply */
15328 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
15329 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
15330 case FC_RCTL_BA_NOP: /* basic link service NOP */
15331 case FC_RCTL_BA_ABTS: /* basic link service abort */
15332 case FC_RCTL_BA_RMC: /* remove connection */
15333 case FC_RCTL_BA_ACC: /* basic accept */
15334 case FC_RCTL_BA_RJT: /* basic reject */
15335 case FC_RCTL_BA_PRMT:
15336 case FC_RCTL_ACK_1: /* acknowledge_1 */
15337 case FC_RCTL_ACK_0: /* acknowledge_0 */
15338 case FC_RCTL_P_RJT: /* port reject */
15339 case FC_RCTL_F_RJT: /* fabric reject */
15340 case FC_RCTL_P_BSY: /* port busy */
15341 case FC_RCTL_F_BSY: /* fabric busy to data frame */
15342 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
15343 case FC_RCTL_LCR: /* link credit reset */
15344 case FC_RCTL_END: /* end */
15346 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
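/* A VFT-tagged frame encapsulates a normal FC frame; step past
 * the tagging header and validate the inner header with a
 * recursive call.
 */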
15347 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
15348 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
15349 return lpfc_fc_frame_check(phba, fc_hdr);
15353 switch (fc_hdr->fh_type) {
15366 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
15367 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
15368 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
15369 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
15370 lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
15371 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
15372 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
15373 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
15374 be32_to_cpu(header[6]));
15377 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
15378 "2539 Dropped frame rctl:%s type:%s\n",
15379 lpfc_rctl_names[fc_hdr->fh_r_ctl],
15380 lpfc_type_names[fc_hdr->fh_type]);
15385 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
15386 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
15388 * This function processes the FC header to retrieve the VFI from the VF
15389 * header, if one exists. This function will return the VFI if one exists
15390 * or 0 if no VFT header exists.
15393 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
15395 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
15397 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
15399 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
15403 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
15404 * @phba: Pointer to the HBA structure to search for the vport on
15405 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
15406 * @fcfi: The FC Fabric ID that the frame came from
15408 * This function searches the @phba for a vport that matches the content of the
15409 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
15410 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
15411 * returns the matching vport pointer or NULL if unable to match the frame to a vport.
15414 static struct lpfc_vport *
15415 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
15416 uint16_t fcfi, uint32_t did)
15418 struct lpfc_vport **vports;
15419 struct lpfc_vport *vport = NULL;
15422 if (did == Fabric_DID)
15423 return phba->pport;
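/* In point-to-point mode discovery frames can arrive before the
 * link is fully ready, so hand those to the physical port as well.
 */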
15424 if ((phba->pport->fc_flag & FC_PT2PT) &&
15425 !(phba->link_state == LPFC_HBA_READY))
15426 return phba->pport;
15428 vports = lpfc_create_vport_work_array(phba);
15429 if (vports != NULL) {
15430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
15431 if (phba->fcf.fcfi == fcfi &&
15432 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
15433 vports[i]->fc_myDID == did) {
15439 lpfc_destroy_vport_work_array(phba, vports);
15444 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
15445 * @vport: The vport to work on.
15447 * This function updates the receive sequence time stamp for this vport. The
15448 * receive sequence time stamp indicates the time that the last frame of
15449 * the sequence that has been idle for the longest amount of time was received.
15450 * The driver uses this time stamp to indicate if any received sequences have
15454 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
15456 struct lpfc_dmabuf *h_buf;
15457 struct hbq_dmabuf *dmabuf = NULL;
15459 /* get the oldest sequence on the rcv list */
15460 h_buf = list_get_first(&vport->rcv_buffer_list,
15461 struct lpfc_dmabuf, list);
15464 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15465 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
15469 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
15470 * @vport: The vport that the received sequences were sent to.
15472 * This function cleans up all outstanding received sequences. This is called
15473 * by the driver when a link event or user action invalidates all the received
15477 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
15479 struct lpfc_dmabuf *h_buf, *hnext;
15480 struct lpfc_dmabuf *d_buf, *dnext;
15481 struct hbq_dmabuf *dmabuf = NULL;
15483 /* start with the oldest sequence on the rcv list */
15484 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15485 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15486 list_del_init(&dmabuf->hbuf.list);
15487 list_for_each_entry_safe(d_buf, dnext,
15488 &dmabuf->dbuf.list, list) {
15489 list_del_init(&d_buf->list);
15490 lpfc_in_buf_free(vport->phba, d_buf);
15492 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15497 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
15498 * @vport: The vport that the received sequences were sent to.
15500 * This function determines whether any received sequences have timed out by
15501 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
15502 * indicates that there is at least one timed out sequence this routine will
15503 * go through the received sequences one at a time from most inactive to most
15504 * active to determine which ones need to be cleaned up. Once it has determined
15505 * that a sequence needs to be cleaned up it will simply free up the resources
15506 * without sending an abort.
15509 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
15511 struct lpfc_dmabuf *h_buf, *hnext;
15512 struct lpfc_dmabuf *d_buf, *dnext;
15513 struct hbq_dmabuf *dmabuf = NULL;
15514 unsigned long timeout;
15515 int abort_count = 0;
15517 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15518 vport->rcv_buffer_time_stamp);
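/* fc_edtov carries the fabric's E_D_TOV in milliseconds; a sequence
 * that has sat on the rcv list longer than that is considered
 * timed out.
 */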
15519 if (list_empty(&vport->rcv_buffer_list) ||
15520 time_before(jiffies, timeout))
15522 /* start with the oldest sequence on the rcv list */
15523 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15524 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15525 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15526 dmabuf->time_stamp);
15527 if (time_before(jiffies, timeout))
15530 list_del_init(&dmabuf->hbuf.list);
15531 list_for_each_entry_safe(d_buf, dnext,
15532 &dmabuf->dbuf.list, list) {
15533 list_del_init(&d_buf->list);
15534 lpfc_in_buf_free(vport->phba, d_buf);
15536 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15539 lpfc_update_rcv_time_stamp(vport);
15543 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
15544 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
15546 * This function searches through the existing incomplete sequences that have
15547 * been sent to this @vport. If the frame matches one of the incomplete
15548 * sequences then the dbuf in the @dmabuf is added to the list of frames that
15549 * make up that sequence. If no sequence is found that matches this frame then
15550 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
15551 * This function returns a pointer to the first dmabuf in the sequence list that
15552 * the frame was linked to.
15554 static struct hbq_dmabuf *
15555 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15557 struct fc_frame_header *new_hdr;
15558 struct fc_frame_header *temp_hdr;
15559 struct lpfc_dmabuf *d_buf;
15560 struct lpfc_dmabuf *h_buf;
15561 struct hbq_dmabuf *seq_dmabuf = NULL;
15562 struct hbq_dmabuf *temp_dmabuf = NULL;
15565 INIT_LIST_HEAD(&dmabuf->dbuf.list);
15566 dmabuf->time_stamp = jiffies;
15567 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
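/* Frames belong to the same sequence when the SEQ_ID, OX_ID, and
 * 3-byte source ID in their headers all match; those are the keys
 * compared in the walk below.
 */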
15569 /* Use the hdr_buf to find the sequence that this frame belongs to */
15570 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15571 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15572 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15573 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15574 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15576 /* found a pending sequence that matches this frame */
15577 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15582 * This indicates first frame received for this sequence.
15583 * Queue the buffer on the vport's rcv_buffer_list.
15585 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15586 lpfc_update_rcv_time_stamp(vport);
15589 temp_hdr = seq_dmabuf->hbuf.virt;
15590 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
15591 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15592 list_del_init(&seq_dmabuf->hbuf.list);
15593 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15594 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15595 lpfc_update_rcv_time_stamp(vport);
15598 /* move this sequence to the tail to indicate a young sequence */
15599 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
15600 seq_dmabuf->time_stamp = jiffies;
15601 lpfc_update_rcv_time_stamp(vport);
15602 if (list_empty(&seq_dmabuf->dbuf.list)) {
15603 temp_hdr = dmabuf->hbuf.virt;
15604 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15607 /* find the correct place in the sequence to insert this frame */
15608 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
15610 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15611 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
15613 * If the frame's sequence count is greater than the frame on
15614 * the list then insert the frame right after this frame
15616 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
15617 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15618 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
15623 if (&d_buf->list == &seq_dmabuf->dbuf.list)
15625 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
15634 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
15635 * @vport: pointer to a virtual port
15636 * @dmabuf: pointer to a dmabuf that describes the FC sequence
15638 * This function tries to abort the partially assembled sequence described
15639 * by the information from the basic abort @dmabuf. It checks to see whether
15640 * such a partially assembled sequence is held by the driver. If so, it shall free up all
15641 * the frames from the partially assembled sequence.
15644 * true -- if a matching partially assembled sequence is present and all
15645 * of its frames were freed with the sequence;
15646 * false -- if there is no matching partially assembled sequence present, so
15647 * nothing got aborted in the lower layer driver
15650 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
15651 struct hbq_dmabuf *dmabuf)
15653 struct fc_frame_header *new_hdr;
15654 struct fc_frame_header *temp_hdr;
15655 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
15656 struct hbq_dmabuf *seq_dmabuf = NULL;
15658 /* Use the hdr_buf to find the sequence that matches this frame */
15659 INIT_LIST_HEAD(&dmabuf->dbuf.list);
15660 INIT_LIST_HEAD(&dmabuf->hbuf.list);
15661 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15662 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15663 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15664 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15665 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15666 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15668 /* found a pending sequence that matches this frame */
15669 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15673 /* Free up all the frames from the partially assembled sequence */
15675 list_for_each_entry_safe(d_buf, n_buf,
15676 &seq_dmabuf->dbuf.list, list) {
15677 list_del_init(&d_buf->list);
15678 lpfc_in_buf_free(vport->phba, d_buf);
15686 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
15687 * @vport: pointer to a virtual port
15688 * @dmabuf: pointer to a dmabuf that describes the FC sequence
15690 * This function tries to abort the sequence already assembled and passed to
15691 * the upper level protocol, described by the information from the basic abort
15692 * @dmabuf. It checks to see whether such a pending context exists at the
15693 * upper level protocol. If so, it shall clean up the pending context.
15696 * true -- if a matching pending context of the sequence was cleaned
15698 * false -- if no matching pending context of the sequence is present
15702 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15704 struct lpfc_hba *phba = vport->phba;
15707 /* Accepting abort at ulp with SLI4 only */
15708 if (phba->sli_rev < LPFC_SLI_REV4)
15711 /* Give each interested upper level protocol a chance to handle the abort */
15712 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
15720 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
15721 * @phba: Pointer to HBA context object.
15722 * @cmd_iocbq: pointer to the command iocbq structure.
15723 * @rsp_iocbq: pointer to the response iocbq structure.
15725 * This function handles the sequence abort response iocb command complete
15726 * event. It properly releases the memory allocated to the sequence abort accept iocb.
15730 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
15731 struct lpfc_iocbq *cmd_iocbq,
15732 struct lpfc_iocbq *rsp_iocbq)
15734 struct lpfc_nodelist *ndlp;
15737 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
15738 lpfc_nlp_put(ndlp);
15739 lpfc_nlp_not_used(ndlp);
15740 lpfc_sli_release_iocbq(phba, cmd_iocbq);
15743 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
15744 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
15745 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15746 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
15747 rsp_iocbq->iocb.ulpStatus,
15748 rsp_iocbq->iocb.un.ulpWord[4]);
15752 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
15753 * @phba: Pointer to HBA context object.
15754 * @xri: xri id in transaction.
15756 * This function validates that the xri maps to the known range of XRIs
15757 * allocated and used by the driver.
15760 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
15765 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
15766 if (xri == phba->sli4_hba.xri_ids[i])
15773 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
15774 * @vport: Pointer to the vport that the abort was received on.
15775 * @fc_hdr: pointer to a FC frame header.
* @aborted: whether the partially assembled sequence was successfully aborted.
15777 * This function sends a basic response to a previous unsol sequence abort
15778 * event after aborting the sequence handling.
15781 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
15782 struct fc_frame_header *fc_hdr, bool aborted)
15784 struct lpfc_hba *phba = vport->phba;
15785 struct lpfc_iocbq *ctiocb = NULL;
15786 struct lpfc_nodelist *ndlp;
15787 uint16_t oxid, rxid, xri, lxri;
15788 uint32_t sid, fctl;
15792 if (!lpfc_is_link_up(phba))
15795 sid = sli4_sid_from_fc_hdr(fc_hdr);
15796 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
15797 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
15799 ndlp = lpfc_findnode_did(vport, sid);
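/* The BLS response must be issued on a node context for the ABTS
 * originator; if no node exists for this S_ID yet, allocate and
 * enqueue a fresh one.
 */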
15801 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
15803 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15804 "1268 Failed to allocate ndlp for "
15805 "oxid:x%x SID:x%x\n", oxid, sid);
15808 lpfc_nlp_init(vport, ndlp, sid);
15809 /* Put ndlp onto pport node list */
15810 lpfc_enqueue_node(vport, ndlp);
15811 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
15812 /* re-setup ndlp without removing from node list */
15813 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
15815 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15816 "3275 Failed to active ndlp found "
15817 "for oxid:x%x SID:x%x\n", oxid, sid);
15822 /* Allocate buffer for rsp iocb */
15823 ctiocb = lpfc_sli_get_iocbq(phba);
15827 /* Extract the F_CTL field from FC_HDR */
15828 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
15830 icmd = &ctiocb->iocb;
15831 icmd->un.xseq64.bdl.bdeSize = 0;
15832 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
15833 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
15834 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
15835 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
15837 /* Fill in the rest of iocb fields */
15838 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
15839 icmd->ulpBdeCount = 0;
15841 icmd->ulpClass = CLASS3;
15842 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
15843 ctiocb->context1 = lpfc_nlp_get(ndlp);
15845 ctiocb->iocb_cmpl = NULL;
15846 ctiocb->vport = phba->pport;
15847 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
15848 ctiocb->sli4_lxritag = NO_XRI;
15849 ctiocb->sli4_xritag = NO_XRI;
15851 if (fctl & FC_FC_EX_CTX)
15852 /* Exchange responder sent the abort so we
 * own the oxid.
 */
15858 lxri = lpfc_sli4_xri_inrange(phba, xri);
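/* If the xri is one the driver owns, quarantine it on the RRQ
 * (reinstate recovery qualifier) list so it is not reused for a
 * new exchange until the recovery window expires.
 */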
15859 if (lxri != NO_XRI)
15860 lpfc_set_rrq_active(phba, ndlp, lxri,
15861 (xri == oxid) ? rxid : oxid, 0);
15862 /* For BA_ABTS from exchange responder, if the logical xri with
15863 * the oxid maps to the FCP XRI range, the port no longer has
15864 * that exchange context, send a BLS_RJT. Override the IOCB for
15867 if ((fctl & FC_FC_EX_CTX) &&
15868 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
15869 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15870 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15871 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15872 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15875 /* If BA_ABTS failed to abort a partially assembled receive sequence,
15876 * the driver no longer has that exchange, send a BLS_RJT. Override
15877 * the IOCB for a BA_RJT.
15879 if (aborted == false) {
15880 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15881 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15882 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15883 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15886 if (fctl & FC_FC_EX_CTX) {
15887 /* ABTS sent by responder to CT exchange, construction
15888 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
15889 * field and RX_ID from ABTS for RX_ID field.
15891 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
15893 /* ABTS sent by initiator to CT exchange, construction
15894 * of BA_ACC will need to allocate a new XRI as for the
15897 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
15899 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
15900 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
15902 /* Xmit CT abts response on exchange <xid> */
15903 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
15904 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
15905 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
15907 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
15908 if (rc == IOCB_ERROR) {
15909 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
15910 "2925 Failed to issue CT ABTS RSP x%x on "
15911 "xri x%x, Data x%x\n",
15912 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
15914 lpfc_nlp_put(ndlp);
15915 ctiocb->context1 = NULL;
15916 lpfc_sli_release_iocbq(phba, ctiocb);
15921 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
15922 * @vport: Pointer to the vport on which this sequence was received
15923 * @dmabuf: pointer to a dmabuf that describes the FC sequence
15925 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
15926 * receive sequence is only partially assembled by the driver, it shall abort
15927 * the partially assembled frames for the sequence. Otherwise, if the
15928 * unsolicited receive sequence has been completely assembled and passed to
15929 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to note
15930 * that the unsolicited sequence has been aborted. After that, it will issue a
15931 * basic accept to accept the abort.
15934 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
15935 struct hbq_dmabuf *dmabuf)
15937 struct lpfc_hba *phba = vport->phba;
15938 struct fc_frame_header fc_hdr;
15942 /* Make a copy of fc_hdr before the dmabuf is released */
15943 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
15944 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
15946 if (fctl & FC_FC_EX_CTX) {
15947 /* ABTS by responder to exchange, no cleanup needed */
15950 /* ABTS by initiator to exchange, need to do cleanup */
15951 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
15952 if (aborted == false)
15953 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
15955 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15957 /* Respond with BA_ACC or BA_RJT accordingly */
15958 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
15962 * lpfc_seq_complete - Indicates if a sequence is complete
15963 * @dmabuf: pointer to a dmabuf that describes the FC sequence
15965 * This function checks the sequence, starting with the frame described by
15966 * @dmabuf, to see if all the frames associated with this sequence are present.
15967 * The frames associated with this sequence are linked to the @dmabuf using the
15968 * dbuf list. This function looks for three major things: 1) that the first frame
15969 * has a sequence count of zero; 2) that there is a frame with the last frame of
15970 * sequence bit set; 3) that there are no holes in the sequence count. The function will
15971 * return 1 when the sequence is complete, otherwise it will return 0.
15974 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
15976 struct fc_frame_header *hdr;
15977 struct lpfc_dmabuf *d_buf;
15978 struct hbq_dmabuf *seq_dmabuf;
15982 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15983 /* make sure first frame of sequence has a sequence count of zero */
15984 if (hdr->fh_seq_cnt != seq_count)
15986 fctl = (hdr->fh_f_ctl[0] << 16 |
15987 hdr->fh_f_ctl[1] << 8 |
15989 /* If last frame of sequence we can return success. */
15990 if (fctl & FC_FC_END_SEQ)
15992 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
15993 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15994 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15995 /* If there is a hole in the sequence count then fail. */
15996 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
15998 fctl = (hdr->fh_f_ctl[0] << 16 |
15999 hdr->fh_f_ctl[1] << 8 |
16001 /* If last frame of sequence we can return success. */
16002 if (fctl & FC_FC_END_SEQ)
16009 * lpfc_prep_seq - Prep sequence for ULP processing
16010 * @vport: Pointer to the vport on which this sequence was received
16011 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16013 * This function takes a sequence, described by a list of frames, and creates
16014 * a list of iocbq structures to describe the sequence. This iocbq list will be
16015 * used to issue to the generic unsolicited sequence handler. This routine
16016 * returns a pointer to the first iocbq in the list. If the function is unable
16017 * to allocate an iocbq then it throws out the received frames that were not
16018 * able to be described and returns a pointer to the first iocbq. If unable to
16019 * allocate any iocbqs (including the first) this function will return NULL.
16021 static struct lpfc_iocbq *
16022 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
16024 struct hbq_dmabuf *hbq_buf;
16025 struct lpfc_dmabuf *d_buf, *n_buf;
16026 struct lpfc_iocbq *first_iocbq, *iocbq;
16027 struct fc_frame_header *fc_hdr;
16029 uint32_t len, tot_len;
16030 struct ulp_bde64 *pbde;
16032 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16033 /* remove from receive buffer list */
16034 list_del_init(&seq_dmabuf->hbuf.list);
16035 lpfc_update_rcv_time_stamp(vport);
16036 /* get the Remote Port's SID */
16037 sid = sli4_sid_from_fc_hdr(fc_hdr);
16039 /* Get an iocbq struct to fill in. */
16040 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
16042 /* Initialize the first IOCB. */
16043 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
16044 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
16045 first_iocbq->vport = vport;
16047 /* Check FC Header to see what TYPE of frame we are rcv'ing */
16048 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
16049 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
16050 first_iocbq->iocb.un.rcvels.parmRo =
16051 sli4_did_from_fc_hdr(fc_hdr);
16052 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
16054 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
16055 first_iocbq->iocb.ulpContext = NO_XRI;
16056 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
16057 be16_to_cpu(fc_hdr->fh_ox_id);
16058 /* iocbq is prepped for internal consumption. Physical vpi. */
16059 first_iocbq->iocb.unsli3.rcvsli3.vpi =
16060 vport->phba->vpi_ids[vport->vpi];
16061 /* put the first buffer into the first IOCBq */
16062 tot_len = bf_get(lpfc_rcqe_length,
16063 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
16065 first_iocbq->context2 = &seq_dmabuf->dbuf;
16066 first_iocbq->context3 = NULL;
16067 first_iocbq->iocb.ulpBdeCount = 1;
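/* A receive buffer holds at most LPFC_DATA_BUF_SIZE bytes, so a BDE
 * never advertises more than that; the remainder of a large sequence
 * is described by the additional buffers chained on below.
 */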
16068 if (tot_len > LPFC_DATA_BUF_SIZE)
16069 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16070 LPFC_DATA_BUF_SIZE;
16072 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
16074 first_iocbq->iocb.un.rcvels.remoteID = sid;
16076 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16078 iocbq = first_iocbq;
16080 * Each IOCBq can have two Buffers assigned, so go through the list
16081 * of buffers for this sequence and save two buffers in each IOCBq
16083 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
16085 lpfc_in_buf_free(vport->phba, d_buf);
16088 if (!iocbq->context3) {
16089 iocbq->context3 = d_buf;
16090 iocbq->iocb.ulpBdeCount++;
16091 /* We need to get the size out of the right CQE */
16092 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16093 len = bf_get(lpfc_rcqe_length,
16094 &hbq_buf->cq_event.cqe.rcqe_cmpl);
16095 pbde = (struct ulp_bde64 *)
16096 &iocbq->iocb.unsli3.sli3Words[4];
16097 if (len > LPFC_DATA_BUF_SIZE)
16098 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
16100 pbde->tus.f.bdeSize = len;
16102 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
16105 iocbq = lpfc_sli_get_iocbq(vport->phba);
16108 first_iocbq->iocb.ulpStatus =
16109 IOSTAT_FCP_RSP_ERROR;
16110 first_iocbq->iocb.un.ulpWord[4] =
16111 IOERR_NO_RESOURCES;
16113 lpfc_in_buf_free(vport->phba, d_buf);
16116 /* We need to get the size out of the right CQE */
16117 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16118 len = bf_get(lpfc_rcqe_length,
16119 &hbq_buf->cq_event.cqe.rcqe_cmpl);
16120 iocbq->context2 = d_buf;
16121 iocbq->context3 = NULL;
16122 iocbq->iocb.ulpBdeCount = 1;
16123 if (len > LPFC_DATA_BUF_SIZE)
16124 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16125 LPFC_DATA_BUF_SIZE;
16127 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
16130 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16132 iocbq->iocb.un.rcvels.remoteID = sid;
16133 list_add_tail(&iocbq->list, &first_iocbq->list);
16136 return first_iocbq;
16140 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
16141 struct hbq_dmabuf *seq_dmabuf)
16143 struct fc_frame_header *fc_hdr;
16144 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
16145 struct lpfc_hba *phba = vport->phba;
16147 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16148 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
16150 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16151 "2707 Ring %d handler: Failed to allocate "
16152 "iocb Rctl x%x Type x%x received\n",
16154 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16157 if (!lpfc_complete_unsol_iocb(phba,
16158 phba->sli4_hba.els_wq->pring,
16159 iocbq, fc_hdr->fh_r_ctl,
16161 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16162 "2540 Ring %d handler: unexpected Rctl "
16163 "x%x Type x%x received\n",
16165 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16167 /* Free iocb created in lpfc_prep_seq */
16168 list_for_each_entry_safe(curr_iocb, next_iocb,
16169 &iocbq->list, list) {
16170 list_del_init(&curr_iocb->list);
16171 lpfc_sli_release_iocbq(phba, curr_iocb);
16173 lpfc_sli_release_iocbq(phba, iocbq);
16177 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
16178 * @phba: Pointer to HBA context object.
16180 * This function is called with no lock held. This function processes all
16181 * the received buffers and gives them to the upper layers when a received buffer
16182 * indicates that it is the final frame in the sequence. The interrupt
16183 * service routine processes received buffers in interrupt context.
16184 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
16185 * appropriate receive function when the final frame in a sequence is received.
16188 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
16189 struct hbq_dmabuf *dmabuf)
16191 struct hbq_dmabuf *seq_dmabuf;
16192 struct fc_frame_header *fc_hdr;
16193 struct lpfc_vport *vport;
16197 /* Process each received buffer */
16198 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16200 /* check to see if this is a valid type of frame */
16201 if (lpfc_fc_frame_check(phba, fc_hdr)) {
16202 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16206 if ((bf_get(lpfc_cqe_code,
16207 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
16208 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
16209 &dmabuf->cq_event.cqe.rcqe_cmpl);
16211 fcfi = bf_get(lpfc_rcqe_fcf_id,
16212 &dmabuf->cq_event.cqe.rcqe_cmpl);
16214 /* d_id this frame is directed to */
16215 did = sli4_did_from_fc_hdr(fc_hdr);
16217 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
16219 /* throw out the frame */
16220 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16224 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
16225 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
16226 (did != Fabric_DID)) {
16228 * Throw out the frame if we are not pt2pt.
16229 * The pt2pt protocol allows for discovery frames
16230 * to be received without a registered VPI.
16232 if (!(vport->fc_flag & FC_PT2PT) ||
16233 (phba->link_state == LPFC_HBA_READY)) {
16234 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16239 /* Handle the basic abort sequence (BA_ABTS) event */
16240 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
16241 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
16245 /* Link this frame */
16246 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
16248 /* unable to add frame to vport - throw it out */
16249 lpfc_in_buf_free(phba, &dmabuf->dbuf);
16252 /* If not last frame in sequence continue processing frames. */
16253 if (!lpfc_seq_complete(seq_dmabuf))
16256 /* Send the complete sequence to the upper layer protocol */
16257 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
16261 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
16262 * @phba: pointer to lpfc hba data structure.
16264 * This routine is invoked to post rpi header templates to the
16265 * HBA consistent with the SLI-4 interface spec. This routine
16266 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
16267 * SLI4_PAGE_SIZE / 64 rpi context headers.
16269 * This routine does not require any locks. Its usage is expected
16270 * to be driver load or reset recovery when the driver is
16275 * -EIO - The mailbox failed to complete successfully.
16276 * When this error occurs, the driver is not guaranteed
16277 * to have any rpi regions posted to the device and
16278 * must either attempt to repost the regions or take a
16282 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
16284 struct lpfc_rpi_hdr *rpi_page;
16288 /* SLI4 ports that support extents do not require RPI headers. */
16289 if (!phba->sli4_hba.rpi_hdrs_in_use)
16291 if (phba->sli4_hba.extents_in_use)
16294 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
16296 * Assign the rpi headers a physical rpi only if the driver
16297 * has not initialized those resources. A port reset only
16298 * needs the headers posted.
16300 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
16302 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
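/* rpi_ids[] translates the driver's 0-based logical rpi to the
 * physical rpi value the port expects in the posted header.
 */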
16304 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
16305 if (rc != MBX_SUCCESS) {
16306 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16307 "2008 Error %d posting all rpi "
16315 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
16316 LPFC_RPI_RSRC_RDY);
16321 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
16322 * @phba: pointer to lpfc hba data structure.
16323 * @rpi_page: pointer to the rpi memory region.
16325 * This routine is invoked to post a single rpi header to the
16326 * HBA consistent with the SLI-4 interface spec. This memory region
16327 * maps up to 64 rpi context regions.
16331 * -ENOMEM - No available memory
16332 * -EIO - The mailbox failed to complete successfully.
16335 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
16337 LPFC_MBOXQ_t *mboxq;
16338 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
16340 uint32_t shdr_status, shdr_add_status;
16341 union lpfc_sli4_cfg_shdr *shdr;
16343 /* SLI4 ports that support extents do not require RPI headers. */
16344 if (!phba->sli4_hba.rpi_hdrs_in_use)
16346 if (phba->sli4_hba.extents_in_use)
16349 /* The port is notified of the header region via a mailbox command. */
16350 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16352 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16353 "2001 Unable to allocate memory for issuing "
16354 "SLI_CONFIG_SPECIAL mailbox command\n");
16358 /* Post all rpi memory regions to the port. */
16359 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
16360 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
16361 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
16362 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
16363 sizeof(struct lpfc_sli4_cfg_mhdr),
16364 LPFC_SLI4_MBX_EMBED);
16367 /* Post the physical rpi to the port for this rpi header. */
16368 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
16369 rpi_page->start_rpi);
16370 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
16371 hdr_tmpl, rpi_page->page_count);
16373 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
16374 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
16375 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
16376 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
16377 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16378 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16379 if (rc != MBX_TIMEOUT)
16380 mempool_free(mboxq, phba->mbox_mem_pool);
16381 if (shdr_status || shdr_add_status || rc) {
16382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16383 "2514 POST_RPI_HDR mailbox failed with "
16384 "status x%x add_status x%x, mbx status x%x\n",
16385 shdr_status, shdr_add_status, rc);
16392 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
16393 * @phba: pointer to lpfc hba data structure.
16395 * This routine is invoked to allocate the next available rpi from the
16396 * driver's rpi bitmask. If the supply of unposted rpis runs low, it also
16397 * allocates and posts another rpi header region to the port so that
16398 * subsequent allocations can succeed.
16401 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
16402 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
16405 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
16408 uint16_t max_rpi, rpi_limit;
16409 uint16_t rpi_remaining, lrpi = 0;
16410 struct lpfc_rpi_hdr *rpi_hdr;
16411 unsigned long iflag;
16414 * Fetch the next logical rpi. Because this index is logical,
16415 * the driver starts at 0 each time.
16417 spin_lock_irqsave(&phba->hbalock, iflag);
16418 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
16419 rpi_limit = phba->sli4_hba.next_rpi;
16421 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
16422 if (rpi >= rpi_limit)
16423 rpi = LPFC_RPI_ALLOC_ERROR;
16425 set_bit(rpi, phba->sli4_hba.rpi_bmask);
16426 phba->sli4_hba.max_cfg_param.rpi_used++;
16427 phba->sli4_hba.rpi_count++;
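/* The bitmap is the allocator of record; rpi_used and rpi_count are
 * kept in step with it so the low-water accounting below is a simple
 * subtraction rather than a bitmap rescan.
 */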
16429 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
16430 "0001 rpi:%x max:%x lim:%x\n",
16431 (int) rpi, max_rpi, rpi_limit);
16434 * Don't try to allocate more rpi header regions if the device limit
16435 * has been exhausted.
16437 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
16438 (phba->sli4_hba.rpi_count >= max_rpi)) {
16439 spin_unlock_irqrestore(&phba->hbalock, iflag);
16444 * RPI header postings are not required for SLI4 ports capable of
16447 if (!phba->sli4_hba.rpi_hdrs_in_use) {
16448 spin_unlock_irqrestore(&phba->hbalock, iflag);
16453 * If the driver is running low on rpi resources, allocate another
16454 * page now. Note that the next_rpi value is used because
16455 * it represents how many are actually in use whereas max_rpi notes
16456 * the maximum number supported by the device.
16458 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
16459 spin_unlock_irqrestore(&phba->hbalock, iflag);
16460 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
16461 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
16463 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16464 "2002 Error Could not grow rpi "
16467 lrpi = rpi_hdr->start_rpi;
16468 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
16469 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
16477 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
16478 * @phba: pointer to lpfc hba data structure.
16480 * This routine is invoked to release an rpi to the pool of
16481 * available rpis maintained by the driver. The caller is expected to
* hold the hbalock; lpfc_sli4_free_rpi() below is the locked wrapper.
16484 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16486 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
16487 phba->sli4_hba.rpi_count--;
16488 phba->sli4_hba.max_cfg_param.rpi_used--;
16493 * lpfc_sli4_free_rpi - Release an rpi for reuse.
16494 * @phba: pointer to lpfc hba data structure.
16496 * This routine is invoked to release an rpi to the pool of
16497 * available rpis maintained by the driver.
16500 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16502 spin_lock_irq(&phba->hbalock);
16503 __lpfc_sli4_free_rpi(phba, rpi);
16504 spin_unlock_irq(&phba->hbalock);
16508 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
16509 * @phba: pointer to lpfc hba data structure.
16511 * This routine is invoked to remove the memory region that
16512 * provided rpi via a bitmask.
16515 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
16517 kfree(phba->sli4_hba.rpi_bmask);
16518 kfree(phba->sli4_hba.rpi_ids);
16519 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
16523 * lpfc_sli4_resume_rpi - Resume an rpi on the port
16524 * @ndlp: pointer to the node whose rpi is to be resumed.
16526 * This routine is invoked to issue a RESUME_RPI mailbox command for the
16527 * @ndlp's rpi, optionally attaching completion handler @cmpl with @arg.
16530 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
16531 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
16533 LPFC_MBOXQ_t *mboxq;
16534 struct lpfc_hba *phba = ndlp->phba;
16537 /* The port is notified of the header region via a mailbox command. */
16538 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16542 /* Post all rpi memory regions to the port. */
16543 lpfc_resume_rpi(mboxq, ndlp);
16545 mboxq->mbox_cmpl = cmpl;
16546 mboxq->context1 = arg;
16547 mboxq->context2 = ndlp;
16549 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16550 mboxq->vport = ndlp->vport;
16551 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16552 if (rc == MBX_NOT_FINISHED) {
16553 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16554 "2010 Resume RPI Mailbox failed "
16555 "status %d, mbxStatus x%x\n", rc,
16556 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16557 mempool_free(mboxq, phba->mbox_mem_pool);
16564 * lpfc_sli4_init_vpi - Initialize a vpi with the port
16565 * @vport: Pointer to the vport for which the vpi is being initialized
16567 * This routine is invoked to activate a vpi with the port.
16571 * -Evalue otherwise
16574 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
16576 LPFC_MBOXQ_t *mboxq;
16578 int retval = MBX_SUCCESS;
16580 struct lpfc_hba *phba = vport->phba;
16581 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16584 lpfc_init_vpi(phba, mboxq, vport->vpi);
16585 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
16586 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
16587 if (rc != MBX_SUCCESS) {
16588 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
16589 "2022 INIT VPI Mailbox failed "
16590 "status %d, mbxStatus x%x\n", rc,
16591 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16594 if (rc != MBX_TIMEOUT)
16595 mempool_free(mboxq, vport->phba->mbox_mem_pool);
16601 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
16602 * @phba: pointer to lpfc hba data structure.
16603 * @mboxq: Pointer to mailbox object.
16605 * This routine is invoked to manually add a single FCF record. The caller
16606 * must pass a completely initialized FCF_Record. This routine takes
16607 * care of the nonembedded mailbox operations.
16610 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
16613 union lpfc_sli4_cfg_shdr *shdr;
16614 uint32_t shdr_status, shdr_add_status;
16616 virt_addr = mboxq->sge_array->addr[0];
16617 /* The IOCTL status is embedded in the mailbox subheader. */
16618 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
16619 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16620 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16622 if ((shdr_status || shdr_add_status) &&
16623 (shdr_status != STATUS_FCF_IN_USE))
16624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16625 "2558 ADD_FCF_RECORD mailbox failed with "
16626 "status x%x add_status x%x\n",
16627 shdr_status, shdr_add_status);
16629 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16633 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
16634 * @phba: pointer to lpfc hba data structure.
16635 * @fcf_record: pointer to the initialized fcf record to add.
16637 * This routine is invoked to manually add a single FCF record. The caller
16638 * must pass a completely initialized FCF_Record. This routine takes
16639 * care of the nonembedded mailbox operations.
16642 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
16645 LPFC_MBOXQ_t *mboxq;
16648 struct lpfc_mbx_sge sge;
16649 uint32_t alloc_len, req_len;
16652 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16654 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16655 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
16659 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
16662 /* Allocate DMA memory and set up the non-embedded mailbox command */
16663 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
16664 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
16665 req_len, LPFC_SLI4_MBX_NEMBED);
16666 if (alloc_len < req_len) {
16667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16668 "2523 Allocated DMA memory size (x%x) is "
16669 "less than the requested DMA memory "
16670 "size (x%x)\n", alloc_len, req_len);
16671 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16676 * Get the first SGE entry from the non-embedded DMA memory. This
16677 * routine only uses a single SGE.
16679 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
16680 virt_addr = mboxq->sge_array->addr[0];
16682 * Configure the FCF record for FCFI 0. This is the driver's
16683 * hardcoded default and gets used in nonFIP mode.
16685 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
16686 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
16687 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
16690 * Copy the fcf_index and the FCF Record Data. The data starts after
16691 * the FCoE header plus word10. The data copy needs to be endian correct.
16694 bytep += sizeof(uint32_t);
16695 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
16696 mboxq->vport = phba->pport;
16697 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
16698 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16699 if (rc == MBX_NOT_FINISHED) {
16700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16701 "2515 ADD_FCF_RECORD mailbox failed with "
16702 "status 0x%x\n", rc);
16703 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16712 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
16713 * @phba: pointer to lpfc hba data structure.
16714 * @fcf_record: pointer to the fcf record to write the default data.
16715 * @fcf_index: FCF table entry index.
16717 * This routine is invoked to build the driver's default FCF record. The
16718 * values used are hardcoded. This routine handles memory initialization.
16722 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
16723 struct fcf_record *fcf_record,
16724 uint16_t fcf_index)
16726 memset(fcf_record, 0, sizeof(struct fcf_record));
16727 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
16728 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
16729 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
16730 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
16731 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
16732 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
16733 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
16734 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
16735 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
16736 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
16737 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
16738 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
16739 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
16740 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
16741 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
16742 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
16743 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
16744 /* Set the VLAN bit map */
16745 if (phba->valid_vlan) {
16746 fcf_record->vlan_bitmap[phba->vlan_id / 8]
16747 = 1 << (phba->vlan_id % 8);
16752 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
16753 * @phba: pointer to lpfc hba data structure.
16754 * @fcf_index: FCF table entry offset.
16756 * This routine is invoked to scan the entire FCF table by reading FCF
16757 * record and processing it one at a time starting from the @fcf_index
16758 * for initial FCF discovery or fast FCF failover rediscovery.
16760 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
16764 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16767 LPFC_MBOXQ_t *mboxq;
16769 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
16770 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
16771 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16774 "2000 Failed to allocate mbox for "
16777 goto fail_fcf_scan;
16779 /* Construct the read FCF record mailbox command */
16780 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16783 goto fail_fcf_scan;
16785 /* Issue the mailbox command asynchronously */
16786 mboxq->vport = phba->pport;
16787 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
16789 spin_lock_irq(&phba->hbalock);
16790 phba->hba_flag |= FCF_TS_INPROG;
16791 spin_unlock_irq(&phba->hbalock);
16793 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16794 if (rc == MBX_NOT_FINISHED)
16797 /* Reset eligible FCF count for new scan */
16798 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
16799 phba->fcf.eligible_fcf_cnt = 0;
16805 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16806 /* FCF scan failed, clear FCF_TS_INPROG flag */
16807 spin_lock_irq(&phba->hbalock);
16808 phba->hba_flag &= ~FCF_TS_INPROG;
16809 spin_unlock_irq(&phba->hbalock);
16815 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
16816 * @phba: pointer to lpfc hba data structure.
16817 * @fcf_index: FCF table entry offset.
16819 * This routine is invoked to read an FCF record indicated by @fcf_index
16820 * and to use it for FLOGI roundrobin FCF failover.
16822 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
16826 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16829 LPFC_MBOXQ_t *mboxq;
16831 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16833 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16834 "2763 Failed to allocate mbox for "
16837 goto fail_fcf_read;
16839 /* Construct the read FCF record mailbox command */
16840 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16843 goto fail_fcf_read;
16845 /* Issue the mailbox command asynchronously */
16846 mboxq->vport = phba->pport;
16847 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
16848 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16849 if (rc == MBX_NOT_FINISHED)
16855 if (error && mboxq)
16856 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16861 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
16862 * @phba: pointer to lpfc hba data structure.
16863 * @fcf_index: FCF table entry offset.
16865 * This routine is invoked to read an FCF record indicated by @fcf_index to
16866 * determine whether it's eligible for FLOGI roundrobin failover list.
16868 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
16872 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16875 LPFC_MBOXQ_t *mboxq;
16877 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16879 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16880 "2758 Failed to allocate mbox for "
16883 goto fail_fcf_read;
16885 /* Construct the read FCF record mailbox command */
16886 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16889 goto fail_fcf_read;
16891 /* Issue the mailbox command asynchronously */
16892 mboxq->vport = phba->pport;
16893 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
16894 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16895 if (rc == MBX_NOT_FINISHED)
16901 if (error && mboxq)
16902 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16907 * lpfc_check_next_fcf_pri_level - Repopulate the rr_bmask at the next priority
16908 * @phba: pointer to the lpfc_hba struct for this port.
16909 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
16910 * routine when the rr_bmask is empty. The FCF indices are put into the
16911 * rr_bmask based on their priority level, starting from the highest priority
16912 * to the lowest. The most likely FCF candidate will be in the highest
16913 * priority group. When this routine is called it searches the fcf_pri list for
16914 * the next lowest priority group and repopulates the rr_bmask with only those
16917 * 1=success 0=failure
16920 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
16922 uint16_t next_fcf_pri;
16923 uint16_t last_index;
16924 struct lpfc_fcf_pri *fcf_pri;
16928 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
16929 LPFC_SLI4_FCF_TBL_INDX_MAX);
16930 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16931 "3060 Last IDX %d\n", last_index);
16933 /* Verify the priority list has 2 or more entries */
16934 spin_lock_irq(&phba->hbalock);
16935 if (list_empty(&phba->fcf.fcf_pri_list) ||
16936 list_is_singular(&phba->fcf.fcf_pri_list)) {
16937 spin_unlock_irq(&phba->hbalock);
16938 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16939 "3061 Last IDX %d\n", last_index);
16940 return 0; /* Empty rr list */
16942 spin_unlock_irq(&phba->hbalock);
16946 * Clear the rr_bmask and set all of the bits that are at this
16949 memset(phba->fcf.fcf_rr_bmask, 0,
16950 sizeof(*phba->fcf.fcf_rr_bmask));
16951 spin_lock_irq(&phba->hbalock);
16952 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16953 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
16956 * the first priority that has not had a FLOGI failure
16957 * will be the highest.
16960 next_fcf_pri = fcf_pri->fcf_rec.priority;
16961 spin_unlock_irq(&phba->hbalock);
16962 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16963 rc = lpfc_sli4_fcf_rr_index_set(phba,
16964 fcf_pri->fcf_rec.fcf_index);
16968 spin_lock_irq(&phba->hbalock);
16971 * if next_fcf_pri was not set above and the list is not empty then
16972 * we have failed flogis on all of them. So reset flogi failed
16973 * and start at the beginning.
16975 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
16976 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16977 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
16979 * the 1st priority that has not FLOGI failed
16980 * will be the highest.
16983 next_fcf_pri = fcf_pri->fcf_rec.priority;
16984 spin_unlock_irq(&phba->hbalock);
16985 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16986 rc = lpfc_sli4_fcf_rr_index_set(phba,
16987 fcf_pri->fcf_rec.fcf_index);
16991 spin_lock_irq(&phba->hbalock);
16995 spin_unlock_irq(&phba->hbalock);
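
/*
 * Illustrative sketch (assumes a plain array in place of the fcf_pri
 * list; not driver code) of the repopulation step above: keep only the
 * highest priority group that has not yet failed FLOGI.
 *
 *	next_pri = 0;
 *	bitmap_zero(bmask, MAX_RECS);
 *	for (i = 0; i < nr_recs; i++) {
 *		if (rec[i].flogi_failed)
 *			continue;
 *		if (!next_pri)
 *			next_pri = rec[i].priority;	// first eligible group
 *		if (rec[i].priority == next_pri)
 *			set_bit(rec[i].index, bmask);
 *	}
 */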
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
			phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
		LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
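
/*
 * Illustrative sketch of the circular scan above: for a bitmap of N
 * entries and current index cur, the next eligible index is found with
 * the usual wrap-around find_next_bit() idiom.
 *
 *	next = find_next_bit(bmask, N, (cur + 1) % N);
 *	if (next >= N)			// ran off the end: wrap to bit 0
 *		next = find_next_bit(bmask, N, 0);
 *	if (next >= N || next == cur)	// bitmap empty, or back at start
 *		try the next lower priority group, else give up;
 */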
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index to fcf records.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}
/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index to fcf records.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				       rgn23_data + offset,
				       mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
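
/*
 * Illustrative sketch of the chunked dump loop above: each DUMP_MEMORY
 * mailbox returns at most one payload's worth of region 23, so the read
 * iterates until the firmware reports a zero count or the DMP_RGN23_SIZE
 * bound is reached.
 *
 *	offset = 0;
 *	do {
 *		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
 *		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
 *			mb->un.varDmp.word_cnt = 0;	// treat as done
 *		cnt = min(mb->un.varDmp.word_cnt, DMP_RGN23_SIZE - offset);
 *		copy cnt from the mailbox response to rgn23_data + offset;
 *		offset += cnt;
 *	} while (cnt && offset < DMP_RGN23_SIZE);
 */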
/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
			(tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}
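
/*
 * Illustrative sketch of the TLV walk above. Per the parsing code, a
 * record is laid out as: byte 0 = type, byte 1 = length in words, data
 * follows; a record is therefore skipped by advancing length*4 + 4.
 *
 *	while (offset < data_size) {
 *		type = rgn23_data[offset];
 *		len  = rgn23_data[offset + 1] * 4;	// words to bytes
 *		if (type == LPFC_REGION23_LAST_REC)
 *			break;
 *		if (type != wanted_type) {		// hypothetical filter
 *			offset += len + 4;		// header + data
 *			continue;
 *		}
 *		// consume the record at rgn23_data[offset + 2 ...]
 *	}
 */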
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Returns 0 if successful, and @offset will contain the new offset to use
 * for the next write.
 * Returns a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
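
/*
 * Illustrative sketch of the BDE fill above: the object is carved into
 * SLI4_PAGE_SIZE chunks, one BDE per dmabuf, and eof is set on the BDE
 * that reaches @size. Note the code's last-chunk test is
 * (written + SLI4_PAGE_SIZE >= size), which sizes the final BDE to the
 * residual byte count.
 *
 *	written = 0;
 *	for each dmabuf while (i < LPFC_MBX_WR_CONFIG_MAX_BDE &&
 *			       written < size) {
 *		chunk = min(SLI4_PAGE_SIZE, size - written);
 *		bde[i].tus.f.bdeSize = chunk;
 *		written += chunk;
 *		if (written == size)
 *			bf_set(lpfc_wr_object_eof, &request, 1);
 *		i++;
 *	}
 */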
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
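
/*
 * Illustrative sketch of the restart-loop idiom used above: when the
 * per-entry work must drop hbalock (to take host_lock), the list may
 * change underneath the iterator, so the scan restarts from the head
 * until one full pass completes without dropping the lock.
 *
 *	do {
 *		restart = 0;
 *		list_for_each_entry(mb, head, list) {
 *			if (!needs_work(mb))	// hypothetical predicate
 *				continue;
 *			restart = 1;
 *			// drop hbalock, do the work, retake hbalock
 *			break;			// iterator no longer safe
 *		}
 *	} while (restart);
 */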
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe128;
	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
	uint32_t txq_cnt = 0;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}
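
/*
 * Note on the local wqe128 above (sketch): iocb-to-wqe conversion may
 * produce a 128-byte WQE, so a union lpfc_wqe128 on the stack is used
 * as scratch and handed to lpfc_sli4_wq_put() through a union lpfc_wqe
 * pointer; the queue code copies q->entry_size bytes per posted entry.
 *
 *	union lpfc_wqe128 wqe128;
 *	union lpfc_wqe *wqe = (union lpfc_wqe *)&wqe128;
 *	if (!lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
 *		lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe);
 */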
/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sges. If the WQE contains a single
 * BDE then it is converted to a single sli4_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
					bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
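
/*
 * Endianness sketch for the conversion above: BPL entries are already in
 * little-endian wire format, so only the size field is swapped to CPU
 * order for arithmetic, and the SGE's word2 is round-tripped so its bit
 * fields can be edited in CPU order before going back to the hardware.
 *
 *	bde.tus.w = le32_to_cpu(bpl->tus.w);	// LE -> CPU for math
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *	sgl->word2 = le32_to_cpu(sgl->word2);	// edit bits in CPU order
 *	bf_set(lpfc_sli4_sge_last, sgl, last);
 *	sgl->word2 = cpu_to_le32(sgl->word2);	// back to LE for the port
 */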
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @ring_number: Base sli ring number
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe *wqe = &pwqe->wqe;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		if (lpfc_sli4_wq_put(wq, wqe)) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);