/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sisl.h"
#include "common.h"
MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of the error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then this error is handled elsewhere.
				 * If not, then it must be handled here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
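/*
 * Note on the result values above: scp->result packs the midlayer host byte
 * (the DID_* codes, shifted left by 16) together with the SCSI status byte in
 * the low byte. That is why a raw ioasa->rc.scsi_rc can be assigned directly
 * when valid sense data accompanies the status, while the failure paths OR in
 * a DID_* code as well.
 */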
/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}
/**
 * context_reset() - reset command owner context via specified register
 * @cmd:	AFU command that timed out.
 * @reset_reg:	MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

	writeq_be(rrin, reset_reg);
	do {
		rrin = readq_be(reset_reg);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
		__func__, rrin, nretry);
}
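/*
 * The udelay(1 << nretry) above doubles on every pass, so if the reset never
 * completes the total busy-wait is (2^(MC_ROOM_RETRY_CNT + 1) - 1)
 * microseconds: the register is polled aggressively at first and
 * progressively more gently afterwards.
 */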
/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->sq_ctx_reset);
}
/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
	if (--afu->room < 0) {
		room = readq_be(&afu->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			afu->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		afu->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}
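/*
 * The 'room' bookkeeping above trades accuracy for speed: the cached credit
 * count is decremented on every submission and only re-read from the MMIO
 * cmd_room register once the cache underflows, so the expensive MMIO read is
 * amortized across many commands rather than paid on each one.
 */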
/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&afu->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&afu->hsq_slock, lock_flags);

	*afu->hsq_curr = cmd->rcb;
	if (afu->hsq_curr < afu->hsq_end)
		afu->hsq_curr++;
	else
		afu->hsq_curr = afu->hsq_start;
	writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);

	spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
		readq_be(&afu->host_map->sq_head),
		readq_be(&afu->host_map->sq_tail));
	return rc;
}
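/*
 * hsq_credits is seeded with NUM_SQ_ENTRY - 1 (see start_afu()), keeping one
 * ring slot permanently unused so that a full ring and an empty ring remain
 * distinguishable: the tail written to sq_tail can never catch up with the
 * head. Credits are returned as entries are harvested in process_hrrq().
 */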
/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -1;
	}

	return rc;
}
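/*
 * rcb.timeout is scaled by 2 * 1000 above, i.e. the wait allows twice the
 * command's timeout (in seconds) converted to milliseconds before declaring
 * the command lost. On timeout the owning context is reset, which forces the
 * AFU to drop the outstanding command.
 */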
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	u32 port_sel = scp->device->channel + 1;
	struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	u32 port_sel = scp->device->channel + 1;
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int nseg = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	if (likely(sg)) {
		nseg = scsi_dma_map(scp);
		if (unlikely(nseg < 0)) {
			dev_err(dev, "%s: Fail DMA map\n", __func__);
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}
/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	cancel_work_sync(&cfg->work_q);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);
		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}
/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_global_map __iomem *global;
	struct dev_dependent_vals *ddv;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	global = &afu->afu_map->global;

	/* Notify AFU */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		retry_cnt = 0;
		while (true) {
			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
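/*
 * The wait loop above backs off linearly: each re-check of FC_STATUS sleeps
 * 100 ms times the current retry count, so the per-port wait stays within the
 * modest budget cited above instead of busy-polling the register.
 */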
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
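/*
 * Reading all-ones (U64_MAX) from FC_MTIP_STATUS means the MMIO space itself
 * is not responding (for example during error recovery); halving the
 * remaining retries in that case shortens the wait instead of burning the
 * full budget. wait_port_offline() below uses the same convention.
 */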
/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
	{0x0, "", 0, 0}		/* terminator */
};
/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before
	 * unmask
	 */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}
/**
 * process_hrrq() - process the read-response queue
 * @afu:	AFU associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct afu *afu, struct list_head *doneq)
{
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;
	int num_hrrq = 0;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&afu->hsq_credits);
		num_hrrq++;
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return num_hrrq;
}
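/*
 * The toggle bit is how producer and consumer stay in sync without a shared
 * index: the AFU stamps each RRQ entry with the current generation's T bit,
 * and the host only consumes entries whose T bit matches its expected toggle,
 * flipping that expectation each time the queue wraps. Entries left over from
 * the previous lap therefore never match and are never consumed twice.
 */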
/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);
	num_entries = process_hrrq(afu, &doneq);
	spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);

	return IRQ_HANDLED;
}
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}
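/*
 * The scan loop above walks the unmasked status word LSB-first, shifting
 * reg_unmasked right on each pass so iteration stops as soon as no set bits
 * remain; find_ainfo() then maps each set bit back to its table entry.
 */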
/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
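/*
 * Note the kstrtoul(tmp_buf, WWPN_LEN, ...) call above: WWPN_LEN (16) serves
 * double duty as both the validated field length and the numeric base, since
 * a WWPN is sixteen ASCII hexadecimal characters.
 */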
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
}
/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
		__func__, wwpn[0], wwpn[1]);

	/* Set up RRQ and SQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	if (afu_is_sq_cmd_mode(afu)) {
		writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
		writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i,
				     &afu->afu_map->global.fc_regs[i][0],
				     wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}
/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	init_pcr(cfg);

	/* Initialize RRQ */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;
	spin_lock_init(&afu->hrrq_slock);

	/* Initialize SQ */
	if (afu_is_sq_cmd_mode(afu)) {
		memset(&afu->sq, 0, sizeof(afu->sq));
		afu->hsq_start = &afu->sq[0];
		afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
		afu->hsq_curr = afu->hsq_start;

		spin_lock_init(&afu->hsq_slock);
		atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	CXL context for the master.
 *
 * Return: UNDO_NOOP on success, undo level to use for unwinding on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto ret;
	}
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
		goto ret;
	}

	level = init_intr(cfg, ctx);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto out;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_intr(cfg, level);
	goto ret;
}
/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: init_mc failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	spin_lock_init(&afu->rrin_slock);
	afu->room = readq_be(&afu->host_map->cmd_room);

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx_hndl_u:	Identifies context requesting sync.
 * @res_hndl_u:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	char *buf = NULL;
	int rc = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -1;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	init_completion(&cmd->cevent);
	cmd->parent = afu;

	dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	rc = wait_resp(afu, cmd);
	if (unlikely(rc))
		rc = -1;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
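/*
 * Example caller (illustrative, not from this file): superpipe code might
 * issue a lightweight sync for a single resource handle with
 *
 *	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
 *
 * while afu_link_reset() above uses AFU_GSYNC (global) with zeroed handles.
 */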
/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available restart it after the reset is complete
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(1, afu, buf);
}
/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: @count on success.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be 2 (default).
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = NUM_FC_PORTS - 1;

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
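/*
 * Illustrative usage (the path assumes the standard SCSI host sysfs layout):
 *
 *	echo 1 > /sys/class/scsi_host/host<N>/lun_mode
 *
 * switches the AFU to a single internal LUN (1 x 64K, 512B blocks), per the
 * mode table in the kernel-doc above, then resets and rescans the host.
 */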
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}
/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct afu *afu,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n", i, readq_be(&fc_port[i]));
	return bytes;
}
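/*
 * Output sketch (illustrative values only): one line per LUN table entry in
 * the "%03d: %016llx" format above, e.g. when reading port0_lun_table:
 *
 *   000: 8000000000000001
 *   001: 0000000000000000
 */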
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(0, afu, buf);
}
/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(1, afu, buf);
}
/**
 * mode_show() - presents the current mode of the device
 * @dev: Generic device associated with the SCSI device.
 * @attr: Device attribute representing the device mode.
 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
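/*
 * Example (illustrative): a LUN attached through the superpipe ioctl path
 * carries per-device hostdata and reports "superpipe"; all other LUNs
 * report "legacy". The sysfs path below is one possible location:
 *
 *   cat /sys/class/scsi_device/1:0:0:0/device/mode
 *   superpipe
 */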
/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};
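/*
 * Note on .cmd_size above: the SCSI midlayer does not guarantee that the
 * private per-command area is aligned for struct afu_cmd, so the
 * reservation is padded by (alignment - 1) bytes and the driver aligns the
 * command within it at submission time. For example, were
 * __alignof__(struct afu_cmd) 64, a region of sizeof(struct afu_cmd) + 63
 * bytes always contains a 64-byte-aligned window large enough for the
 * command, regardless of where the region itself starts.
 */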
/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };	/* no shutdown notification required */
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work: Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan of the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
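/*
 * Scheduling sketch (illustrative): producers of the events above run in
 * interrupt context and only record what is needed before queueing this
 * handler; a link-down hint from an error interrupt would be staged as:
 *
 *   cfg->lr_state = LINK_RESET_REQUIRED;
 *   cfg->lr_port = port;
 *   schedule_work(&cfg->work_q);
 */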
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev: PCI device associated with the host.
 * @dev_id: PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
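/*
 * Note: cfg->init_state records the last stage to complete so that
 * cxlflash_remove(), which serves both the out_remove error path above and
 * normal device removal, can unwind exactly what was set up. For example,
 * an init_scsi() failure leaves the state at INIT_STATE_AFU, so removal
 * tears down the AFU and PCI stages while skipping SCSI teardown.
 */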
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev: PCI device struct.
 * @state: PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (rc)
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev: PCI device struct.
 *
 * This routine is called by the PCI error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev: PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};
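/*
 * EEH flow recap (as implemented above): on a frozen channel,
 * error_detected() blocks SCSI requests, drains ioctls and marks user
 * contexts in error; after the slot has been reset, slot_reset()
 * reinitializes the AFU; finally, resume() returns the device to
 * STATE_NORMAL, waking reset waiters and unblocking SCSI requests.
 */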
/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);