1 /* Copyright 2012 STEC, Inc.
3 * This file is licensed under the terms of the 3-clause
4 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
5 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
6 * at your option. Both licenses are also available in the LICENSE file
7 * distributed with this project. This file may not be copied, modified,
8 * or distributed except in accordance with those terms.
9 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
10 * Initial Driver Design!
11 * Thomas Swann <tswann@stec-inc.com>
13 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
14 * biomode implementation.
15 * Akhil Bhansali <abhansali@stec-inc.com>
16 * Added support for DISCARD / FLUSH and FUA.
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/pci.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/blkdev.h>
26 #include <linux/sched.h>
27 #include <linux/interrupt.h>
28 #include <linux/compiler.h>
29 #include <linux/workqueue.h>
30 #include <linux/bitops.h>
31 #include <linux/delay.h>
32 #include <linux/time.h>
33 #include <linux/hdreg.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/completion.h>
36 #include <linux/scatterlist.h>
37 #include <linux/version.h>
38 #include <linux/err.h>
39 #include <linux/scatterlist.h>
40 #include <linux/aer.h>
41 #include <linux/ctype.h>
42 #include <linux/wait.h>
43 #include <linux/uio.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_tcq.h>
47 #include <scsi/scsi_cmnd.h>
50 #include <linux/uaccess.h>
51 #include <asm-generic/unaligned.h>
53 #include "skd_s1120.h"
55 static int skd_dbg_level;
56 static int skd_isr_comp_limit = 4;
62 STEC_LINK_UNKNOWN = 0xFF
66 SKD_FLUSH_INITIALIZER,
67 SKD_FLUSH_ZERO_SIZE_FIRST,
68 SKD_FLUSH_DATA_SECOND,
71 #define DPRINTK(skdev, fmt, args ...) \
73 if (unlikely((skdev)->dbg_level > 0)) { \
74 pr_err("%s:%s:%d " fmt, (skdev)->name, \
75 __func__, __LINE__, ## args); \
79 #define SKD_ASSERT(expr) \
81 if (unlikely(!(expr))) { \
82 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
83 # expr, __FILE__, __func__, __LINE__); \
87 #define VPRINTK(skdev, fmt, args ...) \
89 if (unlikely((skdev)->dbg_level > 1)) { \
90 pr_err("%s:%s:%d " fmt, (skdev)->name, \
91 __func__, __LINE__, ## args); \
96 #define DRV_NAME "skd"
97 #define DRV_VERSION "2.2.1"
98 #define DRV_BUILD_ID "0260"
99 #define PFX DRV_NAME ": "
100 #define DRV_BIN_VERSION 0x100
101 #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
103 MODULE_AUTHOR("bug-reports: support@stec-inc.com");
104 MODULE_LICENSE("Dual BSD/GPL");
106 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block/BIO driver (b" DRV_BUILD_ID ")");
107 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
109 #define PCI_VENDOR_ID_STEC 0x1B39
110 #define PCI_DEVICE_ID_S1120 0x0001
112 #define SKD_FUA_NV (1 << 1)
113 #define SKD_MINORS_PER_DEVICE 16
115 #define SKD_MAX_QUEUE_DEPTH 200u
117 #define SKD_PAUSE_TIMEOUT (5 * 1000)
119 #define SKD_N_FITMSG_BYTES (512u)
121 #define SKD_N_SPECIAL_CONTEXT 32u
122 #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
124 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
125 * 128KB limit. That allows 4096*4K = 16M xfer size
127 #define SKD_N_SG_PER_REQ_DEFAULT 256u
128 #define SKD_N_SG_PER_SPECIAL 256u
130 #define SKD_N_COMPLETION_ENTRY 256u
131 #define SKD_N_READ_CAP_BYTES (8u)
133 #define SKD_N_INTERNAL_BYTES (512u)
135 /* 5 bits of uniqifier, 0xF800 */
136 #define SKD_ID_INCR (0x400)
137 #define SKD_ID_TABLE_MASK (3u << 8u)
138 #define SKD_ID_RW_REQUEST (0u << 8u)
139 #define SKD_ID_INTERNAL (1u << 8u)
140 #define SKD_ID_SPECIAL_REQUEST (2u << 8u)
141 #define SKD_ID_FIT_MSG (3u << 8u)
142 #define SKD_ID_SLOT_MASK 0x00FFu
143 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
145 #define SKD_N_TIMEOUT_SLOT 4u
146 #define SKD_TIMEOUT_SLOT_MASK 3u
148 #define SKD_N_MAX_SECTORS 2048u
150 #define SKD_MAX_RETRIES 2u
152 #define SKD_TIMER_SECONDS(seconds) (seconds)
153 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
155 #define INQ_STD_NBYTES 36
156 #define SKD_DISCARD_CDB_LENGTH 24
158 enum skd_drvr_state {
162 SKD_DRVR_STATE_STARTING,
163 SKD_DRVR_STATE_ONLINE,
164 SKD_DRVR_STATE_PAUSING,
165 SKD_DRVR_STATE_PAUSED,
166 SKD_DRVR_STATE_DRAINING_TIMEOUT,
167 SKD_DRVR_STATE_RESTARTING,
168 SKD_DRVR_STATE_RESUMING,
169 SKD_DRVR_STATE_STOPPING,
170 SKD_DRVR_STATE_FAULT,
171 SKD_DRVR_STATE_DISAPPEARED,
172 SKD_DRVR_STATE_PROTOCOL_MISMATCH,
173 SKD_DRVR_STATE_BUSY_ERASE,
174 SKD_DRVR_STATE_BUSY_SANITIZE,
175 SKD_DRVR_STATE_BUSY_IMMINENT,
176 SKD_DRVR_STATE_WAIT_BOOT,
177 SKD_DRVR_STATE_SYNCING,
180 #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
181 #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
182 #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
183 #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
184 #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
185 #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
186 #define SKD_START_WAIT_SECONDS 90u
192 SKD_REQ_STATE_COMPLETED,
193 SKD_REQ_STATE_TIMEOUT,
194 SKD_REQ_STATE_ABORTED,
197 enum skd_fit_msg_state {
202 enum skd_check_status_action {
203 SKD_CHECK_STATUS_REPORT_GOOD,
204 SKD_CHECK_STATUS_REPORT_SMART_ALERT,
205 SKD_CHECK_STATUS_REQUEUE_REQUEST,
206 SKD_CHECK_STATUS_REPORT_ERROR,
207 SKD_CHECK_STATUS_BUSY_IMMINENT,
210 struct skd_fitmsg_context {
211 enum skd_fit_msg_state state;
213 struct skd_fitmsg_context *next;
222 dma_addr_t mb_dma_address;
225 struct skd_request_context {
226 enum skd_req_state state;
228 struct skd_request_context *next;
235 unsigned long start_time;
241 struct scatterlist *sg;
245 struct fit_sg_descriptor *sksg_list;
246 dma_addr_t sksg_dma_address;
248 struct fit_completion_entry_v1 completion;
250 struct fit_comp_error_info err_info;
253 #define SKD_DATA_DIR_HOST_TO_CARD 1
254 #define SKD_DATA_DIR_CARD_TO_HOST 2
255 #define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
257 struct skd_special_context {
258 struct skd_request_context req;
263 dma_addr_t db_dma_address;
266 dma_addr_t mb_dma_address;
279 struct sg_iovec *iov;
280 struct sg_iovec no_iov_iov;
282 struct skd_special_context *skspcl;
285 typedef enum skd_irq_type {
291 #define SKD_MAX_BARS 2
294 volatile void __iomem *mem_map[SKD_MAX_BARS];
295 resource_size_t mem_phys[SKD_MAX_BARS];
296 u32 mem_size[SKD_MAX_BARS];
298 skd_irq_type_t irq_type;
300 struct skd_msix_entry *msix_entries;
302 struct pci_dev *pdev;
303 int pcie_error_reporting_is_enabled;
306 struct gendisk *disk;
307 struct request_queue *queue;
308 struct device *class_dev;
312 atomic_t device_count;
318 enum skd_drvr_state state;
322 u32 cur_max_queue_depth;
323 u32 queue_low_water_mark;
324 u32 dev_max_queue_depth;
326 u32 num_fitmsg_context;
329 u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
331 struct skd_fitmsg_context *skmsg_free_list;
332 struct skd_fitmsg_context *skmsg_table;
334 struct skd_request_context *skreq_free_list;
335 struct skd_request_context *skreq_table;
337 struct skd_special_context *skspcl_free_list;
338 struct skd_special_context *skspcl_table;
340 struct skd_special_context internal_skspcl;
341 u32 read_cap_blocksize;
342 u32 read_cap_last_lba;
343 int read_cap_is_valid;
344 int inquiry_is_valid;
345 u8 inq_serial_num[13]; /*12 chars plus null term */
346 u8 id_str[80]; /* holds a composite name (pci + sernum) */
350 struct fit_completion_entry_v1 *skcomp_table;
351 struct fit_comp_error_info *skerr_table;
352 dma_addr_t cq_dma_address;
354 wait_queue_head_t waitq;
356 struct timer_list timer;
367 u32 connect_time_stamp;
369 #define SKD_MAX_CONNECT_RETRIES 16
375 struct work_struct completion_worker;
377 struct bio_list bio_queue;
380 struct list_head flush_list;
383 #define SKD_FLUSH_JOB "skd-flush-jobs"
384 struct kmem_cache *skd_flush_slab;
387 * These commands hold "nonzero size FLUSH bios",
388 * which are enqueud in skdev->flush_list during
389 * completion of "zero size FLUSH commands".
390 * It will be active in biomode.
392 struct skd_flush_cmd {
394 struct list_head flist;
397 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
398 #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
399 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
401 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
405 if (likely(skdev->dbg_level < 2))
406 return readl(skdev->mem_map[1] + offset);
409 val = readl(skdev->mem_map[1] + offset);
411 VPRINTK(skdev, "offset %x = %x\n", offset, val);
417 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
420 if (likely(skdev->dbg_level < 2)) {
421 writel(val, skdev->mem_map[1] + offset);
425 writel(val, skdev->mem_map[1] + offset);
427 VPRINTK(skdev, "offset %x = %x\n", offset, val);
431 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
434 if (likely(skdev->dbg_level < 2)) {
435 writeq(val, skdev->mem_map[1] + offset);
439 writeq(val, skdev->mem_map[1] + offset);
441 VPRINTK(skdev, "offset %x = %016llx\n", offset, val);
446 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
447 static int skd_isr_type = SKD_IRQ_DEFAULT;
449 module_param(skd_isr_type, int, 0444);
450 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
451 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
453 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
454 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
456 module_param(skd_max_req_per_msg, int, 0444);
457 MODULE_PARM_DESC(skd_max_req_per_msg,
458 "Maximum SCSI requests packed in a single message."
459 " (1-14, default==1)");
461 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
462 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
463 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
465 module_param(skd_max_queue_depth, int, 0444);
466 MODULE_PARM_DESC(skd_max_queue_depth,
467 "Maximum SCSI requests issued to s1120."
468 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
470 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
471 module_param(skd_sgs_per_request, int, 0444);
472 MODULE_PARM_DESC(skd_sgs_per_request,
473 "Maximum SG elements per block request."
474 " (1-4096, default==256)");
476 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
477 module_param(skd_max_pass_thru, int, 0444);
478 MODULE_PARM_DESC(skd_max_pass_thru,
479 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
481 module_param(skd_dbg_level, int, 0444);
482 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
484 module_param(skd_isr_comp_limit, int, 0444);
485 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
488 module_param(skd_bio, int, 0444);
489 MODULE_PARM_DESC(skd_bio,
490 "Register as a bio device instead of block (0, 1) default=0");
492 /* Major device number dynamically assigned. */
493 static u32 skd_major;
495 static struct skd_device *skd_construct(struct pci_dev *pdev);
496 static void skd_destruct(struct skd_device *skdev);
497 static const struct block_device_operations skd_blockdev_ops;
498 static void skd_send_fitmsg(struct skd_device *skdev,
499 struct skd_fitmsg_context *skmsg);
500 static void skd_send_special_fitmsg(struct skd_device *skdev,
501 struct skd_special_context *skspcl);
502 static void skd_request_fn(struct request_queue *rq);
503 static void skd_end_request(struct skd_device *skdev,
504 struct skd_request_context *skreq, int error);
505 static int skd_preop_sg_list(struct skd_device *skdev,
506 struct skd_request_context *skreq);
507 static void skd_postop_sg_list(struct skd_device *skdev,
508 struct skd_request_context *skreq);
510 static void skd_restart_device(struct skd_device *skdev);
511 static int skd_quiesce_dev(struct skd_device *skdev);
512 static int skd_unquiesce_dev(struct skd_device *skdev);
513 static void skd_release_special(struct skd_device *skdev,
514 struct skd_special_context *skspcl);
515 static void skd_disable_interrupts(struct skd_device *skdev);
516 static void skd_isr_fwstate(struct skd_device *skdev);
517 static void skd_recover_requests(struct skd_device *skdev, int requeue);
518 static void skd_soft_reset(struct skd_device *skdev);
520 static const char *skd_name(struct skd_device *skdev);
521 const char *skd_drive_state_to_str(int state);
522 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
523 static void skd_log_skdev(struct skd_device *skdev, const char *event);
524 static void skd_log_skmsg(struct skd_device *skdev,
525 struct skd_fitmsg_context *skmsg, const char *event);
526 static void skd_log_skreq(struct skd_device *skdev,
527 struct skd_request_context *skreq, const char *event);
529 /* FLUSH FUA flag handling. */
530 static int skd_flush_cmd_enqueue(struct skd_device *, void *);
531 static void *skd_flush_cmd_dequeue(struct skd_device *);
535 *****************************************************************************
536 * READ/WRITE REQUESTS
537 *****************************************************************************
539 static void skd_stop_queue(struct skd_device *skdev)
542 blk_stop_queue(skdev->queue);
544 skdev->queue_stopped = 1;
547 static void skd_unstop_queue(struct skd_device *skdev)
550 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
552 skdev->queue_stopped = 0;
555 static void skd_start_queue(struct skd_device *skdev)
558 blk_start_queue(skdev->queue);
560 pr_err("(%s): Starting queue\n", skd_name(skdev));
561 skdev->queue_stopped = 0;
562 skd_request_fn(skdev->queue);
566 static int skd_queue_stopped(struct skd_device *skdev)
569 return blk_queue_stopped(skdev->queue);
571 return skdev->queue_stopped;
574 static void skd_fail_all_pending_blk(struct skd_device *skdev)
576 struct request_queue *q = skdev->queue;
580 req = blk_peek_request(q);
583 blk_start_request(req);
584 __blk_end_request_all(req, -EIO);
588 static void skd_fail_all_pending_bio(struct skd_device *skdev)
594 bio = bio_list_pop(&skdev->bio_queue);
599 bio_endio(bio, error);
603 static void skd_fail_all_pending(struct skd_device *skdev)
606 skd_fail_all_pending_blk(skdev);
608 skd_fail_all_pending_bio(skdev);
611 static void skd_make_request(struct request_queue *q, struct bio *bio)
613 struct skd_device *skdev = q->queuedata;
616 spin_lock_irqsave(&skdev->lock, flags);
618 bio_list_add(&skdev->bio_queue, bio);
619 skd_request_fn(skdev->queue);
621 spin_unlock_irqrestore(&skdev->lock, flags);
625 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
626 int data_dir, unsigned lba,
629 if (data_dir == READ)
630 scsi_req->cdb[0] = 0x28;
632 scsi_req->cdb[0] = 0x2a;
634 scsi_req->cdb[1] = 0;
635 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
636 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
637 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
638 scsi_req->cdb[5] = (lba & 0xff);
639 scsi_req->cdb[6] = 0;
640 scsi_req->cdb[7] = (count & 0xff00) >> 8;
641 scsi_req->cdb[8] = count & 0xff;
642 scsi_req->cdb[9] = 0;
646 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
647 struct skd_request_context *skreq)
649 skreq->flush_cmd = 1;
651 scsi_req->cdb[0] = 0x35;
652 scsi_req->cdb[1] = 0;
653 scsi_req->cdb[2] = 0;
654 scsi_req->cdb[3] = 0;
655 scsi_req->cdb[4] = 0;
656 scsi_req->cdb[5] = 0;
657 scsi_req->cdb[6] = 0;
658 scsi_req->cdb[7] = 0;
659 scsi_req->cdb[8] = 0;
660 scsi_req->cdb[9] = 0;
664 skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
665 struct skd_request_context *skreq,
673 buf = page_address(page);
674 len = SKD_DISCARD_CDB_LENGTH;
676 scsi_req->cdb[0] = UNMAP;
677 scsi_req->cdb[8] = len;
679 put_unaligned_be16(6 + 16, &buf[0]);
680 put_unaligned_be16(16, &buf[2]);
681 put_unaligned_be64(lba, &buf[8]);
682 put_unaligned_be32(count, &buf[16]);
686 blk_add_request_payload(req, page, len);
689 skreq->bio->bi_io_vec->bv_page = page;
690 skreq->bio->bi_io_vec->bv_offset = 0;
691 skreq->bio->bi_io_vec->bv_len = len;
693 skreq->bio->bi_vcnt = 1;
694 skreq->bio->bi_phys_segments = 1;
698 static void skd_request_fn_not_online(struct request_queue *q);
700 static void skd_request_fn(struct request_queue *q)
702 struct skd_device *skdev = q->queuedata;
703 struct skd_fitmsg_context *skmsg = NULL;
704 struct fit_msg_hdr *fmh = NULL;
705 struct skd_request_context *skreq;
706 struct request *req = NULL;
707 struct bio *bio = NULL;
708 struct skd_scsi_request *scsi_req;
710 unsigned long io_flags;
723 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
724 skd_request_fn_not_online(q);
728 if (skd_queue_stopped(skdev)) {
729 if (skdev->skmsg_free_list == NULL ||
730 skdev->skreq_free_list == NULL ||
731 skdev->in_flight >= skdev->queue_low_water_mark)
732 /* There is still some kind of shortage */
735 skd_unstop_queue(skdev);
740 * - There are no more native requests
741 * - There are already the maximum number of requests in progress
742 * - There are no more skd_request_context entries
743 * - There are no more FIT msg buffers
750 req = blk_peek_request(q);
752 /* Are there any native requests to start? */
756 lba = (u32)blk_rq_pos(req);
757 count = blk_rq_sectors(req);
758 data_dir = rq_data_dir(req);
759 io_flags = req->cmd_flags;
761 if (io_flags & REQ_FLUSH)
764 if (io_flags & REQ_FUA)
768 "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
769 req, lba, lba, count, count, data_dir);
771 if (!list_empty(&skdev->flush_list)) {
772 /* Process data part of FLUSH request. */
773 bio = (struct bio *)skd_flush_cmd_dequeue(skdev);
775 VPRINTK(skdev, "processing FLUSH request with data.\n");
777 /* peek at our bio queue */
778 bio = bio_list_peek(&skdev->bio_queue);
781 /* Are there any native requests to start? */
785 lba = (u32)bio->bi_sector;
786 count = bio_sectors(bio);
787 data_dir = bio_data_dir(bio);
788 io_flags = bio->bi_rw;
791 "new bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
792 bio, lba, lba, count, count, data_dir);
794 if (io_flags & REQ_FLUSH)
797 if (io_flags & REQ_FUA)
801 /* At this point we know there is a request
802 * (from our bio q or req q depending on the way
803 * the driver is built do checks for resources.
806 /* Are too many requets already in progress? */
807 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
808 VPRINTK(skdev, "qdepth %d, limit %d\n",
809 skdev->in_flight, skdev->cur_max_queue_depth);
813 /* Is a skd_request_context available? */
814 skreq = skdev->skreq_free_list;
816 VPRINTK(skdev, "Out of req=%p\n", q);
819 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
820 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
822 /* Now we check to see if we can get a fit msg */
824 if (skdev->skmsg_free_list == NULL) {
825 VPRINTK(skdev, "Out of msg\n");
830 skreq->flush_cmd = 0;
832 skreq->sg_byte_count = 0;
833 skreq->discard_page = 0;
836 * OK to now dequeue request from either bio or q.
838 * At this point we are comitted to either start or reject
839 * the native request. Note that skd_request_context is
840 * available but is still at the head of the free list.
843 blk_start_request(req);
845 skreq->fitmsg_id = 0;
847 if (unlikely(flush == SKD_FLUSH_DATA_SECOND)) {
850 skreq->bio = bio_list_pop(&skdev->bio_queue);
851 SKD_ASSERT(skreq->bio == bio);
852 skreq->start_time = jiffies;
853 part_inc_in_flight(&skdev->disk->part0,
857 skreq->fitmsg_id = 0;
860 /* Either a FIT msg is in progress or we have to start one. */
862 /* Are there any FIT msg buffers available? */
863 skmsg = skdev->skmsg_free_list;
865 VPRINTK(skdev, "Out of msg skdev=%p\n", skdev);
868 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
869 SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
871 skdev->skmsg_free_list = skmsg->next;
873 skmsg->state = SKD_MSG_STATE_BUSY;
874 skmsg->id += SKD_ID_INCR;
876 /* Initialize the FIT msg header */
877 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
878 memset(fmh, 0, sizeof(*fmh));
879 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
880 skmsg->length = sizeof(*fmh);
883 skreq->fitmsg_id = skmsg->id;
886 * Note that a FIT msg may have just been started
887 * but contains no SoFIT requests yet.
891 * Transcode the request, checking as we go. The outcome of
892 * the transcoding is represented by the error variable.
894 cmd_ptr = &skmsg->msg_buf[skmsg->length];
895 memset(cmd_ptr, 0, 32);
897 be_lba = cpu_to_be32(lba);
898 be_count = cpu_to_be32(count);
899 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
900 cmdctxt = skreq->id + SKD_ID_INCR;
903 scsi_req->hdr.tag = cmdctxt;
904 scsi_req->hdr.sg_list_dma_address = be_dmaa;
906 if (data_dir == READ)
907 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
909 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
911 if (io_flags & REQ_DISCARD) {
912 page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
914 pr_err("request_fn:Page allocation failed.\n");
915 skd_end_request(skdev, skreq, -ENOMEM);
918 skreq->discard_page = 1;
919 skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
921 } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
922 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
923 SKD_ASSERT(skreq->flush_cmd == 1);
926 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
930 scsi_req->cdb[1] |= SKD_FUA_NV;
932 if ((!skd_bio && !req->bio) ||
933 (skd_bio && flush == SKD_FLUSH_ZERO_SIZE_FIRST))
936 error = skd_preop_sg_list(skdev, skreq);
940 * Complete the native request with error.
941 * Note that the request context is still at the
942 * head of the free list, and that the SoFIT request
943 * was encoded into the FIT msg buffer but the FIT
944 * msg length has not been updated. In short, the
945 * only resource that has been allocated but might
946 * not be used is that the FIT msg could be empty.
948 DPRINTK(skdev, "error Out\n");
949 skd_end_request(skdev, skreq, error);
954 scsi_req->hdr.sg_list_len_bytes =
955 cpu_to_be32(skreq->sg_byte_count);
957 /* Complete resource allocations. */
958 skdev->skreq_free_list = skreq->next;
959 skreq->state = SKD_REQ_STATE_BUSY;
960 skreq->id += SKD_ID_INCR;
962 skmsg->length += sizeof(struct skd_scsi_request);
963 fmh->num_protocol_cmds_coalesced++;
966 * Update the active request counts.
967 * Capture the timeout timestamp.
969 skreq->timeout_stamp = skdev->timeout_stamp;
970 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
971 skdev->timeout_slot[timo_slot]++;
973 VPRINTK(skdev, "req=0x%x busy=%d\n",
974 skreq->id, skdev->in_flight);
977 * If the FIT msg buffer is full send it.
979 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
980 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
981 skd_send_fitmsg(skdev, skmsg);
988 * Is a FIT msg in progress? If it is empty put the buffer back
989 * on the free list. If it is non-empty send what we got.
990 * This minimizes latency when there are fewer requests than
991 * what fits in a FIT msg.
994 /* Bigger than just a FIT msg header? */
995 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
996 VPRINTK(skdev, "sending msg=%p, len %d\n",
997 skmsg, skmsg->length);
998 skd_send_fitmsg(skdev, skmsg);
1001 * The FIT msg is empty. It means we got started
1002 * on the msg, but the requests were rejected.
1004 skmsg->state = SKD_MSG_STATE_IDLE;
1005 skmsg->id += SKD_ID_INCR;
1006 skmsg->next = skdev->skmsg_free_list;
1007 skdev->skmsg_free_list = skmsg;
1014 * If req is non-NULL it means there is something to do but
1015 * we are out of a resource.
1017 if (((!skd_bio) && req) ||
1018 ((skd_bio) && bio_list_peek(&skdev->bio_queue)))
1019 skd_stop_queue(skdev);
1022 static void skd_end_request_blk(struct skd_device *skdev,
1023 struct skd_request_context *skreq, int error)
1025 struct request *req = skreq->req;
1026 unsigned int io_flags = req->cmd_flags;
1028 if ((io_flags & REQ_DISCARD) &&
1029 (skreq->discard_page == 1)) {
1030 VPRINTK(skdev, "skd_end_request_blk, free the page!");
1031 free_page((unsigned long)req->buffer);
1035 if (unlikely(error)) {
1036 struct request *req = skreq->req;
1037 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
1038 u32 lba = (u32)blk_rq_pos(req);
1039 u32 count = blk_rq_sectors(req);
1041 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
1042 skd_name(skdev), cmd, lba, count, skreq->id);
1044 VPRINTK(skdev, "id=0x%x error=%d\n", skreq->id, error);
1046 __blk_end_request_all(skreq->req, error);
1049 static int skd_preop_sg_list_blk(struct skd_device *skdev,
1050 struct skd_request_context *skreq)
1052 struct request *req = skreq->req;
1053 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1054 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1055 struct scatterlist *sg = &skreq->sg[0];
1059 skreq->sg_byte_count = 0;
1061 /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
1062 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
1064 n_sg = blk_rq_map_sg(skdev->queue, req, sg);
1069 * Map scatterlist to PCI bus addresses.
1070 * Note PCI might change the number of entries.
1072 n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
1076 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
1080 for (i = 0; i < n_sg; i++) {
1081 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1082 u32 cnt = sg_dma_len(&sg[i]);
1083 uint64_t dma_addr = sg_dma_address(&sg[i]);
1085 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
1086 sgd->byte_count = cnt;
1087 skreq->sg_byte_count += cnt;
1088 sgd->host_side_addr = dma_addr;
1089 sgd->dev_side_addr = 0;
1092 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
1093 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
1095 if (unlikely(skdev->dbg_level > 1)) {
1096 VPRINTK(skdev, "skreq=%x sksg_list=%p sksg_dma=%llx\n",
1097 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1098 for (i = 0; i < n_sg; i++) {
1099 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1100 VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x "
1101 "addr=0x%llx next=0x%llx\n",
1102 i, sgd->byte_count, sgd->control,
1103 sgd->host_side_addr, sgd->next_desc_ptr);
1110 static void skd_postop_sg_list_blk(struct skd_device *skdev,
1111 struct skd_request_context *skreq)
1113 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1114 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1117 * restore the next ptr for next IO request so we
1118 * don't have to set it every time.
1120 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
1121 skreq->sksg_dma_address +
1122 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
1123 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
1126 static void skd_end_request_bio(struct skd_device *skdev,
1127 struct skd_request_context *skreq, int error)
1129 struct bio *bio = skreq->bio;
1130 int rw = bio_data_dir(bio);
1131 unsigned long io_flags = bio->bi_rw;
1133 if ((io_flags & REQ_DISCARD) &&
1134 (skreq->discard_page == 1)) {
1135 VPRINTK(skdev, "biomode: skd_end_request: freeing DISCARD page.\n");
1136 free_page((unsigned long)page_address(bio->bi_io_vec->bv_page));
1139 if (unlikely(error)) {
1140 u32 lba = (u32)skreq->bio->bi_sector;
1141 u32 count = bio_sectors(skreq->bio);
1142 char *cmd = (rw == WRITE) ? "write" : "read";
1143 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
1144 skd_name(skdev), cmd, lba, count, skreq->id);
1147 int cpu = part_stat_lock();
1149 if (likely(!error)) {
1150 part_stat_inc(cpu, &skdev->disk->part0, ios[rw]);
1151 part_stat_add(cpu, &skdev->disk->part0, sectors[rw],
1154 part_stat_add(cpu, &skdev->disk->part0, ticks[rw],
1155 jiffies - skreq->start_time);
1156 part_dec_in_flight(&skdev->disk->part0, rw);
1160 VPRINTK(skdev, "id=0x%x error=%d\n", skreq->id, error);
1162 bio_endio(skreq->bio, error);
1165 static int skd_preop_sg_list_bio(struct skd_device *skdev,
1166 struct skd_request_context *skreq)
1168 struct bio *bio = skreq->bio;
1169 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1170 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1173 struct bio_vec *vec;
1174 struct fit_sg_descriptor *sgd;
1178 unsigned int io_flags = 0;
1179 io_flags |= bio->bi_rw;
1181 skreq->sg_byte_count = 0;
1182 n_sg = skreq->n_sg = skreq->bio->bi_vcnt;
1187 if (n_sg > skdev->sgs_per_request) {
1188 pr_err("(%s): sg overflow n=%d\n",
1189 skd_name(skdev), n_sg);
1194 for (i = 0; i < skreq->n_sg; i++) {
1195 vec = bio_iovec_idx(bio, i);
1196 dma_addr = pci_map_page(skdev->pdev,
1198 vec->bv_offset, vec->bv_len, pci_dir);
1199 count = vec->bv_len;
1201 if (count == 0 || count > 64u * 1024u || (count & 3) != 0
1202 || (dma_addr & 3) != 0) {
1204 "(%s): Bad sg ix=%d count=%d addr=0x%llx\n",
1205 skd_name(skdev), i, count, dma_addr);
1209 sgd = &skreq->sksg_list[i];
1211 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
1212 sgd->byte_count = vec->bv_len;
1213 skreq->sg_byte_count += vec->bv_len;
1214 sgd->host_side_addr = dma_addr;
1215 sgd->dev_side_addr = 0; /* not used */
1218 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
1219 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
1222 if (!(io_flags & REQ_DISCARD)) {
1223 count = bio_sectors(bio) << 9u;
1224 if (count != skreq->sg_byte_count) {
1225 pr_err("(%s): mismatch count sg=%d req=%d\n",
1226 skd_name(skdev), skreq->sg_byte_count, count);
1231 if (unlikely(skdev->dbg_level > 1)) {
1232 VPRINTK(skdev, "skreq=%x sksg_list=%p sksg_dma=%llx\n",
1233 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1234 for (i = 0; i < n_sg; i++) {
1235 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1236 VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x "
1237 "addr=0x%llx next=0x%llx\n",
1238 i, sgd->byte_count, sgd->control,
1239 sgd->host_side_addr, sgd->next_desc_ptr);
1244 skd_postop_sg_list(skdev, skreq);
1252 static int skd_preop_sg_list(struct skd_device *skdev,
1253 struct skd_request_context *skreq)
1256 return skd_preop_sg_list_blk(skdev, skreq);
1258 return skd_preop_sg_list_bio(skdev, skreq);
1261 static void skd_postop_sg_list_bio(struct skd_device *skdev,
1262 struct skd_request_context *skreq)
1264 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1265 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1267 struct fit_sg_descriptor *sgd;
1270 * restore the next ptr for next IO request so we
1271 * don't have to set it every time.
1273 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
1274 skreq->sksg_dma_address +
1275 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
1277 for (i = 0; i < skreq->n_sg; i++) {
1278 sgd = &skreq->sksg_list[i];
1279 pci_unmap_page(skdev->pdev, sgd->host_side_addr,
1280 sgd->byte_count, pci_dir);
1284 static void skd_postop_sg_list(struct skd_device *skdev,
1285 struct skd_request_context *skreq)
1288 skd_postop_sg_list_blk(skdev, skreq);
1290 skd_postop_sg_list_bio(skdev, skreq);
1293 static void skd_end_request(struct skd_device *skdev,
1294 struct skd_request_context *skreq, int error)
1296 if (likely(!skd_bio))
1297 skd_end_request_blk(skdev, skreq, error);
1299 skd_end_request_bio(skdev, skreq, error);
1302 static void skd_request_fn_not_online(struct request_queue *q)
1304 struct skd_device *skdev = q->queuedata;
1307 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
1309 skd_log_skdev(skdev, "req_not_online");
1310 switch (skdev->state) {
1311 case SKD_DRVR_STATE_PAUSING:
1312 case SKD_DRVR_STATE_PAUSED:
1313 case SKD_DRVR_STATE_STARTING:
1314 case SKD_DRVR_STATE_RESTARTING:
1315 case SKD_DRVR_STATE_WAIT_BOOT:
1316 /* In case of starting, we haven't started the queue,
1317 * so we can't get here... but requests are
1318 * possibly hanging out waiting for us because we
1319 * reported the dev/skd0 already. They'll wait
1320 * forever if connect doesn't complete.
1321 * What to do??? delay dev/skd0 ??
1323 case SKD_DRVR_STATE_BUSY:
1324 case SKD_DRVR_STATE_BUSY_IMMINENT:
1325 case SKD_DRVR_STATE_BUSY_ERASE:
1326 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1329 case SKD_DRVR_STATE_BUSY_SANITIZE:
1330 case SKD_DRVR_STATE_STOPPING:
1331 case SKD_DRVR_STATE_SYNCING:
1332 case SKD_DRVR_STATE_FAULT:
1333 case SKD_DRVR_STATE_DISAPPEARED:
1339 /* If we get here, terminate all pending block requeusts
1340 * with EIO and any scsi pass thru with appropriate sense
1343 skd_fail_all_pending(skdev);
1347 *****************************************************************************
1349 *****************************************************************************
1352 static void skd_timer_tick_not_online(struct skd_device *skdev);
1354 static void skd_timer_tick(ulong arg)
1356 struct skd_device *skdev = (struct skd_device *)arg;
1359 u32 overdue_timestamp;
1360 unsigned long reqflags;
1363 if (skdev->state == SKD_DRVR_STATE_FAULT)
1364 /* The driver has declared fault, and we want it to
1365 * stay that way until driver is reloaded.
1369 spin_lock_irqsave(&skdev->lock, reqflags);
1371 state = SKD_READL(skdev, FIT_STATUS);
1372 state &= FIT_SR_DRIVE_STATE_MASK;
1373 if (state != skdev->drive_state)
1374 skd_isr_fwstate(skdev);
1376 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1377 skd_timer_tick_not_online(skdev);
1378 goto timer_func_out;
1380 skdev->timeout_stamp++;
1381 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1384 * All requests that happened during the previous use of
1385 * this slot should be done by now. The previous use was
1386 * over 7 seconds ago.
1388 if (skdev->timeout_slot[timo_slot] == 0)
1389 goto timer_func_out;
1391 /* Something is overdue */
1392 overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
1394 DPRINTK(skdev, "found %d timeouts, draining busy=%d\n",
1395 skdev->timeout_slot[timo_slot], skdev->in_flight);
1396 pr_err("(%s): Overdue IOs (%d), busy %d\n",
1397 skd_name(skdev), skdev->timeout_slot[timo_slot],
1400 skdev->timer_countdown = SKD_DRAINING_TIMO;
1401 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1402 skdev->timo_slot = timo_slot;
1403 skd_stop_queue(skdev);
1406 mod_timer(&skdev->timer, (jiffies + HZ));
1408 spin_unlock_irqrestore(&skdev->lock, reqflags);
1411 static void skd_timer_tick_not_online(struct skd_device *skdev)
1413 switch (skdev->state) {
1414 case SKD_DRVR_STATE_IDLE:
1415 case SKD_DRVR_STATE_LOAD:
1417 case SKD_DRVR_STATE_BUSY_SANITIZE:
1418 VPRINTK(skdev, "drive busy sanitize[%x], driver[%x]\n",
1419 skdev->drive_state, skdev->state);
1420 /* If we've been in sanitize for 3 seconds, we figure we're not
1421 * going to get anymore completions, so recover requests now
1423 if (skdev->timer_countdown > 0) {
1424 skdev->timer_countdown--;
1427 skd_recover_requests(skdev, 0);
1430 case SKD_DRVR_STATE_BUSY:
1431 case SKD_DRVR_STATE_BUSY_IMMINENT:
1432 case SKD_DRVR_STATE_BUSY_ERASE:
1433 VPRINTK(skdev, "busy[%x], countdown=%d\n",
1434 skdev->state, skdev->timer_countdown);
1435 if (skdev->timer_countdown > 0) {
1436 skdev->timer_countdown--;
1439 DPRINTK(skdev, "busy[%x], timedout=%d, restarting device.",
1440 skdev->state, skdev->timer_countdown);
1441 skd_restart_device(skdev);
1444 case SKD_DRVR_STATE_WAIT_BOOT:
1445 case SKD_DRVR_STATE_STARTING:
1446 if (skdev->timer_countdown > 0) {
1447 skdev->timer_countdown--;
1450 /* For now, we fault the drive. Could attempt resets to
1451 * revcover at some point. */
1452 skdev->state = SKD_DRVR_STATE_FAULT;
1454 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1455 skd_name(skdev), skdev->drive_state);
1457 /*start the queue so we can respond with error to requests */
1458 /* wakeup anyone waiting for startup complete */
1459 skd_start_queue(skdev);
1460 skdev->gendisk_on = -1;
1461 wake_up_interruptible(&skdev->waitq);
1464 case SKD_DRVR_STATE_ONLINE:
1465 /* shouldn't get here. */
1468 case SKD_DRVR_STATE_PAUSING:
1469 case SKD_DRVR_STATE_PAUSED:
1472 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1474 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1476 skdev->timer_countdown,
1478 skdev->timeout_slot[skdev->timo_slot]);
1479 /* if the slot has cleared we can let the I/O continue */
1480 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1481 DPRINTK(skdev, "Slot drained, starting queue.\n");
1482 skdev->state = SKD_DRVR_STATE_ONLINE;
1483 skd_start_queue(skdev);
1486 if (skdev->timer_countdown > 0) {
1487 skdev->timer_countdown--;
1490 skd_restart_device(skdev);
1493 case SKD_DRVR_STATE_RESTARTING:
1494 if (skdev->timer_countdown > 0) {
1495 skdev->timer_countdown--;
1498 /* For now, we fault the drive. Could attempt resets to
1499 * revcover at some point. */
1500 skdev->state = SKD_DRVR_STATE_FAULT;
1501 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1502 skd_name(skdev), skdev->drive_state);
1505 * Recovering does two things:
1506 * 1. completes IO with error
1507 * 2. reclaims dma resources
1508 * When is it safe to recover requests?
1509 * - if the drive state is faulted
1510 * - if the state is still soft reset after out timeout
1511 * - if the drive registers are dead (state = FF)
1512 * If it is "unsafe", we still need to recover, so we will
1513 * disable pci bus mastering and disable our interrupts.
1516 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1517 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1518 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1519 /* It never came out of soft reset. Try to
1520 * recover the requests and then let them
1521 * fail. This is to mitigate hung processes. */
1522 skd_recover_requests(skdev, 0);
1524 pr_err("(%s): Disable BusMaster (%x)\n",
1525 skd_name(skdev), skdev->drive_state);
1526 pci_disable_device(skdev->pdev);
1527 skd_disable_interrupts(skdev);
1528 skd_recover_requests(skdev, 0);
1531 /*start the queue so we can respond with error to requests */
1532 /* wakeup anyone waiting for startup complete */
1533 skd_start_queue(skdev);
1534 skdev->gendisk_on = -1;
1535 wake_up_interruptible(&skdev->waitq);
1538 case SKD_DRVR_STATE_RESUMING:
1539 case SKD_DRVR_STATE_STOPPING:
1540 case SKD_DRVR_STATE_SYNCING:
1541 case SKD_DRVR_STATE_FAULT:
1542 case SKD_DRVR_STATE_DISAPPEARED:
1548 static int skd_start_timer(struct skd_device *skdev)
1552 init_timer(&skdev->timer);
1553 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1555 rc = mod_timer(&skdev->timer, (jiffies + HZ));
1557 pr_err("%s: failed to start timer %d\n",
1562 static void skd_kill_timer(struct skd_device *skdev)
1564 del_timer_sync(&skdev->timer);
1568 *****************************************************************************
1570 *****************************************************************************
1572 static int skd_ioctl_sg_io(struct skd_device *skdev,
1573 fmode_t mode, void __user *argp);
1574 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1575 struct skd_sg_io *sksgio);
1576 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1577 struct skd_sg_io *sksgio);
1578 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1579 struct skd_sg_io *sksgio);
1580 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1581 struct skd_sg_io *sksgio, int dxfer_dir);
1582 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1583 struct skd_sg_io *sksgio);
1584 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1585 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1586 struct skd_sg_io *sksgio);
1587 static int skd_sg_io_put_status(struct skd_device *skdev,
1588 struct skd_sg_io *sksgio);
1590 static void skd_complete_special(struct skd_device *skdev,
1591 volatile struct fit_completion_entry_v1
1593 volatile struct fit_comp_error_info *skerr,
1594 struct skd_special_context *skspcl);
1596 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1597 uint cmd_in, ulong arg)
1600 struct gendisk *disk = bdev->bd_disk;
1601 struct skd_device *skdev = disk->private_data;
1602 void __user *p = (void *)arg;
1604 DPRINTK(skdev, "%s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
1605 disk->disk_name, current->comm, mode, cmd_in, arg);
1607 if (!capable(CAP_SYS_ADMIN))
1611 case SG_SET_TIMEOUT:
1612 case SG_GET_TIMEOUT:
1613 case SG_GET_VERSION_NUM:
1614 rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1617 rc = skd_ioctl_sg_io(skdev, mode, p);
1625 DPRINTK(skdev, "%s: completion rc %d\n", disk->disk_name, rc);
1629 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1633 struct skd_sg_io sksgio;
1635 memset(&sksgio, 0, sizeof(sksgio));
1638 sksgio.iov = &sksgio.no_iov_iov;
1640 switch (skdev->state) {
1641 case SKD_DRVR_STATE_ONLINE:
1642 case SKD_DRVR_STATE_BUSY_IMMINENT:
1646 DPRINTK(skdev, "drive not online\n");
1651 rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1655 rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1659 rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1663 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1667 rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1671 rc = skd_sg_io_await(skdev, &sksgio);
1675 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1679 rc = skd_sg_io_put_status(skdev, &sksgio);
1686 skd_sg_io_release_skspcl(skdev, &sksgio);
1688 if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1693 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1694 struct skd_sg_io *sksgio)
1696 struct sg_io_hdr *sgp = &sksgio->sg;
1699 if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1700 DPRINTK(skdev, "access sg failed %p\n", sksgio->argp);
1704 if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1705 DPRINTK(skdev, "copy_from_user sg failed %p\n", sksgio->argp);
1709 if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1710 DPRINTK(skdev, "interface_id invalid 0x%x\n",
1715 if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1716 DPRINTK(skdev, "cmd_len invalid %d\n", sgp->cmd_len);
1720 if (sgp->iovec_count > 256) {
1721 DPRINTK(skdev, "iovec_count invalid %d\n", sgp->iovec_count);
1725 if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1726 DPRINTK(skdev, "dxfer_len invalid %d\n", sgp->dxfer_len);
1730 switch (sgp->dxfer_direction) {
1735 case SG_DXFER_TO_DEV:
1739 case SG_DXFER_FROM_DEV:
1740 case SG_DXFER_TO_FROM_DEV:
1745 DPRINTK(skdev, "dxfer_dir invalid %d\n", sgp->dxfer_direction);
1749 if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1750 DPRINTK(skdev, "copy_from_user cmdp failed %p\n", sgp->cmdp);
1754 if (sgp->mx_sb_len != 0) {
1755 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1756 DPRINTK(skdev, "access sbp failed %p\n", sgp->sbp);
1761 if (sgp->iovec_count == 0) {
1762 sksgio->iov[0].iov_base = sgp->dxferp;
1763 sksgio->iov[0].iov_len = sgp->dxfer_len;
1765 sksgio->dxfer_len = sgp->dxfer_len;
1767 struct sg_iovec *iov;
1768 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1769 size_t iov_data_len;
1771 iov = kmalloc(nbytes, GFP_KERNEL);
1773 DPRINTK(skdev, "alloc iovec failed %d\n",
1778 sksgio->iovcnt = sgp->iovec_count;
1780 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1781 DPRINTK(skdev, "copy_from_user iovec failed %p\n",
1787 * Sum up the vecs, making sure they don't overflow
1790 for (i = 0; i < sgp->iovec_count; i++) {
1791 if (iov_data_len + iov[i].iov_len < iov_data_len)
1793 iov_data_len += iov[i].iov_len;
1796 /* SG_IO howto says that the shorter of the two wins */
1797 if (sgp->dxfer_len < iov_data_len) {
1798 sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1801 sksgio->dxfer_len = sgp->dxfer_len;
1803 sksgio->dxfer_len = iov_data_len;
1806 if (sgp->dxfer_direction != SG_DXFER_NONE) {
1807 struct sg_iovec *iov = sksgio->iov;
1808 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1809 if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1810 DPRINTK(skdev, "access data failed %p/%d\n",
1811 iov->iov_base, (int)iov->iov_len);
1820 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1821 struct skd_sg_io *sksgio)
1823 struct skd_special_context *skspcl = NULL;
1829 spin_lock_irqsave(&skdev->lock, flags);
1830 skspcl = skdev->skspcl_free_list;
1831 if (skspcl != NULL) {
1832 skdev->skspcl_free_list =
1833 (struct skd_special_context *)skspcl->req.next;
1834 skspcl->req.id += SKD_ID_INCR;
1835 skspcl->req.state = SKD_REQ_STATE_SETUP;
1836 skspcl->orphaned = 0;
1837 skspcl->req.n_sg = 0;
1839 spin_unlock_irqrestore(&skdev->lock, flags);
1841 if (skspcl != NULL) {
1846 DPRINTK(skdev, "blocking\n");
1848 rc = wait_event_interruptible_timeout(
1850 (skdev->skspcl_free_list != NULL),
1851 msecs_to_jiffies(sksgio->sg.timeout));
1853 DPRINTK(skdev, "unblocking, rc=%d\n", rc);
1863 * If we get here rc > 0 meaning the timeout to
1864 * wait_event_interruptible_timeout() had time left, hence the
1865 * sought event -- non-empty free list -- happened.
1866 * Retry the allocation.
1869 sksgio->skspcl = skspcl;
1874 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1875 struct skd_request_context *skreq,
1878 u32 resid = dxfer_len;
1881 * The DMA engine must have aligned addresses and byte counts.
1883 resid += (-resid) & 3;
1884 skreq->sg_byte_count = resid;
1889 u32 nbytes = PAGE_SIZE;
1890 u32 ix = skreq->n_sg;
1891 struct scatterlist *sg = &skreq->sg[ix];
1892 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1898 page = alloc_page(GFP_KERNEL);
1902 sg_set_page(sg, page, nbytes, 0);
1904 /* TODO: This should be going through a pci_???()
1905 * routine to do proper mapping. */
1906 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1907 sksg->byte_count = nbytes;
1909 sksg->host_side_addr = sg_phys(sg);
1911 sksg->dev_side_addr = 0;
1912 sksg->next_desc_ptr = skreq->sksg_dma_address +
1913 (ix + 1) * sizeof(*sksg);
1919 if (skreq->n_sg > 0) {
1920 u32 ix = skreq->n_sg - 1;
1921 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1923 sksg->control = FIT_SGD_CONTROL_LAST;
1924 sksg->next_desc_ptr = 0;
1927 if (unlikely(skdev->dbg_level > 1)) {
1930 VPRINTK(skdev, "skreq=%x sksg_list=%p sksg_dma=%llx\n",
1931 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1932 for (i = 0; i < skreq->n_sg; i++) {
1933 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1935 VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x "
1936 "addr=0x%llx next=0x%llx\n",
1937 i, sgd->byte_count, sgd->control,
1938 sgd->host_side_addr, sgd->next_desc_ptr);
1945 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1946 struct skd_sg_io *sksgio)
1948 struct skd_special_context *skspcl = sksgio->skspcl;
1949 struct skd_request_context *skreq = &skspcl->req;
1950 u32 dxfer_len = sksgio->dxfer_len;
1953 rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1955 * Eventually, errors or not, skd_release_special() is called
1956 * to recover allocations including partial allocations.
1961 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1962 struct skd_sg_io *sksgio, int dxfer_dir)
1964 struct skd_special_context *skspcl = sksgio->skspcl;
1966 struct sg_iovec curiov;
1970 u32 resid = sksgio->dxfer_len;
1974 curiov.iov_base = NULL;
1976 if (dxfer_dir != sksgio->sg.dxfer_direction) {
1977 if (dxfer_dir != SG_DXFER_TO_DEV ||
1978 sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1983 u32 nbytes = PAGE_SIZE;
1985 if (curiov.iov_len == 0) {
1986 curiov = sksgio->iov[iov_ix++];
1992 page = sg_page(&skspcl->req.sg[sksg_ix++]);
1993 bufp = page_address(page);
1994 buf_len = PAGE_SIZE;
1997 nbytes = min_t(u32, nbytes, resid);
1998 nbytes = min_t(u32, nbytes, curiov.iov_len);
1999 nbytes = min_t(u32, nbytes, buf_len);
2001 if (dxfer_dir == SG_DXFER_TO_DEV)
2002 rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
2004 rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
2010 curiov.iov_len -= nbytes;
2011 curiov.iov_base += nbytes;
2018 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
2019 struct skd_sg_io *sksgio)
2021 struct skd_special_context *skspcl = sksgio->skspcl;
2022 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2023 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2025 memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
2027 /* Initialize the FIT msg header */
2028 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
2029 fmh->num_protocol_cmds_coalesced = 1;
2031 /* Initialize the SCSI request */
2032 if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
2033 scsi_req->hdr.sg_list_dma_address =
2034 cpu_to_be64(skspcl->req.sksg_dma_address);
2035 scsi_req->hdr.tag = skspcl->req.id;
2036 scsi_req->hdr.sg_list_len_bytes =
2037 cpu_to_be32(skspcl->req.sg_byte_count);
2038 memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
2040 skspcl->req.state = SKD_REQ_STATE_BUSY;
2041 skd_send_special_fitmsg(skdev, skspcl);
2046 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
2048 unsigned long flags;
2051 rc = wait_event_interruptible_timeout(skdev->waitq,
2052 (sksgio->skspcl->req.state !=
2053 SKD_REQ_STATE_BUSY),
2054 msecs_to_jiffies(sksgio->sg.
2057 spin_lock_irqsave(&skdev->lock, flags);
2059 if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
2060 DPRINTK(skdev, "skspcl %p aborted\n", sksgio->skspcl);
2062 /* Build check cond, sense and let command finish. */
2063 /* For a timeout, we must fabricate completion and sense
2064 * data to complete the command */
2065 sksgio->skspcl->req.completion.status =
2066 SAM_STAT_CHECK_CONDITION;
2068 memset(&sksgio->skspcl->req.err_info, 0,
2069 sizeof(sksgio->skspcl->req.err_info));
2070 sksgio->skspcl->req.err_info.type = 0x70;
2071 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
2072 sksgio->skspcl->req.err_info.code = 0x44;
2073 sksgio->skspcl->req.err_info.qual = 0;
2075 } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
2076 /* No longer on the adapter. We finish. */
2079 /* Something's gone wrong. Still busy. Timeout or
2080 * user interrupted (control-C). Mark as an orphan
2081 * so it will be disposed when completed. */
2082 sksgio->skspcl->orphaned = 1;
2083 sksgio->skspcl = NULL;
2085 DPRINTK(skdev, "timed out %p (%u ms)\n", sksgio,
2086 sksgio->sg.timeout);
2089 DPRINTK(skdev, "cntlc %p\n", sksgio);
2094 spin_unlock_irqrestore(&skdev->lock, flags);
2099 static int skd_sg_io_put_status(struct skd_device *skdev,
2100 struct skd_sg_io *sksgio)
2102 struct sg_io_hdr *sgp = &sksgio->sg;
2103 struct skd_special_context *skspcl = sksgio->skspcl;
2106 u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
2108 sgp->status = skspcl->req.completion.status;
2109 resid = sksgio->dxfer_len - nb;
2111 sgp->masked_status = sgp->status & STATUS_MASK;
2112 sgp->msg_status = 0;
2113 sgp->host_status = 0;
2114 sgp->driver_status = 0;
2116 if (sgp->masked_status || sgp->host_status || sgp->driver_status)
2117 sgp->info |= SG_INFO_CHECK;
2119 DPRINTK(skdev, "status %x masked %x resid 0x%x\n", sgp->status,
2120 sgp->masked_status, sgp->resid);
2122 if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
2123 if (sgp->mx_sb_len > 0) {
2124 struct fit_comp_error_info *ei = &skspcl->req.err_info;
2125 u32 nbytes = sizeof(*ei);
2127 nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
2129 sgp->sb_len_wr = nbytes;
2131 if (__copy_to_user(sgp->sbp, ei, nbytes)) {
2132 DPRINTK(skdev, "copy_to_user sense failed %p\n",
2139 if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
2140 DPRINTK(skdev, "copy_to_user sg failed %p\n", sksgio->argp);
2147 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
2148 struct skd_sg_io *sksgio)
2150 struct skd_special_context *skspcl = sksgio->skspcl;
2152 if (skspcl != NULL) {
2155 sksgio->skspcl = NULL;
2157 spin_lock_irqsave(&skdev->lock, flags);
2158 skd_release_special(skdev, skspcl);
2159 spin_unlock_irqrestore(&skdev->lock, flags);
2166 *****************************************************************************
2167 * INTERNAL REQUESTS -- generated by driver itself
2168 *****************************************************************************
2171 static int skd_format_internal_skspcl(struct skd_device *skdev)
2173 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2174 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
2175 struct fit_msg_hdr *fmh;
2176 uint64_t dma_address;
2177 struct skd_scsi_request *scsi;
2179 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
2180 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
2181 fmh->num_protocol_cmds_coalesced = 1;
2183 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
2184 memset(scsi, 0, sizeof(*scsi));
2185 dma_address = skspcl->req.sksg_dma_address;
2186 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
2187 sgd->control = FIT_SGD_CONTROL_LAST;
2188 sgd->byte_count = 0;
2189 sgd->host_side_addr = skspcl->db_dma_address;
2190 sgd->dev_side_addr = 0;
2191 sgd->next_desc_ptr = 0LL;
2196 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
2198 static void skd_send_internal_skspcl(struct skd_device *skdev,
2199 struct skd_special_context *skspcl,
2202 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
2203 struct skd_scsi_request *scsi;
2204 unsigned char *buf = skspcl->data_buf;
2207 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
2209 * A refresh is already in progress.
2210 * Just wait for it to finish.
2214 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
2215 skspcl->req.state = SKD_REQ_STATE_BUSY;
2216 skspcl->req.id += SKD_ID_INCR;
2218 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
2219 scsi->hdr.tag = skspcl->req.id;
2221 memset(scsi->cdb, 0, sizeof(scsi->cdb));
2224 case TEST_UNIT_READY:
2225 scsi->cdb[0] = TEST_UNIT_READY;
2226 sgd->byte_count = 0;
2227 scsi->hdr.sg_list_len_bytes = 0;
2231 scsi->cdb[0] = READ_CAPACITY;
2232 sgd->byte_count = SKD_N_READ_CAP_BYTES;
2233 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2237 scsi->cdb[0] = INQUIRY;
2238 scsi->cdb[1] = 0x01; /* evpd */
2239 scsi->cdb[2] = 0x80; /* serial number page */
2240 scsi->cdb[4] = 0x10;
2241 sgd->byte_count = 16;
2242 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2245 case SYNCHRONIZE_CACHE:
2246 scsi->cdb[0] = SYNCHRONIZE_CACHE;
2247 sgd->byte_count = 0;
2248 scsi->hdr.sg_list_len_bytes = 0;
2252 scsi->cdb[0] = WRITE_BUFFER;
2253 scsi->cdb[1] = 0x02;
2254 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
2255 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
2256 sgd->byte_count = WR_BUF_SIZE;
2257 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2258 /* fill incrementing byte pattern */
2259 for (i = 0; i < sgd->byte_count; i++)
2264 scsi->cdb[0] = READ_BUFFER;
2265 scsi->cdb[1] = 0x02;
2266 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
2267 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
2268 sgd->byte_count = WR_BUF_SIZE;
2269 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2270 memset(skspcl->data_buf, 0, sgd->byte_count);
2274 SKD_ASSERT("Don't know what to send");
2278 skd_send_special_fitmsg(skdev, skspcl);
2281 static void skd_refresh_device_data(struct skd_device *skdev)
2283 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2285 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
2288 static int skd_chk_read_buf(struct skd_device *skdev,
2289 struct skd_special_context *skspcl)
2291 unsigned char *buf = skspcl->data_buf;
2294 /* check for incrementing byte pattern */
2295 for (i = 0; i < WR_BUF_SIZE; i++)
2296 if (buf[i] != (i & 0xFF))
2302 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
2303 u8 code, u8 qual, u8 fruc)
2305 /* If the check condition is of special interest, log a message */
2306 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
2307 && (code == 0x04) && (qual == 0x06)) {
2308 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
2309 "ascq/fruc %02x/%02x/%02x/%02x\n",
2310 skd_name(skdev), key, code, qual, fruc);
2314 static void skd_complete_internal(struct skd_device *skdev,
2315 volatile struct fit_completion_entry_v1
2317 volatile struct fit_comp_error_info *skerr,
2318 struct skd_special_context *skspcl)
2320 u8 *buf = skspcl->data_buf;
2323 struct skd_scsi_request *scsi =
2324 (struct skd_scsi_request *)&skspcl->msg_buf[64];
2326 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
2328 DPRINTK(skdev, "complete internal %x\n", scsi->cdb[0]);
2330 skspcl->req.completion = *skcomp;
2331 skspcl->req.state = SKD_REQ_STATE_IDLE;
2332 skspcl->req.id += SKD_ID_INCR;
2334 status = skspcl->req.completion.status;
2336 skd_log_check_status(skdev, status, skerr->key, skerr->code,
2337 skerr->qual, skerr->fruc);
2339 switch (scsi->cdb[0]) {
2340 case TEST_UNIT_READY:
2341 if (status == SAM_STAT_GOOD)
2342 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2343 else if ((status == SAM_STAT_CHECK_CONDITION) &&
2344 (skerr->key == MEDIUM_ERROR))
2345 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2347 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2348 VPRINTK(skdev, "TUR failed, don't send anymore"
2349 "state 0x%x\n", skdev->state);
2352 DPRINTK(skdev, "**** TUR failed, retry skerr\n");
2353 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2358 if (status == SAM_STAT_GOOD)
2359 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
2361 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2362 VPRINTK(skdev, "write buffer failed, don't send"
2363 " anymore state 0x%x\n", skdev->state);
2367 "**** write buffer failed, retry skerr\n");
2368 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2373 if (status == SAM_STAT_GOOD) {
2374 if (skd_chk_read_buf(skdev, skspcl) == 0)
2375 skd_send_internal_skspcl(skdev, skspcl,
2379 "(%s):*** W/R Buffer mismatch %d ***\n",
2380 skd_name(skdev), skdev->connect_retries);
2381 if (skdev->connect_retries <
2382 SKD_MAX_CONNECT_RETRIES) {
2383 skdev->connect_retries++;
2384 skd_soft_reset(skdev);
2387 "(%s): W/R Buffer Connect Error\n",
2394 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2396 "read buffer failed, don't send anymore"
2397 "state 0x%x\n", skdev->state);
2401 "**** read buffer failed, retry skerr\n");
2402 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2407 skdev->read_cap_is_valid = 0;
2408 if (status == SAM_STAT_GOOD) {
2409 skdev->read_cap_last_lba =
2410 (buf[0] << 24) | (buf[1] << 16) |
2411 (buf[2] << 8) | buf[3];
2412 skdev->read_cap_blocksize =
2413 (buf[4] << 24) | (buf[5] << 16) |
2414 (buf[6] << 8) | buf[7];
2416 DPRINTK(skdev, "last lba %d, bs %d\n",
2417 skdev->read_cap_last_lba,
2418 skdev->read_cap_blocksize);
2420 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2422 skdev->read_cap_is_valid = 1;
2424 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2425 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2426 (skerr->key == MEDIUM_ERROR)) {
2427 skdev->read_cap_last_lba = ~0;
2428 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2430 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
2431 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2433 DPRINTK(skdev, "**** READCAP failed, retry TUR\n");
2434 skd_send_internal_skspcl(skdev, skspcl,
2440 skdev->inquiry_is_valid = 0;
2441 if (status == SAM_STAT_GOOD) {
2442 skdev->inquiry_is_valid = 1;
2444 for (i = 0; i < 12; i++)
2445 skdev->inq_serial_num[i] = buf[i + 4];
2446 skdev->inq_serial_num[12] = 0;
2449 if (skd_unquiesce_dev(skdev) < 0)
2450 DPRINTK(skdev, "**** failed, to ONLINE device\n");
2451 /* connection is complete */
2452 skdev->connect_retries = 0;
2455 case SYNCHRONIZE_CACHE:
2456 if (status == SAM_STAT_GOOD)
2457 skdev->sync_done = 1;
2459 skdev->sync_done = -1;
2460 wake_up_interruptible(&skdev->waitq);
2464 SKD_ASSERT("we didn't send this");
2469 *****************************************************************************
2471 *****************************************************************************
2474 static void skd_send_fitmsg(struct skd_device *skdev,
2475 struct skd_fitmsg_context *skmsg)
2478 struct fit_msg_hdr *fmh;
2480 VPRINTK(skdev, "dma address 0x%llx, busy=%d\n",
2481 skmsg->mb_dma_address, skdev->in_flight);
2482 VPRINTK(skdev, "msg_buf 0x%p, offset %x\n",
2483 skmsg->msg_buf, skmsg->offset);
2485 qcmd = skmsg->mb_dma_address;
2486 qcmd |= FIT_QCMD_QID_NORMAL;
2488 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2489 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2491 if (unlikely(skdev->dbg_level > 1)) {
2492 u8 *bp = (u8 *)skmsg->msg_buf;
2494 for (i = 0; i < skmsg->length; i += 8) {
2495 VPRINTK(skdev, " msg[%2d] %02x %02x %02x %02x "
2496 "%02x %02x %02x %02x\n",
2497 i, bp[i + 0], bp[i + 1], bp[i + 2],
2498 bp[i + 3], bp[i + 4], bp[i + 5],
2499 bp[i + 6], bp[i + 7]);
2505 if (skmsg->length > 256)
2506 qcmd |= FIT_QCMD_MSGSIZE_512;
2507 else if (skmsg->length > 128)
2508 qcmd |= FIT_QCMD_MSGSIZE_256;
2509 else if (skmsg->length > 64)
2510 qcmd |= FIT_QCMD_MSGSIZE_128;
2513 * This makes no sense because the FIT msg header is
2514 * 64 bytes. If the msg is only 64 bytes long it has
2517 qcmd |= FIT_QCMD_MSGSIZE_64;
2519 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2523 static void skd_send_special_fitmsg(struct skd_device *skdev,
2524 struct skd_special_context *skspcl)
2528 if (unlikely(skdev->dbg_level > 1)) {
2529 u8 *bp = (u8 *)skspcl->msg_buf;
2532 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2534 " spcl[%2d] %02x %02x %02x %02x "
2535 "%02x %02x %02x %02x\n", i,
2536 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
2537 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
2542 VPRINTK(skdev, "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2543 skspcl, skspcl->req.id, skspcl->req.sksg_list,
2544 skspcl->req.sksg_dma_address);
2545 for (i = 0; i < skspcl->req.n_sg; i++) {
2546 struct fit_sg_descriptor *sgd =
2547 &skspcl->req.sksg_list[i];
2549 VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x "
2550 "addr=0x%llx next=0x%llx\n",
2551 i, sgd->byte_count, sgd->control,
2552 sgd->host_side_addr, sgd->next_desc_ptr);
2557 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2558 * and one 64-byte SSDI command.
2560 qcmd = skspcl->mb_dma_address;
2561 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2563 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2567 *****************************************************************************
2569 *****************************************************************************
2572 static void skd_complete_other(struct skd_device *skdev,
2573 volatile struct fit_completion_entry_v1 *skcomp,
2574 volatile struct fit_comp_error_info *skerr);
2577 static void skd_requeue_request(struct skd_device *skdev,
2578 struct skd_request_context *skreq);
2587 enum skd_check_status_action action;
2590 static struct sns_info skd_chkstat_table[] = {
2592 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
2593 SKD_CHECK_STATUS_REPORT_GOOD },
2596 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
2597 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2598 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
2599 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2600 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2601 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2603 /* Retry (with limits) */
2604 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2605 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2606 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2607 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2608 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2609 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2610 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2611 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2613 /* Busy (or about to be) */
2614 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2615 SKD_CHECK_STATUS_BUSY_IMMINENT },
2619 * Look up status and sense data to decide how to handle the error
2621 * mask says which fields must match e.g., mask=0x18 means check
2622 * type and stat, ignore key, asc, ascq.
2625 static enum skd_check_status_action skd_check_status(struct skd_device *skdev,
2627 volatile struct fit_comp_error_info *skerr)
2631 pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2632 skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2635 VPRINTK(skdev, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x "
2636 "fruc=%02x\n", skerr->type, cmp_status, skerr->key,
2637 skerr->code, skerr->qual, skerr->fruc);
2639 /* Does the info match an entry in the good category? */
2640 n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2641 for (i = 0; i < n; i++) {
2642 struct sns_info *sns = &skd_chkstat_table[i];
2644 if (sns->mask & 0x10)
2645 if (skerr->type != sns->type)
2648 if (sns->mask & 0x08)
2649 if (cmp_status != sns->stat)
2652 if (sns->mask & 0x04)
2653 if (skerr->key != sns->key)
2656 if (sns->mask & 0x02)
2657 if (skerr->code != sns->asc)
2660 if (sns->mask & 0x01)
2661 if (skerr->qual != sns->ascq)
2664 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2665 pr_err("(%s): SMART Alert: sense key/asc/ascq "
2667 skd_name(skdev), skerr->key,
2668 skerr->code, skerr->qual);
2673 /* No other match, so nonzero status means error,
2674 * zero status means good
2677 DPRINTK(skdev, "status check: error\n");
2678 return SKD_CHECK_STATUS_REPORT_ERROR;
2681 DPRINTK(skdev, "status check good default\n");
2682 return SKD_CHECK_STATUS_REPORT_GOOD;
2685 static void skd_resolve_req_exception(struct skd_device *skdev,
2686 struct skd_request_context *skreq)
2688 u8 cmp_status = skreq->completion.status;
2690 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2691 case SKD_CHECK_STATUS_REPORT_GOOD:
2692 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2693 skd_end_request(skdev, skreq, 0);
2696 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2697 skd_log_skreq(skdev, skreq, "retry(busy)");
2698 skd_requeue_request(skdev, skreq);
2699 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2700 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2701 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2702 skd_quiesce_dev(skdev);
2705 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2707 if ((unsigned long) ++skreq->req->special <
2709 skd_log_skreq(skdev, skreq, "retry");
2710 skd_requeue_request(skdev, skreq);
2714 /* fall through to report error */
2716 case SKD_CHECK_STATUS_REPORT_ERROR:
2718 skd_end_request(skdev, skreq, -EIO);
2723 static void skd_requeue_request(struct skd_device *skdev,
2724 struct skd_request_context *skreq)
2727 blk_requeue_request(skdev->queue, skreq->req);
2729 bio_list_add_head(&skdev->bio_queue, skreq->bio);
2736 /* assume spinlock is already held */
2737 static void skd_release_skreq(struct skd_device *skdev,
2738 struct skd_request_context *skreq)
2741 struct skd_fitmsg_context *skmsg;
2746 * Reclaim the FIT msg buffer if this is
2747 * the first of the requests it carried to
2748 * be completed. The FIT msg buffer used to
2749 * send this request cannot be reused until
2750 * we are sure the s1120 card has copied
2751 * it to its memory. The FIT msg might have
2752 * contained several requests. As soon as
2753 * any of them are completed we know that
2754 * the entire FIT msg was transferred.
2755 * Only the first completed request will
2756 * match the FIT msg buffer id. The FIT
2757 * msg buffer id is immediately updated.
2758 * When subsequent requests complete the FIT
2759 * msg buffer id won't match, so we know
2760 * quite cheaply that it is already done.
2762 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2763 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2765 skmsg = &skdev->skmsg_table[msg_slot];
2766 if (skmsg->id == skreq->fitmsg_id) {
2767 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2768 SKD_ASSERT(skmsg->outstanding > 0);
2769 skmsg->outstanding--;
2770 if (skmsg->outstanding == 0) {
2771 skmsg->state = SKD_MSG_STATE_IDLE;
2772 skmsg->id += SKD_ID_INCR;
2773 skmsg->next = skdev->skmsg_free_list;
2774 skdev->skmsg_free_list = skmsg;
2779 * Decrease the number of active requests.
2780 * Also decrements the count in the timeout slot.
2782 SKD_ASSERT(skdev->in_flight > 0);
2783 skdev->in_flight -= 1;
2785 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2786 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2787 skdev->timeout_slot[timo_slot] -= 1;
2792 if (likely(!skd_bio))
2799 * Reclaim the skd_request_context
2801 skreq->state = SKD_REQ_STATE_IDLE;
2802 skreq->id += SKD_ID_INCR;
2803 skreq->next = skdev->skreq_free_list;
2804 skdev->skreq_free_list = skreq;
2807 #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2809 static void skd_do_inq_page_00(struct skd_device *skdev,
2810 volatile struct fit_completion_entry_v1 *skcomp,
2811 volatile struct fit_comp_error_info *skerr,
2812 uint8_t *cdb, uint8_t *buf)
2814 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2816 /* Caller requested "supported pages". The driver needs to insert
2819 VPRINTK(skdev, "skd_do_driver_inquiry: modify supported pages.\n");
2821 /* If the device rejected the request because the CDB was
2822 * improperly formed, then just leave.
2824 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2825 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2828 /* Get the amount of space the caller allocated */
2829 max_bytes = (cdb[3] << 8) | cdb[4];
2831 /* Get the number of pages actually returned by the device */
2832 drive_pages = (buf[2] << 8) | buf[3];
2833 drive_bytes = drive_pages + 4;
2834 new_size = drive_pages + 1;
2836 /* Supported pages must be in numerical order, so find where
2837 * the driver page needs to be inserted into the list of
2838 * pages returned by the device.
2840 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2841 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2842 return; /* Device using this page code. abort */
2843 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2847 if (insert_pt < max_bytes) {
2850 /* Shift everything up one byte to make room. */
2851 for (u = new_size + 3; u > insert_pt; u--)
2852 buf[u] = buf[u - 1];
2853 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2855 /* SCSI byte order increment of num_returned_bytes by 1 */
2856 skcomp->num_returned_bytes =
2857 be32_to_cpu(skcomp->num_returned_bytes) + 1;
2858 skcomp->num_returned_bytes =
2859 be32_to_cpu(skcomp->num_returned_bytes);
2862 /* update page length field to reflect the driver's page too */
2863 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2864 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2867 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2873 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2876 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2878 pci_bus_speed = linksta & 0xF;
2879 pci_lanes = (linksta & 0x3F0) >> 4;
2881 *speed = STEC_LINK_UNKNOWN;
2886 switch (pci_bus_speed) {
2888 *speed = STEC_LINK_2_5GTS;
2891 *speed = STEC_LINK_5GTS;
2894 *speed = STEC_LINK_8GTS;
2897 *speed = STEC_LINK_UNKNOWN;
2901 if (pci_lanes <= 0x20)
2907 static void skd_do_inq_page_da(struct skd_device *skdev,
2908 volatile struct fit_completion_entry_v1 *skcomp,
2909 volatile struct fit_comp_error_info *skerr,
2910 uint8_t *cdb, uint8_t *buf)
2913 struct driver_inquiry_data inq;
2916 VPRINTK(skdev, "skd_do_driver_inquiry: return driver page\n");
2918 memset(&inq, 0, sizeof(inq));
2920 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2922 if (skdev->pdev && skdev->pdev->bus) {
2923 skd_get_link_info(skdev->pdev,
2924 &inq.pcie_link_speed, &inq.pcie_link_lanes);
2925 inq.pcie_bus_number = cpu_to_be16(skdev->pdev->bus->number);
2926 inq.pcie_device_number = PCI_SLOT(skdev->pdev->devfn);
2927 inq.pcie_function_number = PCI_FUNC(skdev->pdev->devfn);
2929 pci_read_config_word(skdev->pdev, PCI_VENDOR_ID, &val);
2930 inq.pcie_vendor_id = cpu_to_be16(val);
2932 pci_read_config_word(skdev->pdev, PCI_DEVICE_ID, &val);
2933 inq.pcie_device_id = cpu_to_be16(val);
2935 pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_VENDOR_ID,
2937 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2939 pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_ID, &val);
2940 inq.pcie_subsystem_device_id = cpu_to_be16(val);
2942 inq.pcie_bus_number = 0xFFFF;
2943 inq.pcie_device_number = 0xFF;
2944 inq.pcie_function_number = 0xFF;
2945 inq.pcie_link_speed = 0xFF;
2946 inq.pcie_link_lanes = 0xFF;
2947 inq.pcie_vendor_id = 0xFFFF;
2948 inq.pcie_device_id = 0xFFFF;
2949 inq.pcie_subsystem_vendor_id = 0xFFFF;
2950 inq.pcie_subsystem_device_id = 0xFFFF;
2953 /* Driver version, fixed lenth, padded with spaces on the right */
2954 inq.driver_version_length = sizeof(inq.driver_version);
2955 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2956 memcpy(inq.driver_version, DRV_VER_COMPL,
2957 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2959 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2961 /* Clear the error set by the device */
2962 skcomp->status = SAM_STAT_GOOD;
2963 memset((void *)skerr, 0, sizeof(*skerr));
2965 /* copy response into output buffer */
2966 max_bytes = (cdb[3] << 8) | cdb[4];
2967 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2969 skcomp->num_returned_bytes =
2970 be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
2973 static void skd_do_driver_inq(struct skd_device *skdev,
2974 volatile struct fit_completion_entry_v1 *skcomp,
2975 volatile struct fit_comp_error_info *skerr,
2976 uint8_t *cdb, uint8_t *buf)
2980 else if (cdb[0] != INQUIRY)
2981 return; /* Not an INQUIRY */
2982 else if ((cdb[1] & 1) == 0)
2983 return; /* EVPD not set */
2984 else if (cdb[2] == 0)
2985 /* Need to add driver's page to supported pages list */
2986 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2987 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2988 /* Caller requested driver's page */
2989 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2992 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
3001 static void skd_process_scsi_inq(struct skd_device *skdev,
3002 volatile struct fit_completion_entry_v1
3004 volatile struct fit_comp_error_info *skerr,
3005 struct skd_special_context *skspcl)
3008 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
3009 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
3011 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
3012 skspcl->req.sg_data_dir);
3013 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
3016 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
3020 static int skd_isr_completion_posted(struct skd_device *skdev,
3021 int limit, int *enqueued)
3023 volatile struct fit_completion_entry_v1 *skcmp = NULL;
3024 volatile struct fit_comp_error_info *skerr;
3027 struct skd_request_context *skreq;
3038 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
3040 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
3041 cmp_cycle = skcmp->cycle;
3042 cmp_cntxt = skcmp->tag;
3043 cmp_status = skcmp->status;
3044 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
3046 skerr = &skdev->skerr_table[skdev->skcomp_ix];
3049 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
3050 "busy=%d rbytes=0x%x proto=%d\n", skdev->skcomp_cycle,
3051 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
3052 skdev->in_flight, cmp_bytes, skdev->proto_ver);
3054 if (cmp_cycle != skdev->skcomp_cycle) {
3055 VPRINTK(skdev, "end of completions\n");
3059 * Update the completion queue head index and possibly
3060 * the completion cycle count. 8-bit wrap-around.
3063 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
3064 skdev->skcomp_ix = 0;
3065 skdev->skcomp_cycle++;
3069 * The command context is a unique 32-bit ID. The low order
3070 * bits help locate the request. The request is usually a
3071 * r/w request (see skd_start() above) or a special request.
3074 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
3076 /* Is this other than a r/w request? */
3077 if (req_slot >= skdev->num_req_context) {
3079 * This is not a completion for a r/w request.
3081 skd_complete_other(skdev, skcmp, skerr);
3085 skreq = &skdev->skreq_table[req_slot];
3088 * Make sure the request ID for the slot matches.
3090 if (skreq->id != req_id) {
3091 DPRINTK(skdev, "mismatch comp_id=0x%x req_id=0x%x\n",
3094 u16 new_id = cmp_cntxt;
3095 pr_err("(%s): Completion mismatch "
3096 "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
3097 skd_name(skdev), req_id,
3104 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
3106 if (skreq->state == SKD_REQ_STATE_ABORTED) {
3107 DPRINTK(skdev, "reclaim req %p id=%04x\n",
3109 /* a previously timed out command can
3110 * now be cleaned up */
3111 skd_release_skreq(skdev, skreq);
3115 skreq->completion = *skcmp;
3116 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
3117 skreq->err_info = *skerr;
3118 skd_log_check_status(skdev, cmp_status, skerr->key,
3119 skerr->code, skerr->qual,
3122 /* Release DMA resources for the request. */
3123 if (skreq->n_sg > 0)
3124 skd_postop_sg_list(skdev, skreq);
3126 if (((!skd_bio) && !skreq->req) ||
3127 ((skd_bio) && !skreq->bio)) {
3128 DPRINTK(skdev, "NULL backptr skdreq %p, "
3129 "req=0x%x req_id=0x%x\n",
3130 skreq, skreq->id, req_id);
3133 * Capture the outcome and post it back to the
3136 if (likely(cmp_status == SAM_STAT_GOOD)) {
3137 if (unlikely(skreq->flush_cmd)) {
3139 /* if empty size bio, we are all done */
3140 if (bio_sectors(skreq->bio) == 0) {
3141 skd_end_request(skdev, skreq, 0);
3143 ret = skd_flush_cmd_enqueue(skdev, (void *)skreq->bio);
3145 pr_err("Failed to enqueue flush bio with Data. Err=%d.\n", ret);
3146 skd_end_request(skdev, skreq, ret);
3152 skd_end_request(skdev, skreq, 0);
3155 skd_end_request(skdev, skreq, 0);
3158 skd_resolve_req_exception(skdev, skreq);
3163 * Release the skreq, its FIT msg (if one), timeout slot,
3166 skd_release_skreq(skdev, skreq);
3168 /* skd_isr_comp_limit equal zero means no limit */
3170 if (++processed >= limit) {
3177 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
3178 && (skdev->in_flight) == 0) {
3179 skdev->state = SKD_DRVR_STATE_PAUSED;
3180 wake_up_interruptible(&skdev->waitq);
3186 static void skd_complete_other(struct skd_device *skdev,
3187 volatile struct fit_completion_entry_v1 *skcomp,
3188 volatile struct fit_comp_error_info *skerr)
3193 struct skd_special_context *skspcl;
3195 req_id = skcomp->tag;
3196 req_table = req_id & SKD_ID_TABLE_MASK;
3197 req_slot = req_id & SKD_ID_SLOT_MASK;
3199 DPRINTK(skdev, "table=0x%x id=0x%x slot=%d\n", req_table, req_id,
3203 * Based on the request id, determine how to dispatch this completion.
3204 * This swich/case is finding the good cases and forwarding the
3205 * completion entry. Errors are reported below the switch.
3207 switch (req_table) {
3208 case SKD_ID_RW_REQUEST:
3210 * The caller, skd_completion_posted_isr() above,
3211 * handles r/w requests. The only way we get here
3212 * is if the req_slot is out of bounds.
3216 case SKD_ID_SPECIAL_REQUEST:
3218 * Make sure the req_slot is in bounds and that the id
3221 if (req_slot < skdev->n_special) {
3222 skspcl = &skdev->skspcl_table[req_slot];
3223 if (skspcl->req.id == req_id &&
3224 skspcl->req.state == SKD_REQ_STATE_BUSY) {
3225 skd_complete_special(skdev,
3226 skcomp, skerr, skspcl);
3232 case SKD_ID_INTERNAL:
3233 if (req_slot == 0) {
3234 skspcl = &skdev->internal_skspcl;
3235 if (skspcl->req.id == req_id &&
3236 skspcl->req.state == SKD_REQ_STATE_BUSY) {
3237 skd_complete_internal(skdev,
3238 skcomp, skerr, skspcl);
3244 case SKD_ID_FIT_MSG:
3246 * These id's should never appear in a completion record.
3252 * These id's should never appear anywhere;
3258 * If we get here it is a bad or stale id.
3262 static void skd_complete_special(struct skd_device *skdev,
3263 volatile struct fit_completion_entry_v1
3265 volatile struct fit_comp_error_info *skerr,
3266 struct skd_special_context *skspcl)
3268 DPRINTK(skdev, " completing special request %p\n", skspcl);
3269 if (skspcl->orphaned) {
3270 /* Discard orphaned request */
3271 /* ?: Can this release directly or does it need
3272 * to use a worker? */
3273 DPRINTK(skdev, "release orphaned %p\n", skspcl);
3274 skd_release_special(skdev, skspcl);
3278 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
3280 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
3281 skspcl->req.completion = *skcomp;
3282 skspcl->req.err_info = *skerr;
3284 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
3285 skerr->code, skerr->qual, skerr->fruc);
3287 wake_up_interruptible(&skdev->waitq);
3290 /* assume spinlock is already held */
3291 static void skd_release_special(struct skd_device *skdev,
3292 struct skd_special_context *skspcl)
3294 int i, was_depleted;
3296 for (i = 0; i < skspcl->req.n_sg; i++) {
3298 struct page *page = sg_page(&skspcl->req.sg[i]);
3302 was_depleted = (skdev->skspcl_free_list == NULL);
3304 skspcl->req.state = SKD_REQ_STATE_IDLE;
3305 skspcl->req.id += SKD_ID_INCR;
3307 (struct skd_request_context *)skdev->skspcl_free_list;
3308 skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
3311 DPRINTK(skdev, "skspcl was depleted\n");
3312 /* Free list was depleted. Their might be waiters. */
3313 wake_up_interruptible(&skdev->waitq);
3317 static void skd_reset_skcomp(struct skd_device *skdev)
3320 struct fit_completion_entry_v1 *skcomp;
3322 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3323 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3325 memset(skdev->skcomp_table, 0, nbytes);
3327 skdev->skcomp_ix = 0;
3328 skdev->skcomp_cycle = 1;
3332 *****************************************************************************
3334 *****************************************************************************
3336 static void skd_completion_worker(struct work_struct *work)
3338 struct skd_device *skdev =
3339 container_of(work, struct skd_device, completion_worker);
3340 unsigned long flags;
3341 int flush_enqueued = 0;
3343 spin_lock_irqsave(&skdev->lock, flags);
3346 * pass in limit=0, which means no limit..
3347 * process everything in compq
3349 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
3350 skd_request_fn(skdev->queue);
3352 spin_unlock_irqrestore(&skdev->lock, flags);
3355 static void skd_isr_msg_from_dev(struct skd_device *skdev);
3358 static skd_isr(int irq, void *ptr)
3360 struct skd_device *skdev;
3365 int flush_enqueued = 0;
3367 skdev = (struct skd_device *)ptr;
3368 spin_lock(&skdev->lock);
3371 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3373 ack = FIT_INT_DEF_MASK;
3376 VPRINTK(skdev, "intstat=0x%x ack=0x%x\n", intstat, ack);
3378 /* As long as there is an int pending on device, keep
3379 * running loop. When none, get out, but if we've never
3380 * done any processing, call completion handler?
3383 /* No interrupts on device, but run the completion
3387 if (likely (skdev->state
3388 == SKD_DRVR_STATE_ONLINE))
3395 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
3397 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
3398 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
3399 if (intstat & FIT_ISH_COMPLETION_POSTED) {
3401 * If we have already deferred completion
3402 * processing, don't bother running it again
3406 skd_isr_completion_posted(skdev,
3407 skd_isr_comp_limit, &flush_enqueued);
3410 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3411 skd_isr_fwstate(skdev);
3412 if (skdev->state == SKD_DRVR_STATE_FAULT ||
3414 SKD_DRVR_STATE_DISAPPEARED) {
3415 spin_unlock(&skdev->lock);
3420 if (intstat & FIT_ISH_MSG_FROM_DEV)
3421 skd_isr_msg_from_dev(skdev);
3425 if (unlikely(flush_enqueued))
3426 skd_request_fn(skdev->queue);
3429 schedule_work(&skdev->completion_worker);
3430 else if (!flush_enqueued)
3431 skd_request_fn(skdev->queue);
3433 spin_unlock(&skdev->lock);
3439 static void skd_drive_fault(struct skd_device *skdev)
3441 skdev->state = SKD_DRVR_STATE_FAULT;
3442 pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3445 static void skd_drive_disappeared(struct skd_device *skdev)
3447 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3448 pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3451 static void skd_isr_fwstate(struct skd_device *skdev)
3456 int prev_driver_state = skdev->state;
3458 sense = SKD_READL(skdev, FIT_STATUS);
3459 state = sense & FIT_SR_DRIVE_STATE_MASK;
3461 pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3463 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3464 skd_drive_state_to_str(state), state);
3466 skdev->drive_state = state;
3468 switch (skdev->drive_state) {
3469 case FIT_SR_DRIVE_INIT:
3470 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3471 skd_disable_interrupts(skdev);
3474 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3475 skd_recover_requests(skdev, 0);
3476 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3477 skdev->timer_countdown = SKD_STARTING_TIMO;
3478 skdev->state = SKD_DRVR_STATE_STARTING;
3479 skd_soft_reset(skdev);
3482 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3483 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3484 skdev->last_mtd = mtd;
3487 case FIT_SR_DRIVE_ONLINE:
3488 skdev->cur_max_queue_depth = skd_max_queue_depth;
3489 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3490 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3492 skdev->queue_low_water_mark =
3493 skdev->cur_max_queue_depth * 2 / 3 + 1;
3494 if (skdev->queue_low_water_mark < 1)
3495 skdev->queue_low_water_mark = 1;
3497 "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3499 skdev->cur_max_queue_depth,
3500 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3502 skd_refresh_device_data(skdev);
3505 case FIT_SR_DRIVE_BUSY:
3506 skdev->state = SKD_DRVR_STATE_BUSY;
3507 skdev->timer_countdown = SKD_BUSY_TIMO;
3508 skd_quiesce_dev(skdev);
3510 case FIT_SR_DRIVE_BUSY_SANITIZE:
3511 /* set timer for 3 seconds, we'll abort any unfinished
3512 * commands after that expires
3514 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3515 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3516 skd_start_queue(skdev);
3518 case FIT_SR_DRIVE_BUSY_ERASE:
3519 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3520 skdev->timer_countdown = SKD_BUSY_TIMO;
3522 case FIT_SR_DRIVE_OFFLINE:
3523 skdev->state = SKD_DRVR_STATE_IDLE;
3525 case FIT_SR_DRIVE_SOFT_RESET:
3526 switch (skdev->state) {
3527 case SKD_DRVR_STATE_STARTING:
3528 case SKD_DRVR_STATE_RESTARTING:
3529 /* Expected by a caller of skd_soft_reset() */
3532 skdev->state = SKD_DRVR_STATE_RESTARTING;
3536 case FIT_SR_DRIVE_FW_BOOTING:
3537 VPRINTK(skdev, "ISR FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name);
3538 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3539 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3542 case FIT_SR_DRIVE_DEGRADED:
3543 case FIT_SR_PCIE_LINK_DOWN:
3544 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3547 case FIT_SR_DRIVE_FAULT:
3548 skd_drive_fault(skdev);
3549 skd_recover_requests(skdev, 0);
3550 skd_start_queue(skdev);
3553 /* PCIe bus returned all Fs? */
3555 pr_info("(%s): state=0x%x sense=0x%x\n",
3556 skd_name(skdev), state, sense);
3557 skd_drive_disappeared(skdev);
3558 skd_recover_requests(skdev, 0);
3559 skd_start_queue(skdev);
3563 * Uknown FW State. Wait for a state we recognize.
3567 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3569 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3570 skd_skdev_state_to_str(skdev->state), skdev->state);
3573 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3577 for (i = 0; i < skdev->num_req_context; i++) {
3578 struct skd_request_context *skreq = &skdev->skreq_table[i];
3580 if (skreq->state == SKD_REQ_STATE_BUSY) {
3581 skd_log_skreq(skdev, skreq, "recover");
3583 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3585 SKD_ASSERT(skreq->req != NULL);
3587 SKD_ASSERT(skreq->bio != NULL);
3589 /* Release DMA resources for the request. */
3590 if (skreq->n_sg > 0)
3591 skd_postop_sg_list(skdev, skreq);
3595 (unsigned long) ++skreq->req->special <
3597 skd_requeue_request(skdev, skreq);
3599 skd_end_request(skdev, skreq, -EIO);
3601 skd_end_request(skdev, skreq, -EIO);
3608 skreq->state = SKD_REQ_STATE_IDLE;
3609 skreq->id += SKD_ID_INCR;
3614 skreq[-1].next = skreq;
3617 skdev->skreq_free_list = skdev->skreq_table;
3619 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3620 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3622 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3623 skd_log_skmsg(skdev, skmsg, "salvaged");
3624 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3625 skmsg->state = SKD_MSG_STATE_IDLE;
3626 skmsg->id += SKD_ID_INCR;
3629 skmsg[-1].next = skmsg;
3632 skdev->skmsg_free_list = skdev->skmsg_table;
3634 for (i = 0; i < skdev->n_special; i++) {
3635 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3637 /* If orphaned, reclaim it because it has already been reported
3638 * to the process as an error (it was just waiting for
3639 * a completion that didn't come, and now it will never come)
3640 * If busy, change to a state that will cause it to error
3641 * out in the wait routine and let it do the normal
3642 * reporting and reclaiming
3644 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3645 if (skspcl->orphaned) {
3646 DPRINTK(skdev, "orphaned %p\n", skspcl);
3647 skd_release_special(skdev, skspcl);
3649 DPRINTK(skdev, "not orphaned %p\n", skspcl);
3650 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3654 skdev->skspcl_free_list = skdev->skspcl_table;
3656 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3657 skdev->timeout_slot[i] = 0;
3659 skdev->in_flight = 0;
3662 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3668 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3670 DPRINTK(skdev, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd);
3672 /* ignore any mtd that is an ack for something we didn't send */
3673 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3676 switch (FIT_MXD_TYPE(mfd)) {
3677 case FIT_MTD_FITFW_INIT:
3678 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3680 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3681 pr_err("(%s): protocol mismatch\n",
3683 pr_err("(%s): got=%d support=%d\n",
3684 skdev->name, skdev->proto_ver,
3685 FIT_PROTOCOL_VERSION_1);
3686 pr_err("(%s): please upgrade driver\n",
3688 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3689 skd_soft_reset(skdev);
3692 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3693 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3694 skdev->last_mtd = mtd;
3697 case FIT_MTD_GET_CMDQ_DEPTH:
3698 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3699 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3700 SKD_N_COMPLETION_ENTRY);
3701 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3702 skdev->last_mtd = mtd;
3705 case FIT_MTD_SET_COMPQ_DEPTH:
3706 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3707 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3708 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3709 skdev->last_mtd = mtd;
3712 case FIT_MTD_SET_COMPQ_ADDR:
3713 skd_reset_skcomp(skdev);
3714 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3715 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3716 skdev->last_mtd = mtd;
3719 case FIT_MTD_CMD_LOG_HOST_ID:
3720 skdev->connect_time_stamp = get_seconds();
3721 data = skdev->connect_time_stamp & 0xFFFF;
3722 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3723 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3724 skdev->last_mtd = mtd;
3727 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3728 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3729 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3730 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3731 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3732 skdev->last_mtd = mtd;
3735 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3736 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3737 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3738 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3739 skdev->last_mtd = mtd;
3741 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3743 skdev->connect_time_stamp, skdev->drive_jiffies);
3746 case FIT_MTD_ARM_QUEUE:
3747 skdev->last_mtd = 0;
3749 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3758 static void skd_disable_interrupts(struct skd_device *skdev)
3762 sense = SKD_READL(skdev, FIT_CONTROL);
3763 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3764 SKD_WRITEL(skdev, sense, FIT_CONTROL);
3765 DPRINTK(skdev, "sense 0x%x\n", sense);
3767 /* Note that the 1s is written. A 1-bit means
3768 * disable, a 0 means enable.
3770 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3773 static void skd_enable_interrupts(struct skd_device *skdev)
3777 /* unmask interrupts first */
3778 val = FIT_ISH_FW_STATE_CHANGE +
3779 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3781 /* Note that the compliment of mask is written. A 1-bit means
3782 * disable, a 0 means enable. */
3783 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3784 DPRINTK(skdev, "interrupt mask=0x%x\n", ~val);
3786 val = SKD_READL(skdev, FIT_CONTROL);
3787 val |= FIT_CR_ENABLE_INTERRUPTS;
3788 DPRINTK(skdev, "control=0x%x\n", val);
3789 SKD_WRITEL(skdev, val, FIT_CONTROL);
3793 *****************************************************************************
3794 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3795 *****************************************************************************
3798 static void skd_soft_reset(struct skd_device *skdev)
3802 val = SKD_READL(skdev, FIT_CONTROL);
3803 val |= (FIT_CR_SOFT_RESET);
3804 DPRINTK(skdev, "control=0x%x\n", val);
3805 SKD_WRITEL(skdev, val, FIT_CONTROL);
3808 static void skd_start_device(struct skd_device *skdev)
3810 unsigned long flags;
3814 spin_lock_irqsave(&skdev->lock, flags);
3816 /* ack all ghost interrupts */
3817 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3819 sense = SKD_READL(skdev, FIT_STATUS);
3821 DPRINTK(skdev, "initial status=0x%x\n", sense);
3823 state = sense & FIT_SR_DRIVE_STATE_MASK;
3824 skdev->drive_state = state;
3825 skdev->last_mtd = 0;
3827 skdev->state = SKD_DRVR_STATE_STARTING;
3828 skdev->timer_countdown = SKD_STARTING_TIMO;
3830 skd_enable_interrupts(skdev);
3832 switch (skdev->drive_state) {
3833 case FIT_SR_DRIVE_OFFLINE:
3834 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3837 case FIT_SR_DRIVE_FW_BOOTING:
3838 VPRINTK(skdev, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name);
3839 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3840 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3843 case FIT_SR_DRIVE_BUSY_SANITIZE:
3844 pr_info("(%s): Start: BUSY_SANITIZE\n",
3846 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3847 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3850 case FIT_SR_DRIVE_BUSY_ERASE:
3851 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3852 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3853 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3856 case FIT_SR_DRIVE_INIT:
3857 case FIT_SR_DRIVE_ONLINE:
3858 skd_soft_reset(skdev);
3861 case FIT_SR_DRIVE_BUSY:
3862 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3863 skdev->state = SKD_DRVR_STATE_BUSY;
3864 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3867 case FIT_SR_DRIVE_SOFT_RESET:
3868 pr_err("(%s) drive soft reset in prog\n",
3872 case FIT_SR_DRIVE_FAULT:
3873 /* Fault state is bad...soft reset won't do it...
3874 * Hard reset, maybe, but does it work on device?
3875 * For now, just fault so the system doesn't hang.
3877 skd_drive_fault(skdev);
3878 /*start the queue so we can respond with error to requests */
3879 VPRINTK(skdev, "starting %s queue\n", skdev->name);
3880 skd_start_queue(skdev);
3881 skdev->gendisk_on = -1;
3882 wake_up_interruptible(&skdev->waitq);
3886 /* Most likely the device isn't there or isn't responding
3887 * to the BAR1 addresses. */
3888 skd_drive_disappeared(skdev);
3889 /*start the queue so we can respond with error to requests */
3890 VPRINTK(skdev, "starting %s queue to error-out reqs\n",
3892 skd_start_queue(skdev);
3893 skdev->gendisk_on = -1;
3894 wake_up_interruptible(&skdev->waitq);
3898 pr_err("(%s) Start: unknown state %x\n",
3899 skd_name(skdev), skdev->drive_state);
3903 state = SKD_READL(skdev, FIT_CONTROL);
3904 DPRINTK(skdev, "FIT Control Status=0x%x\n", state);
3906 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3907 DPRINTK(skdev, "Intr Status=0x%x\n", state);
3909 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3910 DPRINTK(skdev, "Intr Mask=0x%x\n", state);
3912 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3913 DPRINTK(skdev, "Msg from Dev=0x%x\n", state);
3915 state = SKD_READL(skdev, FIT_HW_VERSION);
3916 DPRINTK(skdev, "HW version=0x%x\n", state);
3918 spin_unlock_irqrestore(&skdev->lock, flags);
3921 static void skd_stop_device(struct skd_device *skdev)
3923 unsigned long flags;
3924 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3928 spin_lock_irqsave(&skdev->lock, flags);
3930 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3931 pr_err("(%s): skd_stop_device not online no sync\n",
3936 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3937 pr_err("(%s): skd_stop_device no special\n",
3942 skdev->state = SKD_DRVR_STATE_SYNCING;
3943 skdev->sync_done = 0;
3945 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3947 spin_unlock_irqrestore(&skdev->lock, flags);
3949 wait_event_interruptible_timeout(skdev->waitq,
3950 (skdev->sync_done), (10 * HZ));
3952 spin_lock_irqsave(&skdev->lock, flags);
3954 switch (skdev->sync_done) {
3956 pr_err("(%s): skd_stop_device no sync\n",
3960 pr_err("(%s): skd_stop_device sync done\n",
3964 pr_err("(%s): skd_stop_device sync error\n",
3969 skdev->state = SKD_DRVR_STATE_STOPPING;
3970 spin_unlock_irqrestore(&skdev->lock, flags);
3972 skd_kill_timer(skdev);
3974 spin_lock_irqsave(&skdev->lock, flags);
3975 skd_disable_interrupts(skdev);
3977 /* ensure all ints on device are cleared */
3978 /* soft reset the device to unload with a clean slate */
3979 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3980 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3982 spin_unlock_irqrestore(&skdev->lock, flags);
3984 /* poll every 100ms, 1 second timeout */
3985 for (i = 0; i < 10; i++) {
3987 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3988 if (dev_state == FIT_SR_DRIVE_INIT)
3990 set_current_state(TASK_INTERRUPTIBLE);
3991 schedule_timeout(msecs_to_jiffies(100));
3994 if (dev_state != FIT_SR_DRIVE_INIT)
3995 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3996 skd_name(skdev), dev_state);
3999 /* assume spinlock is held */
4000 static void skd_restart_device(struct skd_device *skdev)
4004 /* ack all ghost interrupts */
4005 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
4007 state = SKD_READL(skdev, FIT_STATUS);
4009 DPRINTK(skdev, "drive status=0x%x\n", state);
4011 state &= FIT_SR_DRIVE_STATE_MASK;
4012 skdev->drive_state = state;
4013 skdev->last_mtd = 0;
4015 skdev->state = SKD_DRVR_STATE_RESTARTING;
4016 skdev->timer_countdown = SKD_RESTARTING_TIMO;
4018 skd_soft_reset(skdev);
4021 /* assume spinlock is held */
4022 static int skd_quiesce_dev(struct skd_device *skdev)
4026 switch (skdev->state) {
4027 case SKD_DRVR_STATE_BUSY:
4028 case SKD_DRVR_STATE_BUSY_IMMINENT:
4029 VPRINTK(skdev, "stopping %s queue\n", skdev->name);
4030 skd_stop_queue(skdev);
4032 case SKD_DRVR_STATE_ONLINE:
4033 case SKD_DRVR_STATE_STOPPING:
4034 case SKD_DRVR_STATE_SYNCING:
4035 case SKD_DRVR_STATE_PAUSING:
4036 case SKD_DRVR_STATE_PAUSED:
4037 case SKD_DRVR_STATE_STARTING:
4038 case SKD_DRVR_STATE_RESTARTING:
4039 case SKD_DRVR_STATE_RESUMING:
4042 VPRINTK(skdev, "state [%d] not implemented\n", skdev->state);
4047 /* assume spinlock is held */
4048 static int skd_unquiesce_dev(struct skd_device *skdev)
4050 int prev_driver_state = skdev->state;
4052 skd_log_skdev(skdev, "unquiesce");
4053 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
4054 DPRINTK(skdev, "**** device already ONLINE\n");
4057 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
4059 * If there has been an state change to other than
4060 * ONLINE, we will rely on controller state change
4061 * to come back online and restart the queue.
4062 * The BUSY state means that driver is ready to
4063 * continue normal processing but waiting for controller
4064 * to become available.
4066 skdev->state = SKD_DRVR_STATE_BUSY;
4067 DPRINTK(skdev, "drive BUSY state\n");
4072 * Drive has just come online, driver is either in startup,
4073 * paused performing a task, or bust waiting for hardware.
4075 switch (skdev->state) {
4076 case SKD_DRVR_STATE_PAUSED:
4077 case SKD_DRVR_STATE_BUSY:
4078 case SKD_DRVR_STATE_BUSY_IMMINENT:
4079 case SKD_DRVR_STATE_BUSY_ERASE:
4080 case SKD_DRVR_STATE_STARTING:
4081 case SKD_DRVR_STATE_RESTARTING:
4082 case SKD_DRVR_STATE_FAULT:
4083 case SKD_DRVR_STATE_IDLE:
4084 case SKD_DRVR_STATE_LOAD:
4085 skdev->state = SKD_DRVR_STATE_ONLINE;
4086 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
4088 skd_skdev_state_to_str(prev_driver_state),
4089 prev_driver_state, skd_skdev_state_to_str(skdev->state),
4091 DPRINTK(skdev, "**** device ONLINE...starting block queue\n");
4092 VPRINTK(skdev, "starting %s queue\n", skdev->name);
4093 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
4094 skd_start_queue(skdev);
4095 skdev->gendisk_on = 1;
4096 wake_up_interruptible(&skdev->waitq);
4099 case SKD_DRVR_STATE_DISAPPEARED:
4101 DPRINTK(skdev, "**** driver state %d, not implemented \n",
4109 *****************************************************************************
4110 * PCIe MSI/MSI-X INTERRUPT HANDLERS
4111 *****************************************************************************
4114 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
4116 struct skd_device *skdev = skd_host_data;
4117 unsigned long flags;
4119 spin_lock_irqsave(&skdev->lock, flags);
4120 VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST));
4121 pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
4122 irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
4123 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
4124 spin_unlock_irqrestore(&skdev->lock, flags);
4128 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
4130 struct skd_device *skdev = skd_host_data;
4131 unsigned long flags;
4133 spin_lock_irqsave(&skdev->lock, flags);
4134 VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST));
4135 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
4136 skd_isr_fwstate(skdev);
4137 spin_unlock_irqrestore(&skdev->lock, flags);
4141 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
4143 struct skd_device *skdev = skd_host_data;
4144 unsigned long flags;
4145 int flush_enqueued = 0;
4148 spin_lock_irqsave(&skdev->lock, flags);
4149 VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST));
4150 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
4151 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
4155 skd_request_fn(skdev->queue);
4158 schedule_work(&skdev->completion_worker);
4159 else if (!flush_enqueued)
4160 skd_request_fn(skdev->queue);
4162 spin_unlock_irqrestore(&skdev->lock, flags);
4167 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
4169 struct skd_device *skdev = skd_host_data;
4170 unsigned long flags;
4172 spin_lock_irqsave(&skdev->lock, flags);
4173 VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST));
4174 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
4175 skd_isr_msg_from_dev(skdev);
4176 spin_unlock_irqrestore(&skdev->lock, flags);
4180 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
4182 struct skd_device *skdev = skd_host_data;
4183 unsigned long flags;
4185 spin_lock_irqsave(&skdev->lock, flags);
4186 VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST));
4187 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
4188 spin_unlock_irqrestore(&skdev->lock, flags);
4193 *****************************************************************************
4194 * PCIe MSI/MSI-X SETUP
4195 *****************************************************************************
4198 struct skd_msix_entry {
4202 struct skd_device *rsp;
4206 struct skd_init_msix_entry {
4208 irq_handler_t handler;
4211 #define SKD_MAX_MSIX_COUNT 13
4212 #define SKD_MIN_MSIX_COUNT 7
4213 #define SKD_BASE_MSIX_IRQ 4
4215 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
4216 { "(DMA 0)", skd_reserved_isr },
4217 { "(DMA 1)", skd_reserved_isr },
4218 { "(DMA 2)", skd_reserved_isr },
4219 { "(DMA 3)", skd_reserved_isr },
4220 { "(State Change)", skd_statec_isr },
4221 { "(COMPL_Q)", skd_comp_q },
4222 { "(MSG)", skd_msg_isr },
4223 { "(Reserved)", skd_reserved_isr },
4224 { "(Reserved)", skd_reserved_isr },
4225 { "(Queue Full 0)", skd_qfull_isr },
4226 { "(Queue Full 1)", skd_qfull_isr },
4227 { "(Queue Full 2)", skd_qfull_isr },
4228 { "(Queue Full 3)", skd_qfull_isr },
4231 static void skd_release_msix(struct skd_device *skdev)
4233 struct skd_msix_entry *qentry;
4236 if (skdev->msix_entries == NULL)
4238 for (i = 0; i < skdev->msix_count; i++) {
4239 qentry = &skdev->msix_entries[i];
4240 skdev = qentry->rsp;
4242 if (qentry->have_irq)
4243 devm_free_irq(&skdev->pdev->dev,
4244 qentry->vector, qentry->rsp);
4246 pci_disable_msix(skdev->pdev);
4247 kfree(skdev->msix_entries);
4248 skdev->msix_count = 0;
4249 skdev->msix_entries = NULL;
4252 static int skd_acquire_msix(struct skd_device *skdev)
4255 struct pci_dev *pdev;
4256 struct msix_entry *entries = NULL;
4257 struct skd_msix_entry *qentry;
4260 skdev->msix_count = SKD_MAX_MSIX_COUNT;
4261 entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
4266 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
4267 entries[i].entry = i;
4269 rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
4273 if (rc < SKD_MIN_MSIX_COUNT) {
4274 pr_err("(%s): failed to enable MSI-X %d\n",
4275 skd_name(skdev), rc);
4278 DPRINTK(skdev, "%s: <%s> allocated %d MSI-X vectors\n",
4279 pci_name(pdev), skdev->name, rc);
4281 skdev->msix_count = rc;
4282 rc = pci_enable_msix(pdev, entries, skdev->msix_count);
4284 pr_err("(%s): failed to enable MSI-X "
4285 "support (%d) %d\n",
4286 skd_name(skdev), skdev->msix_count, rc);
4290 skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
4291 skdev->msix_count, GFP_KERNEL);
4292 if (!skdev->msix_entries) {
4294 skdev->msix_count = 0;
4295 pr_err("(%s): msix table allocation error\n",
4300 qentry = skdev->msix_entries;
4301 for (i = 0; i < skdev->msix_count; i++) {
4302 qentry->vector = entries[i].vector;
4303 qentry->entry = entries[i].entry;
4305 qentry->have_irq = 0;
4306 DPRINTK(skdev, "%s: <%s> msix (%d) vec %d, entry %x\n",
4307 pci_name(pdev), skdev->name,
4308 i, qentry->vector, qentry->entry);
4312 /* Enable MSI-X vectors for the base queue */
4313 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
4314 qentry = &skdev->msix_entries[i];
4315 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
4316 "%s%d-msix %s", DRV_NAME, skdev->devno,
4317 msix_entries[i].name);
4318 rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
4319 msix_entries[i].handler, 0,
4320 qentry->isr_name, skdev);
4322 pr_err("(%s): Unable to register(%d) MSI-X "
4324 skd_name(skdev), rc, i, qentry->isr_name);
4327 qentry->have_irq = 1;
4328 qentry->rsp = skdev;
4331 DPRINTK(skdev, "%s: <%s> msix %d irq(s) enabled\n",
4332 pci_name(pdev), skdev->name, skdev->msix_count);
4338 skd_release_msix(skdev);
4342 static int skd_acquire_irq(struct skd_device *skdev)
4345 struct pci_dev *pdev;
4348 skdev->msix_count = 0;
4351 switch (skdev->irq_type) {
4353 rc = skd_acquire_msix(skdev);
4355 pr_info("(%s): MSI-X %d irqs enabled\n",
4356 skd_name(skdev), skdev->msix_count);
4359 "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
4360 skd_name(skdev), rc);
4361 skdev->irq_type = SKD_IRQ_MSI;
4362 goto RETRY_IRQ_TYPE;
4366 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
4367 DRV_NAME, skdev->devno);
4368 rc = pci_enable_msi(pdev);
4370 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
4371 skdev->isr_name, skdev);
4373 pci_disable_msi(pdev);
4375 "(%s): failed to allocate the MSI interrupt %d\n",
4376 skd_name(skdev), rc);
4377 goto RETRY_IRQ_LEGACY;
4379 pr_info("(%s): MSI irq %d enabled\n",
4380 skd_name(skdev), pdev->irq);
4384 "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
4385 skd_name(skdev), rc);
4386 skdev->irq_type = SKD_IRQ_LEGACY;
4387 goto RETRY_IRQ_TYPE;
4390 case SKD_IRQ_LEGACY:
4391 snprintf(skdev->isr_name, sizeof(skdev->isr_name),
4392 "%s%d-legacy", DRV_NAME, skdev->devno);
4393 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
4394 IRQF_SHARED, skdev->isr_name, skdev);
4396 pr_info("(%s): LEGACY irq %d enabled\n",
4397 skd_name(skdev), pdev->irq);
4399 pr_err("(%s): request LEGACY irq error %d\n",
4400 skd_name(skdev), rc);
4403 pr_info("(%s): irq_type %d invalid, re-set to %d\n",
4404 skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
4405 skdev->irq_type = SKD_IRQ_LEGACY;
4406 goto RETRY_IRQ_TYPE;
4411 static void skd_release_irq(struct skd_device *skdev)
4413 switch (skdev->irq_type) {
4415 skd_release_msix(skdev);
4418 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4419 pci_disable_msi(skdev->pdev);
4421 case SKD_IRQ_LEGACY:
4422 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4425 pr_err("(%s): wrong irq type %d!",
4426 skd_name(skdev), skdev->irq_type);
4432 *****************************************************************************
4434 *****************************************************************************
4437 static int skd_cons_skcomp(struct skd_device *skdev);
4438 static int skd_cons_skmsg(struct skd_device *skdev);
4439 static int skd_cons_skreq(struct skd_device *skdev);
4440 static int skd_cons_skspcl(struct skd_device *skdev);
4441 static int skd_cons_sksb(struct skd_device *skdev);
4442 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4444 dma_addr_t *ret_dma_addr);
4445 static int skd_cons_disk(struct skd_device *skdev);
4447 #define SKD_N_DEV_TABLE 16u
4448 static u32 skd_next_devno;
4450 static struct skd_device *skd_construct(struct pci_dev *pdev)
4452 struct skd_device *skdev;
4453 int blk_major = skd_major;
4456 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4459 pr_err(PFX "(%s): memory alloc failure\n",
4464 skdev->state = SKD_DRVR_STATE_LOAD;
4466 skdev->devno = skd_next_devno++;
4467 skdev->major = blk_major;
4468 skdev->irq_type = skd_isr_type;
4469 sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4470 skdev->dev_max_queue_depth = 0;
4472 skdev->num_req_context = skd_max_queue_depth;
4473 skdev->num_fitmsg_context = skd_max_queue_depth;
4474 skdev->n_special = skd_max_pass_thru;
4475 skdev->cur_max_queue_depth = 1;
4476 skdev->queue_low_water_mark = 1;
4477 skdev->proto_ver = 99;
4478 skdev->sgs_per_request = skd_sgs_per_request;
4479 skdev->dbg_level = skd_dbg_level;
4482 bio_list_init(&skdev->bio_queue);
4485 atomic_set(&skdev->device_count, 0);
4487 spin_lock_init(&skdev->lock);
4489 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4490 INIT_LIST_HEAD(&skdev->flush_list);
4492 VPRINTK(skdev, "skcomp\n");
4493 rc = skd_cons_skcomp(skdev);
4497 VPRINTK(skdev, "skmsg\n");
4498 rc = skd_cons_skmsg(skdev);
4502 VPRINTK(skdev, "skreq\n");
4503 rc = skd_cons_skreq(skdev);
4507 VPRINTK(skdev, "skspcl\n");
4508 rc = skd_cons_skspcl(skdev);
4512 VPRINTK(skdev, "sksb\n");
4513 rc = skd_cons_sksb(skdev);
4517 VPRINTK(skdev, "disk\n");
4518 rc = skd_cons_disk(skdev);
4524 DPRINTK(skdev, "VICTORY\n");
4528 DPRINTK(skdev, "construct failed\n");
4529 skd_destruct(skdev);
4533 static int skd_cons_skcomp(struct skd_device *skdev)
4536 struct fit_completion_entry_v1 *skcomp;
4539 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
4540 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
4542 VPRINTK(skdev, "comp pci_alloc, total bytes %d entries %d\n", nbytes,
4543 SKD_N_COMPLETION_ENTRY);
4545 skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
4546 &skdev->cq_dma_address);
4548 if (skcomp == NULL) {
4553 memset(skcomp, 0, nbytes);
4555 skdev->skcomp_table = skcomp;
4556 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
4558 SKD_N_COMPLETION_ENTRY);
4564 static int skd_cons_skmsg(struct skd_device *skdev)
4569 VPRINTK(skdev, "skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4570 sizeof(struct skd_fitmsg_context),
4571 skdev->num_fitmsg_context,
4572 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
4574 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4575 *skdev->num_fitmsg_context, GFP_KERNEL);
4576 if (skdev->skmsg_table == NULL) {
4581 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4582 struct skd_fitmsg_context *skmsg;
4584 skmsg = &skdev->skmsg_table[i];
4586 skmsg->id = i + SKD_ID_FIT_MSG;
4588 skmsg->state = SKD_MSG_STATE_IDLE;
4589 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4590 SKD_N_FITMSG_BYTES + 64,
4591 &skmsg->mb_dma_address);
4593 if (skmsg->msg_buf == NULL) {
4598 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4599 (~FIT_QCMD_BASE_ADDRESS_MASK));
4600 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4601 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4602 FIT_QCMD_BASE_ADDRESS_MASK);
4603 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4604 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4605 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4607 skmsg->next = &skmsg[1];
4610 /* Free list is in order starting with the 0th entry. */
4611 skdev->skmsg_table[i - 1].next = NULL;
4612 skdev->skmsg_free_list = skdev->skmsg_table;
4618 static int skd_cons_skreq(struct skd_device *skdev)
4623 VPRINTK(skdev, "skreq_table kzalloc, struct %lu, count %u total %lu\n",
4624 sizeof(struct skd_request_context),
4625 skdev->num_req_context,
4626 sizeof(struct skd_request_context) * skdev->num_req_context);
4628 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4629 * skdev->num_req_context, GFP_KERNEL);
4630 if (skdev->skreq_table == NULL) {
4635 VPRINTK(skdev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4636 skdev->sgs_per_request, sizeof(struct scatterlist),
4637 skdev->sgs_per_request * sizeof(struct scatterlist));
4639 for (i = 0; i < skdev->num_req_context; i++) {
4640 struct skd_request_context *skreq;
4642 skreq = &skdev->skreq_table[i];
4644 skreq->id = i + SKD_ID_RW_REQUEST;
4645 skreq->state = SKD_REQ_STATE_IDLE;
4647 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4648 skdev->sgs_per_request, GFP_KERNEL);
4649 if (skreq->sg == NULL) {
4653 sg_init_table(skreq->sg, skdev->sgs_per_request);
4655 skreq->sksg_list = skd_cons_sg_list(skdev,
4656 skdev->sgs_per_request,
4657 &skreq->sksg_dma_address);
4659 if (skreq->sksg_list == NULL) {
4664 skreq->next = &skreq[1];
4667 /* Free list is in order starting with the 0th entry. */
4668 skdev->skreq_table[i - 1].next = NULL;
4669 skdev->skreq_free_list = skdev->skreq_table;
4675 static int skd_cons_skspcl(struct skd_device *skdev)
4680 VPRINTK(skdev, "skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4681 sizeof(struct skd_special_context),
4683 sizeof(struct skd_special_context) * skdev->n_special);
4685 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4686 * skdev->n_special, GFP_KERNEL);
4687 if (skdev->skspcl_table == NULL) {
4692 for (i = 0; i < skdev->n_special; i++) {
4693 struct skd_special_context *skspcl;
4695 skspcl = &skdev->skspcl_table[i];
4697 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4698 skspcl->req.state = SKD_REQ_STATE_IDLE;
4700 skspcl->req.next = &skspcl[1].req;
4702 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4704 skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4705 &skspcl->mb_dma_address);
4706 if (skspcl->msg_buf == NULL) {
4711 memset(skspcl->msg_buf, 0, nbytes);
4713 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4714 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4715 if (skspcl->req.sg == NULL) {
4720 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4721 SKD_N_SG_PER_SPECIAL,
4724 if (skspcl->req.sksg_list == NULL) {
4730 /* Free list is in order starting with the 0th entry. */
4731 skdev->skspcl_table[i - 1].req.next = NULL;
4732 skdev->skspcl_free_list = skdev->skspcl_table;
4740 static int skd_cons_sksb(struct skd_device *skdev)
4743 struct skd_special_context *skspcl;
4746 skspcl = &skdev->internal_skspcl;
4748 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4749 skspcl->req.state = SKD_REQ_STATE_IDLE;
4751 nbytes = SKD_N_INTERNAL_BYTES;
4753 skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4754 &skspcl->db_dma_address);
4755 if (skspcl->data_buf == NULL) {
4760 memset(skspcl->data_buf, 0, nbytes);
4762 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4763 skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4764 &skspcl->mb_dma_address);
4765 if (skspcl->msg_buf == NULL) {
4770 memset(skspcl->msg_buf, 0, nbytes);
4772 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4773 &skspcl->req.sksg_dma_address);
4774 if (skspcl->req.sksg_list == NULL) {
4779 if (!skd_format_internal_skspcl(skdev)) {
4788 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4790 dma_addr_t *ret_dma_addr)
4792 struct fit_sg_descriptor *sg_list;
4795 nbytes = sizeof(*sg_list) * n_sg;
4797 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4799 if (sg_list != NULL) {
4800 uint64_t dma_address = *ret_dma_addr;
4803 memset(sg_list, 0, nbytes);
4805 for (i = 0; i < n_sg - 1; i++) {
4807 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4809 sg_list[i].next_desc_ptr = dma_address + ndp_off;
4811 sg_list[i].next_desc_ptr = 0LL;
4817 static int skd_cons_disk(struct skd_device *skdev)
4820 struct gendisk *disk;
4821 struct request_queue *q;
4822 unsigned long flags;
4824 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4831 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4833 disk->major = skdev->major;
4834 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4835 disk->fops = &skd_blockdev_ops;
4836 disk->private_data = skdev;
4839 q = blk_init_queue(skd_request_fn, &skdev->lock);
4841 q = blk_alloc_queue(GFP_KERNEL);
4842 q->queue_flags = QUEUE_FLAG_IO_STAT | QUEUE_FLAG_STACKABLE;
4852 q->queuedata = skdev;
4855 q->queue_lock = &skdev->lock;
4856 blk_queue_make_request(q, skd_make_request);
4859 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
4860 blk_queue_max_segments(q, skdev->sgs_per_request);
4861 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4863 /* set sysfs ptimal_io_size to 8K */
4864 blk_queue_io_opt(q, 8192);
4866 /* DISCARD Flag initialization. */
4867 q->limits.discard_granularity = 8192;
4868 q->limits.discard_alignment = 0;
4869 q->limits.max_discard_sectors = UINT_MAX >> 9;
4870 q->limits.discard_zeroes_data = 1;
4871 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4872 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4874 spin_lock_irqsave(&skdev->lock, flags);
4875 VPRINTK(skdev, "stopping %s queue\n", skdev->name);
4876 skd_stop_queue(skdev);
4877 spin_unlock_irqrestore(&skdev->lock, flags);
4884 *****************************************************************************
4886 *****************************************************************************
4889 static void skd_free_skcomp(struct skd_device *skdev);
4890 static void skd_free_skmsg(struct skd_device *skdev);
4891 static void skd_free_skreq(struct skd_device *skdev);
4892 static void skd_free_skspcl(struct skd_device *skdev);
4893 static void skd_free_sksb(struct skd_device *skdev);
4894 static void skd_free_sg_list(struct skd_device *skdev,
4895 struct fit_sg_descriptor *sg_list,
4896 u32 n_sg, dma_addr_t dma_addr);
4897 static void skd_free_disk(struct skd_device *skdev);
4899 static void skd_destruct(struct skd_device *skdev)
4905 VPRINTK(skdev, "disk\n");
4906 skd_free_disk(skdev);
4908 VPRINTK(skdev, "sksb\n");
4909 skd_free_sksb(skdev);
4911 VPRINTK(skdev, "skspcl\n");
4912 skd_free_skspcl(skdev);
4914 VPRINTK(skdev, "skreq\n");
4915 skd_free_skreq(skdev);
4917 VPRINTK(skdev, "skmsg\n");
4918 skd_free_skmsg(skdev);
4920 VPRINTK(skdev, "skcomp\n");
4921 skd_free_skcomp(skdev);
4923 VPRINTK(skdev, "skdev\n");
4927 static void skd_free_skcomp(struct skd_device *skdev)
4929 if (skdev->skcomp_table != NULL) {
4932 nbytes = sizeof(skdev->skcomp_table[0]) *
4933 SKD_N_COMPLETION_ENTRY;
4934 pci_free_consistent(skdev->pdev, nbytes,
4935 skdev->skcomp_table, skdev->cq_dma_address);
4938 skdev->skcomp_table = NULL;
4939 skdev->cq_dma_address = 0;
4942 static void skd_free_skmsg(struct skd_device *skdev)
4946 if (skdev->skmsg_table == NULL)
4949 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4950 struct skd_fitmsg_context *skmsg;
4952 skmsg = &skdev->skmsg_table[i];
4954 if (skmsg->msg_buf != NULL) {
4955 skmsg->msg_buf += skmsg->offset;
4956 skmsg->mb_dma_address += skmsg->offset;
4957 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4959 skmsg->mb_dma_address);
4961 skmsg->msg_buf = NULL;
4962 skmsg->mb_dma_address = 0;
4965 kfree(skdev->skmsg_table);
4966 skdev->skmsg_table = NULL;
4969 static void skd_free_skreq(struct skd_device *skdev)
4973 if (skdev->skreq_table == NULL)
4976 for (i = 0; i < skdev->num_req_context; i++) {
4977 struct skd_request_context *skreq;
4979 skreq = &skdev->skreq_table[i];
4981 skd_free_sg_list(skdev, skreq->sksg_list,
4982 skdev->sgs_per_request,
4983 skreq->sksg_dma_address);
4985 skreq->sksg_list = NULL;
4986 skreq->sksg_dma_address = 0;
4991 kfree(skdev->skreq_table);
4992 skdev->skreq_table = NULL;
4995 static void skd_free_skspcl(struct skd_device *skdev)
5000 if (skdev->skspcl_table == NULL)
5003 for (i = 0; i < skdev->n_special; i++) {
5004 struct skd_special_context *skspcl;
5006 skspcl = &skdev->skspcl_table[i];
5008 if (skspcl->msg_buf != NULL) {
5009 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
5010 pci_free_consistent(skdev->pdev, nbytes,
5012 skspcl->mb_dma_address);
5015 skspcl->msg_buf = NULL;
5016 skspcl->mb_dma_address = 0;
5018 skd_free_sg_list(skdev, skspcl->req.sksg_list,
5019 SKD_N_SG_PER_SPECIAL,
5020 skspcl->req.sksg_dma_address);
5022 skspcl->req.sksg_list = NULL;
5023 skspcl->req.sksg_dma_address = 0;
5025 kfree(skspcl->req.sg);
5028 kfree(skdev->skspcl_table);
5029 skdev->skspcl_table = NULL;
5032 static void skd_free_sksb(struct skd_device *skdev)
5034 struct skd_special_context *skspcl;
5037 skspcl = &skdev->internal_skspcl;
5039 if (skspcl->data_buf != NULL) {
5040 nbytes = SKD_N_INTERNAL_BYTES;
5042 pci_free_consistent(skdev->pdev, nbytes,
5043 skspcl->data_buf, skspcl->db_dma_address);
5046 skspcl->data_buf = NULL;
5047 skspcl->db_dma_address = 0;
5049 if (skspcl->msg_buf != NULL) {
5050 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
5051 pci_free_consistent(skdev->pdev, nbytes,
5052 skspcl->msg_buf, skspcl->mb_dma_address);
5055 skspcl->msg_buf = NULL;
5056 skspcl->mb_dma_address = 0;
5058 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
5059 skspcl->req.sksg_dma_address);
5061 skspcl->req.sksg_list = NULL;
5062 skspcl->req.sksg_dma_address = 0;
5065 static void skd_free_sg_list(struct skd_device *skdev,
5066 struct fit_sg_descriptor *sg_list,
5067 u32 n_sg, dma_addr_t dma_addr)
5069 if (sg_list != NULL) {
5072 nbytes = sizeof(*sg_list) * n_sg;
5074 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
5078 static void skd_free_disk(struct skd_device *skdev)
5080 struct gendisk *disk = skdev->disk;
5083 struct request_queue *q = disk->queue;
5085 if (disk->flags & GENHD_FL_UP)
5088 blk_cleanup_queue(q);
5097 *****************************************************************************
5098 * BLOCK DEVICE (BDEV) GLUE
5099 *****************************************************************************
5102 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5104 struct skd_device *skdev;
5107 skdev = bdev->bd_disk->private_data;
5109 DPRINTK(skdev, "%s: CMD[%s] getgeo device\n",
5110 bdev->bd_disk->disk_name, current->comm);
5112 if (skdev->read_cap_is_valid) {
5113 capacity = get_capacity(skdev->disk);
5116 geo->cylinders = (capacity) / (255 * 64);
5123 static int skd_bdev_attach(struct skd_device *skdev)
5125 DPRINTK(skdev, "add_disk\n");
5126 add_disk(skdev->disk);
5130 static const struct block_device_operations skd_blockdev_ops = {
5131 .owner = THIS_MODULE,
5132 .ioctl = skd_bdev_ioctl,
5133 .getgeo = skd_bdev_getgeo,
5138 *****************************************************************************
5140 *****************************************************************************
5143 static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
5144 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
5145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5146 { 0 } /* terminate list */
5149 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
5151 static char *skd_pci_info(struct skd_device *skdev, char *str)
5155 strcpy(str, "PCIe (");
5156 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
5161 uint16_t pcie_lstat, lspeed, lwidth;
5164 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
5165 lspeed = pcie_lstat & (0xF);
5166 lwidth = (pcie_lstat & 0x3F0) >> 4;
5169 strcat(str, "2.5GT/s ");
5170 else if (lspeed == 2)
5171 strcat(str, "5.0GT/s ");
5173 strcat(str, "<unknown> ");
5174 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
5180 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5185 struct skd_device *skdev;
5187 pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
5188 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
5189 pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
5190 pci_name(pdev), pdev->vendor, pdev->device);
5192 rc = pci_enable_device(pdev);
5195 rc = pci_request_regions(pdev, DRV_NAME);
5198 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5200 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5202 pr_err("(%s): consistent DMA mask error %d\n",
5203 pci_name(pdev), rc);
5206 (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)));
5209 pr_err("(%s): DMA mask error %d\n",
5210 pci_name(pdev), rc);
5211 goto err_out_regions;
5215 skdev = skd_construct(pdev);
5217 goto err_out_regions;
5219 skd_pci_info(skdev, pci_str);
5220 pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
5222 pci_set_master(pdev);
5223 rc = pci_enable_pcie_error_reporting(pdev);
5226 "(%s): bad enable of PCIe error reporting rc=%d\n",
5227 skd_name(skdev), rc);
5228 skdev->pcie_error_reporting_is_enabled = 0;
5230 skdev->pcie_error_reporting_is_enabled = 1;
5233 pci_set_drvdata(pdev, skdev);
5235 skdev->disk->driverfs_dev = &pdev->dev;
5237 for (i = 0; i < SKD_MAX_BARS; i++) {
5238 skdev->mem_phys[i] = pci_resource_start(pdev, i);
5239 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5240 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5241 skdev->mem_size[i]);
5242 if (!skdev->mem_map[i]) {
5243 pr_err("(%s): Unable to map adapter memory!\n",
5246 goto err_out_iounmap;
5248 DPRINTK(skdev, "mem_map=%p, phyd=%016llx, size=%d\n",
5250 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
5253 rc = skd_acquire_irq(skdev);
5255 pr_err("(%s): interrupt resource error %d\n",
5256 skd_name(skdev), rc);
5257 goto err_out_iounmap;
5260 rc = skd_start_timer(skdev);
5264 init_waitqueue_head(&skdev->waitq);
5266 skd_start_device(skdev);
5268 rc = wait_event_interruptible_timeout(skdev->waitq,
5269 (skdev->gendisk_on),
5270 (SKD_START_WAIT_SECONDS * HZ));
5271 if (skdev->gendisk_on > 0) {
5272 /* device came on-line after reset */
5273 skd_bdev_attach(skdev);
5276 /* we timed out, something is wrong with the device,
5277 don't add the disk structure */
5279 "(%s): error: waiting for s1120 timed out %d!\n",
5280 skd_name(skdev), rc);
5281 /* in case of no error; we timeout with ENXIO */
5288 #ifdef SKD_VMK_POLL_HANDLER
5289 if (skdev->irq_type == SKD_IRQ_MSIX) {
5290 /* MSIX completion handler is being used for coredump */
5291 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
5292 skdev->msix_entries[5].vector,
5295 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
5296 skdev->pdev->irq, skd_isr,
5299 #endif /* SKD_VMK_POLL_HANDLER */
5304 skd_stop_device(skdev);
5305 skd_release_irq(skdev);
5308 for (i = 0; i < SKD_MAX_BARS; i++)
5309 if (skdev->mem_map[i])
5310 iounmap(skdev->mem_map[i]);
5312 if (skdev->pcie_error_reporting_is_enabled)
5313 pci_disable_pcie_error_reporting(pdev);
5315 skd_destruct(skdev);
5318 pci_release_regions(pdev);
5321 pci_disable_device(pdev);
5322 pci_set_drvdata(pdev, NULL);
5326 static void skd_pci_remove(struct pci_dev *pdev)
5329 struct skd_device *skdev;
5331 skdev = pci_get_drvdata(pdev);
5333 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5336 skd_stop_device(skdev);
5337 skd_release_irq(skdev);
5339 for (i = 0; i < SKD_MAX_BARS; i++)
5340 if (skdev->mem_map[i])
5341 iounmap((u32 *)skdev->mem_map[i]);
5343 if (skdev->pcie_error_reporting_is_enabled)
5344 pci_disable_pcie_error_reporting(pdev);
5346 skd_destruct(skdev);
5348 pci_release_regions(pdev);
5349 pci_disable_device(pdev);
5350 pci_set_drvdata(pdev, NULL);
5355 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5358 struct skd_device *skdev;
5360 skdev = pci_get_drvdata(pdev);
5362 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5366 skd_stop_device(skdev);
5368 skd_release_irq(skdev);
5370 for (i = 0; i < SKD_MAX_BARS; i++)
5371 if (skdev->mem_map[i])
5372 iounmap((u32 *)skdev->mem_map[i]);
5374 if (skdev->pcie_error_reporting_is_enabled)
5375 pci_disable_pcie_error_reporting(pdev);
5377 pci_release_regions(pdev);
5378 pci_save_state(pdev);
5379 pci_disable_device(pdev);
5380 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5384 static int skd_pci_resume(struct pci_dev *pdev)
5388 struct skd_device *skdev;
5390 skdev = pci_get_drvdata(pdev);
5392 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5396 pci_set_power_state(pdev, PCI_D0);
5397 pci_enable_wake(pdev, PCI_D0, 0);
5398 pci_restore_state(pdev);
5400 rc = pci_enable_device(pdev);
5403 rc = pci_request_regions(pdev, DRV_NAME);
5406 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5408 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5410 pr_err("(%s): consistent DMA mask error %d\n",
5411 pci_name(pdev), rc);
5414 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5417 pr_err("(%s): DMA mask error %d\n",
5418 pci_name(pdev), rc);
5419 goto err_out_regions;
5423 pci_set_master(pdev);
5424 rc = pci_enable_pcie_error_reporting(pdev);
5426 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
5428 skdev->pcie_error_reporting_is_enabled = 0;
5430 skdev->pcie_error_reporting_is_enabled = 1;
5432 for (i = 0; i < SKD_MAX_BARS; i++) {
5434 skdev->mem_phys[i] = pci_resource_start(pdev, i);
5435 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5436 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5437 skdev->mem_size[i]);
5438 if (!skdev->mem_map[i]) {
5439 pr_err("(%s): Unable to map adapter memory!\n",
5442 goto err_out_iounmap;
5444 DPRINTK(skdev, "mem_map=%p, phyd=%016llx, size=%d\n",
5446 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
5448 rc = skd_acquire_irq(skdev);
5451 pr_err("(%s): interrupt resource error %d\n",
5452 pci_name(pdev), rc);
5453 goto err_out_iounmap;
5456 rc = skd_start_timer(skdev);
5460 init_waitqueue_head(&skdev->waitq);
5462 skd_start_device(skdev);
5467 skd_stop_device(skdev);
5468 skd_release_irq(skdev);
5471 for (i = 0; i < SKD_MAX_BARS; i++)
5472 if (skdev->mem_map[i])
5473 iounmap(skdev->mem_map[i]);
5475 if (skdev->pcie_error_reporting_is_enabled)
5476 pci_disable_pcie_error_reporting(pdev);
5479 pci_release_regions(pdev);
5482 pci_disable_device(pdev);
5486 static void skd_pci_shutdown(struct pci_dev *pdev)
5488 struct skd_device *skdev;
5490 pr_err("skd_pci_shutdown called\n");
5492 skdev = pci_get_drvdata(pdev);
5494 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5498 pr_err("%s: calling stop\n", skd_name(skdev));
5499 skd_stop_device(skdev);
5502 static struct pci_driver skd_driver = {
5504 .id_table = skd_pci_tbl,
5505 .probe = skd_pci_probe,
5506 .remove = skd_pci_remove,
5507 .suspend = skd_pci_suspend,
5508 .resume = skd_pci_resume,
5509 .shutdown = skd_pci_shutdown,
5513 *****************************************************************************
5515 *****************************************************************************
5518 static const char *skd_name(struct skd_device *skdev)
5520 memset(skdev->id_str, 0, sizeof(skdev->id_str));
5522 if (skdev->inquiry_is_valid)
5523 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
5524 skdev->name, skdev->inq_serial_num,
5525 pci_name(skdev->pdev));
5527 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
5528 skdev->name, pci_name(skdev->pdev));
5530 return skdev->id_str;
5533 const char *skd_drive_state_to_str(int state)
5536 case FIT_SR_DRIVE_OFFLINE:
5538 case FIT_SR_DRIVE_INIT:
5540 case FIT_SR_DRIVE_ONLINE:
5542 case FIT_SR_DRIVE_BUSY:
5544 case FIT_SR_DRIVE_FAULT:
5546 case FIT_SR_DRIVE_DEGRADED:
5548 case FIT_SR_PCIE_LINK_DOWN:
5550 case FIT_SR_DRIVE_SOFT_RESET:
5551 return "SOFT_RESET";
5552 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5554 case FIT_SR_DRIVE_INIT_FAULT:
5555 return "INIT_FAULT";
5556 case FIT_SR_DRIVE_BUSY_SANITIZE:
5557 return "BUSY_SANITIZE";
5558 case FIT_SR_DRIVE_BUSY_ERASE:
5559 return "BUSY_ERASE";
5560 case FIT_SR_DRIVE_FW_BOOTING:
5561 return "FW_BOOTING";
5567 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5570 case SKD_DRVR_STATE_LOAD:
5572 case SKD_DRVR_STATE_IDLE:
5574 case SKD_DRVR_STATE_BUSY:
5576 case SKD_DRVR_STATE_STARTING:
5578 case SKD_DRVR_STATE_ONLINE:
5580 case SKD_DRVR_STATE_PAUSING:
5582 case SKD_DRVR_STATE_PAUSED:
5584 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5585 return "DRAINING_TIMEOUT";
5586 case SKD_DRVR_STATE_RESTARTING:
5587 return "RESTARTING";
5588 case SKD_DRVR_STATE_RESUMING:
5590 case SKD_DRVR_STATE_STOPPING:
5592 case SKD_DRVR_STATE_SYNCING:
5594 case SKD_DRVR_STATE_FAULT:
5596 case SKD_DRVR_STATE_DISAPPEARED:
5597 return "DISAPPEARED";
5598 case SKD_DRVR_STATE_BUSY_ERASE:
5599 return "BUSY_ERASE";
5600 case SKD_DRVR_STATE_BUSY_SANITIZE:
5601 return "BUSY_SANITIZE";
5602 case SKD_DRVR_STATE_BUSY_IMMINENT:
5603 return "BUSY_IMMINENT";
5604 case SKD_DRVR_STATE_WAIT_BOOT:
5612 const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5615 case SKD_MSG_STATE_IDLE:
5617 case SKD_MSG_STATE_BUSY:
5624 const char *skd_skreq_state_to_str(enum skd_req_state state)
5627 case SKD_REQ_STATE_IDLE:
5629 case SKD_REQ_STATE_SETUP:
5631 case SKD_REQ_STATE_BUSY:
5633 case SKD_REQ_STATE_COMPLETED:
5635 case SKD_REQ_STATE_TIMEOUT:
5637 case SKD_REQ_STATE_ABORTED:
5644 static void skd_log_skdev(struct skd_device *skdev, const char *event)
5646 DPRINTK(skdev, "(%s) skdev=%p event='%s'\n", skdev->name, skdev, event);
5647 DPRINTK(skdev, " drive_state=%s(%d) driver_state=%s(%d)\n",
5648 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5649 skd_skdev_state_to_str(skdev->state), skdev->state);
5650 DPRINTK(skdev, " busy=%d limit=%d dev=%d lowat=%d\n",
5651 skdev->in_flight, skdev->cur_max_queue_depth,
5652 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5653 DPRINTK(skdev, " timestamp=0x%x cycle=%d cycle_ix=%d\n",
5654 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5657 static void skd_log_skmsg(struct skd_device *skdev,
5658 struct skd_fitmsg_context *skmsg, const char *event)
5660 DPRINTK(skdev, "(%s) skmsg=%p event='%s'\n", skdev->name, skmsg, event);
5661 DPRINTK(skdev, " state=%s(%d) id=0x%04x length=%d\n",
5662 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5663 skmsg->id, skmsg->length);
5666 static void skd_log_skreq(struct skd_device *skdev,
5667 struct skd_request_context *skreq, const char *event)
5669 DPRINTK(skdev, "(%s) skreq=%p event='%s'\n", skdev->name, skreq, event);
5670 DPRINTK(skdev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5671 skd_skreq_state_to_str(skreq->state), skreq->state,
5672 skreq->id, skreq->fitmsg_id);
5673 DPRINTK(skdev, " timo=0x%x sg_dir=%d n_sg=%d\n",
5674 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5677 if (skreq->req != NULL) {
5678 struct request *req = skreq->req;
5679 u32 lba = (u32)blk_rq_pos(req);
5680 u32 count = blk_rq_sectors(req);
5683 " req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5684 req, lba, lba, count, count,
5685 (int)rq_data_dir(req));
5687 DPRINTK(skdev, " req=NULL\n");
5689 if (skreq->bio != NULL) {
5690 struct bio *bio = skreq->bio;
5691 u32 lba = (u32)bio->bi_sector;
5692 u32 count = bio_sectors(bio);
5695 " bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5696 bio, lba, lba, count, count,
5697 (int)bio_data_dir(bio));
5699 DPRINTK(skdev, " req=NULL\n");
5704 *****************************************************************************
5706 *****************************************************************************
5709 static int __init skd_init(void)
5713 pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5715 switch (skd_isr_type) {
5716 case SKD_IRQ_LEGACY:
5721 pr_info("skd_isr_type %d invalid, re-set to %d\n",
5722 skd_isr_type, SKD_IRQ_DEFAULT);
5723 skd_isr_type = SKD_IRQ_DEFAULT;
5726 skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
5727 sizeof(struct skd_flush_cmd),
5730 if (!skd_flush_slab) {
5731 pr_err("failed to allocated flush slab.\n");
5735 if (skd_max_queue_depth < 1
5736 || skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5738 "skd_max_queue_depth %d invalid, re-set to %d\n",
5739 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5740 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5743 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5745 "skd_max_req_per_msg %d invalid, re-set to %d\n",
5746 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5747 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5750 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5752 "skd_sg_per_request %d invalid, re-set to %d\n",
5753 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5754 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5757 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5758 pr_info("skd_dbg_level %d invalid, re-set to %d\n",
5763 if (skd_isr_comp_limit < 0) {
5764 pr_info("skd_isr_comp_limit %d invalid, set to %d\n",
5765 skd_isr_comp_limit, 0);
5766 skd_isr_comp_limit = 0;
5769 if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5770 pr_info("skd_max_pass_thru %d invalid, re-set to %d\n",
5771 skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5772 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5775 /* Obtain major device number. */
5776 rc = register_blkdev(0, DRV_NAME);
5782 return pci_register_driver(&skd_driver);
5786 static void __exit skd_exit(void)
5788 pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5790 unregister_blkdev(skd_major, DRV_NAME);
5791 pci_unregister_driver(&skd_driver);
5793 kmem_cache_destroy(skd_flush_slab);
5797 skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
5799 struct skd_flush_cmd *item;
5801 item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
5803 pr_err("skd_flush_cmd_enqueue: Failed to allocated item.\n");
5808 list_add_tail(&item->flist, &skdev->flush_list);
5813 skd_flush_cmd_dequeue(struct skd_device *skdev)
5816 struct skd_flush_cmd *item;
5818 item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
5819 list_del_init(&item->flist);
5821 kmem_cache_free(skd_flush_slab, item);
5825 module_init(skd_init);
5826 module_exit(skd_exit);