/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <scsi/scsicam.h>
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
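/*
 * Usage sketch (editorial illustration, not in the original header): every
 * controller mode fills in one of these tables, and the driver submits
 * through it so one code path serves all hardware variants, e.g.
 *
 *	h->access.submit_command(h, c);	 resolves to SA5_submit_command,
 *					 SA5_submit_command_ioaccel2, etc.
 */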
/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};
struct hpsa_sas_port {
	struct list_head port_list_entry;
	struct sas_port *port;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};
struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};
struct hpsa_scsi_dev_t {
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 removed : 1;			/* device is marked for death */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* Count of commands to be affected */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
					 * counts commands sent to physical
					 * device via "ioaccel" path.
					 */
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make up those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You could have, for instance, 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks.  We need these pointers for counting I/Os out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	struct hpsa_sas_port *sas_port;
	int external;	/* 1: from external array, 0: not, <0: unknown */
};
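/*
 * Illustrative helper (an editorial assumption, not the driver's actual
 * code; hpsa.c does this accounting inline): an ioaccel request may only
 * be sent while the per-physical-device outstanding count stays within
 * queue_depth, and the caller must decrement the counter again if
 * submission fails or falls back to the RAID path.
 */
static inline bool example_ioaccel_has_room(struct hpsa_scsi_dev_t *dev)
{
	return atomic_inc_return(&dev->ioaccel_cmds_out) <= dev->queue_depth;
}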
struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	u64 busaddr;
};
struct bmic_controller_parameters {
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 snapshot_priority;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
} __attribute__((packed));
struct ctlr_info {
	struct pci_dev *pdev;
	void __iomem *vaddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;
	/* queue and queue info */
	u8 max_cmd_sg_entries;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	spinlock_t scan_lock;
	wait_queue_head_t scan_wait_queue;
	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;
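	/*
	 * Usage sketch (editorial, not in the original header): the ioctl
	 * path reserves a slot before building a passthru command, roughly
	 *
	 *	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
	 *		return -EBUSY;
	 *
	 * and increments the counter again when the command completes.
	 */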
	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u64 last_intr_timestamp;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_IOACCEL_ENABLED	(1 << 15)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
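	/*
	 * Usage sketch (editorial, not in the original header): the rescan
	 * worker compares the event word the controller posts against this
	 * mask, roughly
	 *
	 *	if (h->events & RESCAN_REQUIRED_EVENT_BITS)
	 *		rescan devices and acknowledge the events;
	 */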
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	raid_offload_debug;
	int	discovery_polling;
	struct	ReportLUNdata *lastlogicals;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t abort_cmd_wait_queue;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
};
struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)
/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
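/*
 * Sketch of the back-off these limits imply (an editorial illustration,
 * not the driver's actual helper, which lives in hpsa.c): the wait between
 * TEST UNIT READY attempts grows on each retry but is capped at
 * HPSA_MAX_WAIT_INTERVAL_SECS.
 */
static inline unsigned int example_next_tur_wait_secs(unsigned int prev_secs)
{
	unsigned int next = prev_secs ? prev_secs * 2 : 1;

	return next > HPSA_MAX_WAIT_INTERVAL_SECS ?
			HPSA_MAX_WAIT_INTERVAL_SECS : next;
}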
/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL			0x20
#define SA5_REQUEST_PORT_OFFSET		0x40
#define SA5_REQUEST_PORT64_LO_OFFSET	0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET	0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS			0x30
#define SA5_SCRATCHPAD_OFFSET		0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8
#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02
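/*
 * Polling sketch (an editorial illustration; the real loop lives in
 * hpsa.c): spin on the scratchpad register until the firmware posts
 * HPSA_FIRMWARE_READY or HPSA_BOARD_READY_ITERATIONS polls elapse.
 * Assumes <linux/delay.h> is available for msleep().
 */
static inline int example_wait_for_board_ready(struct ctlr_info *h)
{
	unsigned long i;

	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
		if (readl(h->vaddr + SA5_SCRATCHPAD_OFFSET) ==
				HPSA_FIRMWARE_READY)
			return 0;
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	return -ENODEV;	/* board never became ready */
}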
/* Performant mode flags */
#define SA5_PERF_INTR_PENDING		0x04
#define SA5_PERF_INTR_OFF		0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR			0xA0
#define SA5_OUTDB_STATUS		0x9C
#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0
/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4
#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			0
#define HPSA_LEGACY_HBA_BUS		3
/*
 *	Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
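/*
 * Note (editorial, not from the original source): SA5_submit_command reads
 * the scratchpad register back after ringing the doorbell to flush the
 * posted PCI write; the _no_read and ioaccel2 variants omit that read on
 * controllers where the flush is unnecessary, saving one MMIO round trip
 * per submitted command.
 */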
/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->msi_vector || h->msix_vector))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}
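/*
 * Editorial note (not in the original source): the low bit of each reply
 * queue entry acts as a phase bit.  The controller inverts the bit it
 * writes on every pass over the ring, and the host toggles rq->wraparound
 * whenever it wraps, so "entry low bit == rq->wraparound" holds exactly
 * when the entry belongs to the current pass and has not been consumed.
 */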
/*
 *  returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
/*
 *	Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}
static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}
#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}
#define IOACCEL_MODE1_REPLY_QUEUE_INDEX	0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX	0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX	0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL
static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}
static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_intr_pending,
	SA5_completed,
};
static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};
static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
static struct access_method SA5_performant_access_no_read = {
	SA5_submit_command_no_read,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};
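/*
 * Usage sketch (editorial assumption, mirroring the probe path in hpsa.c):
 * each supported PCI ID maps to a board_type entry whose ->access points
 * at one of the method tables above, and the controller copies it during
 * initialization, e.g.
 *
 *	h->access = *(products[prod_index].access);
 */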