/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.16-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
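/*
 * For illustration (this example is not in the original source):
 * "3.4.16-0" encodes the three dotted byte values 3, 4 and 16 plus the
 * optional trailing build byte 0; "3.4.16" without the "-0" suffix would
 * be an equally valid version string.
 */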
/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
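/*
 * Illustration (not part of the original source): each board_id below
 * packs the PCI subsystem device ID into the upper 16 bits and the
 * subsystem vendor ID into the lower 16 bits.  For example, the Smart
 * Array P212 entry in the PCI table above has subsystem IDs 0x103C/0x3241,
 * so its board_id is (0x3241 << 16) | 0x103C == 0x3241103C.
 */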
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
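/*
 * Usage sketch (the host number is hypothetical): the accelerated I/O
 * path can be toggled from user space with
 *	echo 1 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 * and disabled again by writing 0 to the same attribute.
 */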
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
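/*
 * Usage sketch (the host number is hypothetical): writing anything to the
 * rescan attribute kicks off a controller scan, e.g.
 *	echo 1 > /sys/class/scsi_host/host0/rescan
 */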
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
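/*
 * Illustration (not in the original source): in the 8-byte CISS LUN
 * address, the top two bits of byte 3 select the addressing mode, and
 * mode 0x40 marks a logical volume.  So an address whose byte 3 is, say,
 * 0x41 is treated as a logical device, while 0x00 (peripheral mode) is not.
 */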
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	NULL,
};
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
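/*
 * Worked example (illustrative, not from the original source): a normal
 * performant-mode command whose SG count maps to block fetch table entry 3
 * would be posted as
 *
 *	c->busaddr |= 1 | (3 << 1);
 *
 * i.e. bit 0 set for performant mode, bits 1-3 holding the fetch table
 * entry, and bits 4-6 left zero so the controller treats it as a normal
 * (non-ioaccel1) command.
 */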
/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
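/*
 * Illustrative arithmetic (not in the original source): in normal
 * operation the lockup detector samples the heartbeat register every
 * 30 seconds; while a firmware flash command is outstanding it backs off
 * to every 240 seconds, so a heartbeat slowed by the flash is not
 * misdiagnosed as a locked-up controller.
 */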
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
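/*
 * Worked example (illustrative): if targets 0, 1 and 3 are already taken
 * on the given bus, the loop above sets bits 0, 1 and 3 in lun_taken, and
 * find_first_zero_bit() returns 2, so the new device becomes target 2,
 * lun 0.
 */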
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
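/*
 * Illustration (hypothetical addresses): two LUNs of the same multi-lun
 * device report 8-byte addresses that are identical except in bytes 4-5,
 * with byte 4 carrying the logical unit number.  The code above therefore
 * masks bytes 4-5 before comparing, copies bus/target from the matching
 * sibling, and takes the lun directly from scsi3addr[4].
 */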
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
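/*
 * Usage sketch (illustrative): SCSI3ADDR_EQ(dev1->scsi3addr,
 * dev2->scsi3addr) is true only when all eight address bytes match; it is
 * how hpsa_scsi_find_entry() below decides that two entries refer to the
 * same LUN address before comparing vendor/model/serial data.
 */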
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
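/*
 * Worked example (illustrative): a logical volume striped over four
 * physical disks that each report queue_depth 31 ends up with
 * min(h->nr_cmds, 31 * 4) == 124 (assuming nr_cmds is larger), while a
 * volume with no usable RAID map entries falls back to h->nr_cmds.
 */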
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}
static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > 20)
			break;
		dev_warn(&h->pdev->dev,
			"%s: removing device with %d outstanding commands!\n",
			__func__, cmds);
		msleep(1000);
	}
}
static void hpsa_remove_device(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else { /* HBA */

		device->removed = 1;
		hpsa_wait_for_outstanding_commands_for_dev(h, device);

		hpsa_remove_sas_device(device);
	}
}
1842 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1843 struct hpsa_scsi_dev_t *sd[], int nsds)
1845 /* sd contains scsi3 addresses and devtypes, and inquiry
1846 * data. This function takes what's in sd to be the current
1847 * reality and updates h->dev[] to reflect that reality.
1849 int i, entry, device_change, changes = 0;
1850 struct hpsa_scsi_dev_t *csd;
1851 unsigned long flags;
1852 struct hpsa_scsi_dev_t **added, **removed;
1853 int nadded, nremoved;
1856 * A reset can cause a device status to change
1857 * re-schedule the scan to see what happened.
1859 if (h->reset_in_progress) {
1860 h->drv_req_rescan = 1;
1864 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1865 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1867 if (!added || !removed) {
1868 dev_warn(&h->pdev->dev, "out of memory in "
1869 "adjust_hpsa_scsi_table\n");
1873 spin_lock_irqsave(&h->devlock, flags);
1875 /* find any devices in h->dev[] that are not in
1876 * sd[] and remove them from h->dev[], and for any
1877 * devices which have changed, remove the old device
1878 * info and add the new device info.
1879 * If minor device attributes change, just update
1880 * the existing device structure.
1885 while (i < h->ndevices) {
1887 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1888 if (device_change == DEVICE_NOT_FOUND) {
1890 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1891 continue; /* remove ^^^, hence i not incremented */
1892 } else if (device_change == DEVICE_CHANGED) {
1894 hpsa_scsi_replace_entry(h, i, sd[entry],
1895 added, &nadded, removed, &nremoved);
1896 /* Set it to NULL to prevent it from being freed
1897 * at the bottom of hpsa_update_scsi_devices()
1900 } else if (device_change == DEVICE_UPDATED) {
1901 hpsa_scsi_update_entry(h, i, sd[entry]);
1906 /* Now, make sure every device listed in sd[] is also
1907 * listed in h->dev[], adding them if they aren't found
1910 for (i = 0; i < nsds; i++) {
1911 if (!sd[i]) /* if already added above. */
1914 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1915 * as the SCSI mid-layer does not handle such devices well.
1916 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1917 * at 160Hz, and prevents the system from coming up.
1919 if (sd[i]->volume_offline) {
1920 hpsa_show_volume_status(h, sd[i]);
1921 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1925 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1926 h->ndevices, &entry);
1927 if (device_change == DEVICE_NOT_FOUND) {
1929 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1931 sd[i] = NULL; /* prevent from being freed later. */
1932 } else if (device_change == DEVICE_CHANGED) {
1933 /* should never happen... */
1935 dev_warn(&h->pdev->dev,
1936 "device unexpectedly changed.\n");
1937 /* but if it does happen, we just ignore that device */
1940 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1942 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1943 * any logical drives that need it enabled.
1945 for (i = 0; i < h->ndevices; i++) {
1946 if (h->dev[i] == NULL)
1948 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1951 spin_unlock_irqrestore(&h->devlock, flags);
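/*
 * The offload enable above is deliberately two-phase: ioaccel
 * submission dereferences dev->phys_disk[], so offload_enabled is
 * only flipped to offload_to_be_enabled after
 * hpsa_update_log_drive_phys_drive_ptrs() has made those pointers
 * coherent, all of it under h->devlock.
 */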
1953 /* Monitor devices which are in one of several NOT READY states to be
1954 * brought online later. This must be done without holding h->devlock,
1955 * so don't touch h->dev[]
1957 for (i = 0; i < nsds; i++) {
1958 if (!sd[i]) /* if already added above. */
1960 if (sd[i]->volume_offline)
1961 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1964 /* Don't notify scsi mid layer of any changes the first time through
1965 * (or if there are no changes); scsi_scan_host will do it later the
1966 * first time through.
1971 /* Notify scsi mid layer of any removed devices */
1972 for (i = 0; i < nremoved; i++) {
1973 if (removed[i] == NULL)
1975 if (removed[i]->expose_device)
1976 hpsa_remove_device(h, removed[i]);
1981 /* Notify scsi mid layer of any added devices */
1982 for (i = 0; i < nadded; i++) {
1985 if (added[i] == NULL)
1987 if (!(added[i]->expose_device))
1989 rc = hpsa_add_device(h, added[i]);
1992 dev_warn(&h->pdev->dev,
1993 "addition failed %d, device not added.", rc);
1994 /* now we have to remove it from h->dev,
1995 * since it didn't get added to scsi mid layer
1997 fixup_botched_add(h, added[i]);
1998 h->drv_req_rescan = 1;
2007 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
2008 * Assumes h->devlock is held.
2010 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2011 int bus, int target, int lun)
2014 struct hpsa_scsi_dev_t *sd;
2016 for (i = 0; i < h->ndevices; i++) {
2018 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2024 static int hpsa_slave_alloc(struct scsi_device *sdev)
2026 struct hpsa_scsi_dev_t *sd = NULL;
2027 unsigned long flags;
2028 struct ctlr_info *h;
2030 h = sdev_to_hba(sdev);
2031 spin_lock_irqsave(&h->devlock, flags);
2032 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2033 struct scsi_target *starget;
2034 struct sas_rphy *rphy;
2036 starget = scsi_target(sdev);
2037 rphy = target_to_rphy(starget);
2038 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2040 sd->target = sdev_id(sdev);
2041 sd->lun = sdev->lun;
2045 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2046 sdev_id(sdev), sdev->lun);
2048 if (sd && sd->expose_device) {
2049 atomic_set(&sd->ioaccel_cmds_out, 0);
2050 sdev->hostdata = sd;
2052 sdev->hostdata = NULL;
2053 spin_unlock_irqrestore(&h->devlock, flags);
2057 /* configure scsi device based on internal per-device structure */
2058 static int hpsa_slave_configure(struct scsi_device *sdev)
2060 struct hpsa_scsi_dev_t *sd;
2063 sd = sdev->hostdata;
2064 sdev->no_uld_attach = !sd || !sd->expose_device;
2067 queue_depth = sd->queue_depth != 0 ?
2068 sd->queue_depth : sdev->host->can_queue;
2070 queue_depth = sdev->host->can_queue;
2072 scsi_change_queue_depth(sdev, queue_depth);
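/*
 * Queue depth selection, by example: a disk whose internal device
 * structure carries a nonzero sd->queue_depth keeps that per-device
 * limit, while devices reporting 0 (and sdevs with no hostdata)
 * fall back to the host-wide can_queue value.
 */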
2077 static void hpsa_slave_destroy(struct scsi_device *sdev)
2079 /* nothing to do. */
2082 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2086 if (!h->ioaccel2_cmd_sg_list)
2088 for (i = 0; i < h->nr_cmds; i++) {
2089 kfree(h->ioaccel2_cmd_sg_list[i]);
2090 h->ioaccel2_cmd_sg_list[i] = NULL;
2092 kfree(h->ioaccel2_cmd_sg_list);
2093 h->ioaccel2_cmd_sg_list = NULL;
2096 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2100 if (h->chainsize <= 0)
2103 h->ioaccel2_cmd_sg_list =
2104 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2106 if (!h->ioaccel2_cmd_sg_list)
2108 for (i = 0; i < h->nr_cmds; i++) {
2109 h->ioaccel2_cmd_sg_list[i] =
2110 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2111 h->maxsgentries, GFP_KERNEL);
2112 if (!h->ioaccel2_cmd_sg_list[i])
2118 hpsa_free_ioaccel2_sg_chain_blocks(h);
2122 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2126 if (!h->cmd_sg_list)
2128 for (i = 0; i < h->nr_cmds; i++) {
2129 kfree(h->cmd_sg_list[i]);
2130 h->cmd_sg_list[i] = NULL;
2132 kfree(h->cmd_sg_list);
2133 h->cmd_sg_list = NULL;
2136 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2140 if (h->chainsize <= 0)
2143 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2145 if (!h->cmd_sg_list) {
2146 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
2149 for (i = 0; i < h->nr_cmds; i++) {
2150 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2151 h->chainsize, GFP_KERNEL);
2152 if (!h->cmd_sg_list[i]) {
2153 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
2160 hpsa_free_sg_chain_blocks(h);
2164 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2165 struct io_accel2_cmd *cp, struct CommandList *c)
2167 struct ioaccel2_sg_element *chain_block;
2171 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2172 chain_size = le32_to_cpu(cp->sg[0].length);
2173 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2175 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2176 /* prevent subsequent unmapping */
2177 cp->sg->address = 0;
2180 cp->sg->address = cpu_to_le64(temp64);
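/*
 * For ioaccel2 chaining, sg[0].length already encodes the chain
 * block size, so mapping only has to record the DMA address in
 * sg->address; on failure the address is zeroed (see above) so no
 * stale value gets unmapped later.
 */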
2184 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2185 struct io_accel2_cmd *cp)
2187 struct ioaccel2_sg_element *chain_sg;
2192 temp64 = le64_to_cpu(chain_sg->address);
2193 chain_size = le32_to_cpu(cp->sg[0].length);
2194 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2197 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2198 struct CommandList *c)
2200 struct SGDescriptor *chain_sg, *chain_block;
2204 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2205 chain_block = h->cmd_sg_list[c->cmdindex];
2206 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2207 chain_len = sizeof(*chain_sg) *
2208 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2209 chain_sg->Len = cpu_to_le32(chain_len);
2210 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2212 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2213 /* prevent subsequent unmapping */
2214 chain_sg->Addr = cpu_to_le64(0);
2217 chain_sg->Addr = cpu_to_le64(temp64);
2221 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2222 struct CommandList *c)
2224 struct SGDescriptor *chain_sg;
2226 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2229 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2230 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2231 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
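/*
 * SG chaining recap: a command with more than max_cmd_sg_entries
 * segments keeps the first (max_cmd_sg_entries - 1) descriptors
 * in line and converts the last in-line descriptor into a chain
 * pointer (Ext = HPSA_SG_CHAIN) to the per-command chain block
 * holding the remaining SGTotal - max_cmd_sg_entries entries.
 */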
2235 /* Decode the various types of errors on ioaccel2 path.
2236 * Return 1 for any error that should generate a RAID path retry.
2237 * Return 0 for errors that don't require a RAID path retry.
2239 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2240 struct CommandList *c,
2241 struct scsi_cmnd *cmd,
2242 struct io_accel2_cmd *c2,
2243 struct hpsa_scsi_dev_t *dev)
2247 u32 ioaccel2_resid = 0;
2249 switch (c2->error_data.serv_response) {
2250 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2251 switch (c2->error_data.status) {
2252 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2254 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2255 cmd->result |= SAM_STAT_CHECK_CONDITION;
2256 if (c2->error_data.data_present !=
2257 IOACCEL2_SENSE_DATA_PRESENT) {
2258 memset(cmd->sense_buffer, 0,
2259 SCSI_SENSE_BUFFERSIZE);
2262 /* copy the sense data */
2263 data_len = c2->error_data.sense_data_len;
2264 if (data_len > SCSI_SENSE_BUFFERSIZE)
2265 data_len = SCSI_SENSE_BUFFERSIZE;
2266 if (data_len > sizeof(c2->error_data.sense_data_buff))
2268 data_len = sizeof(c2->error_data.sense_data_buff);
2269 memcpy(cmd->sense_buffer,
2270 c2->error_data.sense_data_buff, data_len);
2273 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2276 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2279 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2282 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2290 case IOACCEL2_SERV_RESPONSE_FAILURE:
2291 switch (c2->error_data.status) {
2292 case IOACCEL2_STATUS_SR_IO_ERROR:
2293 case IOACCEL2_STATUS_SR_IO_ABORTED:
2294 case IOACCEL2_STATUS_SR_OVERRUN:
2297 case IOACCEL2_STATUS_SR_UNDERRUN:
2298 cmd->result = (DID_OK << 16); /* host byte */
2299 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2300 ioaccel2_resid = get_unaligned_le32(
2301 &c2->error_data.resid_cnt[0]);
2302 scsi_set_resid(cmd, ioaccel2_resid);
2304 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2305 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2306 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2308 * Did an HBA disk disappear? We will eventually
2309 * get a state change event from the controller but
2310 * in the meantime, we need to tell the OS that the
2311 * HBA disk is no longer there and stop I/O
2312 * from going down. This allows the potential re-insert
2313 * of the disk to get the same device node.
2315 if (dev->physical_device && dev->expose_device) {
2316 cmd->result = DID_NO_CONNECT << 16;
2318 h->drv_req_rescan = 1;
2319 dev_warn(&h->pdev->dev,
2320 "%s: device is gone!\n", __func__);
2323 * Retry by sending down the RAID path.
2324 * We will get an event from ctlr to
2325 * trigger rescan regardless.
2333 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2335 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2337 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2340 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2347 return retry; /* retry on raid path? */
2350 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2351 struct CommandList *c)
2353 bool do_wake = false;
2356 * Prevent the following race in the abort handler:
2358 * 1. LLD is requested to abort a SCSI command
2359 * 2. The SCSI command completes
2360 * 3. The struct CommandList associated with step 2 is made available
2361 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2362 * 5. Abort handler follows scsi_cmnd->host_scribble and
2363 * finds struct CommandList and tries to abort it
2364 * Now we have aborted the wrong command.
2366 * Reset c->scsi_cmd here so that the abort or reset handler will know
2367 * this command has completed. Then, check to see if the handler is
2368 * waiting for this command, and, if so, wake it.
2370 c->scsi_cmd = SCSI_CMD_IDLE;
2371 mb(); /* Declare command idle before checking for pending events. */
2372 if (c->abort_pending) {
2374 c->abort_pending = false;
2376 if (c->reset_pending) {
2377 unsigned long flags;
2378 struct hpsa_scsi_dev_t *dev;
2381 * There appears to be a reset pending; lock the lock and
2382 * reconfirm. If so, then decrement the count of outstanding
2383 * commands and wake the reset command if this is the last one.
2385 spin_lock_irqsave(&h->lock, flags);
2386 dev = c->reset_pending; /* Re-fetch under the lock. */
2387 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2389 c->reset_pending = NULL;
2390 spin_unlock_irqrestore(&h->lock, flags);
2394 wake_up_all(&h->event_sync_wait_queue);
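/*
 * This wake pairs with the wait_event() in hpsa_do_reset(), which
 * sleeps until dev->reset_cmds_out reaches zero; gating it on
 * do_wake keeps ordinary command completions from issuing a wakeup
 * on every pass through here.
 */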
2397 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2398 struct CommandList *c)
2400 hpsa_cmd_resolve_events(h, c);
2401 cmd_tagged_free(h, c);
2404 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2405 struct CommandList *c, struct scsi_cmnd *cmd)
2407 hpsa_cmd_resolve_and_free(h, c);
2408 if (cmd && cmd->scsi_done)
2409 cmd->scsi_done(cmd);
2412 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2414 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2415 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2418 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2420 cmd->result = DID_ABORT << 16;
2423 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2424 struct scsi_cmnd *cmd)
2426 hpsa_set_scsi_cmd_aborted(cmd);
2427 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2428 c->Request.CDB, c->err_info->ScsiStatus);
2429 hpsa_cmd_resolve_and_free(h, c);
2432 static void process_ioaccel2_completion(struct ctlr_info *h,
2433 struct CommandList *c, struct scsi_cmnd *cmd,
2434 struct hpsa_scsi_dev_t *dev)
2436 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2438 /* check for good status */
2439 if (likely(c2->error_data.serv_response == 0 &&
2440 c2->error_data.status == 0))
2441 return hpsa_cmd_free_and_done(h, c, cmd);
2444 * Any RAID offload error results in retry which will use
2445 * the normal I/O path so the controller can handle whatever's wrong.
2448 if (is_logical_device(dev) &&
2449 c2->error_data.serv_response ==
2450 IOACCEL2_SERV_RESPONSE_FAILURE) {
2451 if (c2->error_data.status ==
2452 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2453 dev->offload_enabled = 0;
2454 dev->offload_to_be_enabled = 0;
2457 return hpsa_retry_cmd(h, c);
2460 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2461 return hpsa_retry_cmd(h, c);
2463 return hpsa_cmd_free_and_done(h, c, cmd);
2466 /* Returns 0 on success, < 0 otherwise. */
2467 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2468 struct CommandList *cp)
2470 u8 tmf_status = cp->err_info->ScsiStatus;
2472 switch (tmf_status) {
2473 case CISS_TMF_COMPLETE:
2475 * CISS_TMF_COMPLETE never happens; instead,
2476 * ei->CommandStatus == 0 for this case.
2478 case CISS_TMF_SUCCESS:
2480 case CISS_TMF_INVALID_FRAME:
2481 case CISS_TMF_NOT_SUPPORTED:
2482 case CISS_TMF_FAILED:
2483 case CISS_TMF_WRONG_LUN:
2484 case CISS_TMF_OVERLAPPED_TAG:
2487 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2494 static void complete_scsi_command(struct CommandList *cp)
2496 struct scsi_cmnd *cmd;
2497 struct ctlr_info *h;
2498 struct ErrorInfo *ei;
2499 struct hpsa_scsi_dev_t *dev;
2500 struct io_accel2_cmd *c2;
2503 u8 asc; /* additional sense code */
2504 u8 ascq; /* additional sense code qualifier */
2505 unsigned long sense_data_size;
2512 cmd->result = DID_NO_CONNECT << 16;
2513 return hpsa_cmd_free_and_done(h, cp, cmd);
2516 dev = cmd->device->hostdata;
2518 cmd->result = DID_NO_CONNECT << 16;
2519 return hpsa_cmd_free_and_done(h, cp, cmd);
2521 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2523 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2524 if ((cp->cmd_type == CMD_SCSI) &&
2525 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2526 hpsa_unmap_sg_chain_block(h, cp);
2528 if ((cp->cmd_type == CMD_IOACCEL2) &&
2529 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2530 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2532 cmd->result = (DID_OK << 16); /* host byte */
2533 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2535 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2536 if (dev->physical_device && dev->expose_device &&
2538 cmd->result = DID_NO_CONNECT << 16;
2539 return hpsa_cmd_free_and_done(h, cp, cmd);
2541 if (likely(cp->phys_disk != NULL))
2542 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2546 * We check for lockup status here as it may be set for
2547 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2548 * fail_all_outstanding_cmds()
2550 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2551 /* DID_NO_CONNECT will prevent a retry */
2552 cmd->result = DID_NO_CONNECT << 16;
2553 return hpsa_cmd_free_and_done(h, cp, cmd);
2556 if ((unlikely(hpsa_is_pending_event(cp)))) {
2557 if (cp->reset_pending)
2558 return hpsa_cmd_free_and_done(h, cp, cmd);
2559 if (cp->abort_pending)
2560 return hpsa_cmd_abort_and_free(h, cp, cmd);
2563 if (cp->cmd_type == CMD_IOACCEL2)
2564 return process_ioaccel2_completion(h, cp, cmd, dev);
2566 scsi_set_resid(cmd, ei->ResidualCnt);
2567 if (ei->CommandStatus == 0)
2568 return hpsa_cmd_free_and_done(h, cp, cmd);
2570 /* For I/O accelerator commands, copy over some fields to the normal
2571 * CISS header used below for error handling.
2573 if (cp->cmd_type == CMD_IOACCEL1) {
2574 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2575 cp->Header.SGList = scsi_sg_count(cmd);
2576 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2577 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2578 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2579 cp->Header.tag = c->tag;
2580 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2581 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2583 /* Any RAID offload error results in retry which will use
2584 * the normal I/O path so the controller can handle whatever's wrong.
2587 if (is_logical_device(dev)) {
2588 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2589 dev->offload_enabled = 0;
2590 return hpsa_retry_cmd(h, cp);
2594 /* an error has occurred */
2595 switch (ei->CommandStatus) {
2597 case CMD_TARGET_STATUS:
2598 cmd->result |= ei->ScsiStatus;
2599 /* copy the sense data */
2600 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2601 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2603 sense_data_size = sizeof(ei->SenseInfo);
2604 if (ei->SenseLen < sense_data_size)
2605 sense_data_size = ei->SenseLen;
2606 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2608 decode_sense_data(ei->SenseInfo, sense_data_size,
2609 &sense_key, &asc, &ascq);
2610 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2611 if (sense_key == ABORTED_COMMAND) {
2612 cmd->result |= DID_SOFT_ERROR << 16;
2617 /* Problem was not a check condition
2618 * Pass it up to the upper layers...
2620 if (ei->ScsiStatus) {
2621 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2622 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2623 "Returning result: 0x%x\n",
2625 sense_key, asc, ascq,
2627 } else { /* scsi status is zero??? How??? */
2628 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2629 "Returning no connection.\n", cp),
2631 /* Ordinarily, this case should never happen,
2632 * but there is a bug in some released firmware
2633 * revisions that allows it to happen if, for
2634 * example, a 4100 backplane loses power and
2635 * the tape drive is in it. We assume that
2636 * it's a fatal error of some kind because we
2637 * can't show that it wasn't. We will make it
2638 * look like selection timeout since that is
2639 * the most common reason for this to occur,
2640 * and it's severe enough.
2643 cmd->result = DID_NO_CONNECT << 16;
2647 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2649 case CMD_DATA_OVERRUN:
2650 dev_warn(&h->pdev->dev,
2651 "CDB %16phN data overrun\n", cp->Request.CDB);
2654 /* print_bytes(cp, sizeof(*cp), 1, 0); print_cmd(cp); */
2656 /* We get CMD_INVALID if you address a non-existent device
2657 * instead of a selection timeout (no response). You will
2658 * see this if you yank out a drive, then try to access it.
2659 * This is kind of a shame because it means that any other
2660 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2661 * missing target. */
2662 cmd->result = DID_NO_CONNECT << 16;
2665 case CMD_PROTOCOL_ERR:
2666 cmd->result = DID_ERROR << 16;
2667 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2670 case CMD_HARDWARE_ERR:
2671 cmd->result = DID_ERROR << 16;
2672 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2675 case CMD_CONNECTION_LOST:
2676 cmd->result = DID_ERROR << 16;
2677 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2681 /* Return now to avoid calling scsi_done(). */
2682 return hpsa_cmd_abort_and_free(h, cp, cmd);
2683 case CMD_ABORT_FAILED:
2684 cmd->result = DID_ERROR << 16;
2685 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2688 case CMD_UNSOLICITED_ABORT:
2689 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2690 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2694 cmd->result = DID_TIME_OUT << 16;
2695 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2698 case CMD_UNABORTABLE:
2699 cmd->result = DID_ERROR << 16;
2700 dev_warn(&h->pdev->dev, "Command unabortable\n");
2702 case CMD_TMF_STATUS:
2703 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2704 cmd->result = DID_ERROR << 16;
2706 case CMD_IOACCEL_DISABLED:
2707 /* This only handles the direct pass-through case since RAID
2708 * offload is handled above. Just attempt a retry.
2710 cmd->result = DID_SOFT_ERROR << 16;
2711 dev_warn(&h->pdev->dev,
2712 "cp %p had HP SSD Smart Path error\n", cp);
2715 cmd->result = DID_ERROR << 16;
2716 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2717 cp, ei->CommandStatus);
2720 return hpsa_cmd_free_and_done(h, cp, cmd);
2723 static void hpsa_pci_unmap(struct pci_dev *pdev,
2724 struct CommandList *c, int sg_used, int data_direction)
2728 for (i = 0; i < sg_used; i++)
2729 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2730 le32_to_cpu(c->SG[i].Len),
2734 static int hpsa_map_one(struct pci_dev *pdev,
2735 struct CommandList *cp,
2742 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2743 cp->Header.SGList = 0;
2744 cp->Header.SGTotal = cpu_to_le16(0);
2748 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2749 if (dma_mapping_error(&pdev->dev, addr64)) {
2750 /* Prevent subsequent unmap of something never mapped */
2751 cp->Header.SGList = 0;
2752 cp->Header.SGTotal = cpu_to_le16(0);
2755 cp->SG[0].Addr = cpu_to_le64(addr64);
2756 cp->SG[0].Len = cpu_to_le32(buflen);
2757 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2758 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2759 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
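/*
 * A command mapped this way carries exactly one SG descriptor:
 * Addr/Len describe the single DMA buffer and Ext = HPSA_SG_LAST
 * marks it unchained, which is why callers later unmap with
 * hpsa_pci_unmap(..., 1, ...).
 */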
2763 #define NO_TIMEOUT ((unsigned long) -1)
2764 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2765 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2766 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2768 DECLARE_COMPLETION_ONSTACK(wait);
2771 __enqueue_cmd_and_start_io(h, c, reply_queue);
2772 if (timeout_msecs == NO_TIMEOUT) {
2773 /* TODO: get rid of this no-timeout thing */
2774 wait_for_completion_io(&wait);
2777 if (!wait_for_completion_io_timeout(&wait,
2778 msecs_to_jiffies(timeout_msecs))) {
2779 dev_warn(&h->pdev->dev, "Command timed out.\n");
2785 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2786 int reply_queue, unsigned long timeout_msecs)
2788 if (unlikely(lockup_detected(h))) {
2789 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2792 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2795 static u32 lockup_detected(struct ctlr_info *h)
2798 u32 rc, *lockup_detected;
2801 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2802 rc = *lockup_detected;
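/*
 * The lockup flag lives in per-cpu storage so the I/O fast path can
 * poll it without bouncing a shared cacheline between CPUs; any
 * nonzero value means a controller lockup was detected.
 */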
2807 #define MAX_DRIVER_CMD_RETRIES 25
2808 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2809 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2811 int backoff_time = 10, retry_count = 0;
2815 memset(c->err_info, 0, sizeof(*c->err_info));
2816 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2821 if (retry_count > 3) {
2822 msleep(backoff_time);
2823 if (backoff_time < 1000)
2826 } while ((check_for_unit_attention(h, c) ||
2827 check_for_busy(h, c)) &&
2828 retry_count <= MAX_DRIVER_CMD_RETRIES);
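/*
 * Retry pacing, roughly (the backoff update itself is elided here;
 * mainline doubles backoff_time up to the 1000 ms cap): the first
 * three retries are immediate, then sleeps of 10, 20, 40, ... ms
 * follow, for at most MAX_DRIVER_CMD_RETRIES attempts while the
 * target keeps reporting unit attention or busy.
 */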
2829 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2830 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2835 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2836 struct CommandList *c)
2838 const u8 *cdb = c->Request.CDB;
2839 const u8 *lun = c->Header.LUN.LunAddrBytes;
2841 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2845 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2846 struct CommandList *cp)
2848 const struct ErrorInfo *ei = cp->err_info;
2849 struct device *d = &cp->h->pdev->dev;
2850 u8 sense_key, asc, ascq;
2853 switch (ei->CommandStatus) {
2854 case CMD_TARGET_STATUS:
2855 if (ei->SenseLen > sizeof(ei->SenseInfo))
2856 sense_len = sizeof(ei->SenseInfo);
2858 sense_len = ei->SenseLen;
2859 decode_sense_data(ei->SenseInfo, sense_len,
2860 &sense_key, &asc, &ascq);
2861 hpsa_print_cmd(h, "SCSI status", cp);
2862 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2863 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2864 sense_key, asc, ascq);
2866 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2867 if (ei->ScsiStatus == 0)
2868 dev_warn(d, "SCSI status is abnormally zero. "
2869 "(probably indicates selection timeout "
2870 "reported incorrectly due to a known "
2871 "firmware bug, circa July, 2001.)\n");
2873 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2875 case CMD_DATA_OVERRUN:
2876 hpsa_print_cmd(h, "overrun condition", cp);
2879 /* controller unfortunately reports SCSI passthru's
2880 * to non-existent targets as invalid commands.
2882 hpsa_print_cmd(h, "invalid command", cp);
2883 dev_warn(d, "probably means device no longer present\n");
2886 case CMD_PROTOCOL_ERR:
2887 hpsa_print_cmd(h, "protocol error", cp);
2889 case CMD_HARDWARE_ERR:
2890 hpsa_print_cmd(h, "hardware error", cp);
2892 case CMD_CONNECTION_LOST:
2893 hpsa_print_cmd(h, "connection lost", cp);
2896 hpsa_print_cmd(h, "aborted", cp);
2898 case CMD_ABORT_FAILED:
2899 hpsa_print_cmd(h, "abort failed", cp);
2901 case CMD_UNSOLICITED_ABORT:
2902 hpsa_print_cmd(h, "unsolicited abort", cp);
2905 hpsa_print_cmd(h, "timed out", cp);
2907 case CMD_UNABORTABLE:
2908 hpsa_print_cmd(h, "unabortable", cp);
2910 case CMD_CTLR_LOCKUP:
2911 hpsa_print_cmd(h, "controller lockup detected", cp);
2914 hpsa_print_cmd(h, "unknown status", cp);
2915 dev_warn(d, "Unknown command status %x\n",
2920 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2921 u16 page, unsigned char *buf,
2922 unsigned char bufsize)
2925 struct CommandList *c;
2926 struct ErrorInfo *ei;
2930 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2931 page, scsi3addr, TYPE_CMD)) {
2935 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2936 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
2940 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2941 hpsa_scsi_interpret_error(h, c);
2949 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2950 u8 reset_type, int reply_queue)
2953 struct CommandList *c;
2954 struct ErrorInfo *ei;
2959 /* fill_cmd can't fail here, no data buffer to map. */
2960 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2961 scsi3addr, TYPE_MSG);
2962 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
2964 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2967 /* no unmap needed here because no data xfer. */
2970 if (ei->CommandStatus != 0) {
2971 hpsa_scsi_interpret_error(h, c);
2979 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2980 struct hpsa_scsi_dev_t *dev,
2981 unsigned char *scsi3addr)
2985 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2986 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2988 if (hpsa_is_cmd_idle(c))
2991 switch (c->cmd_type) {
2993 case CMD_IOCTL_PEND:
2994 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2995 sizeof(c->Header.LUN.LunAddrBytes));
3000 if (c->phys_disk == dev) {
3001 /* HBA mode match */
3004 /* Possible RAID mode -- check each phys dev. */
3005 /* FIXME: Do we need to take out a lock here? If
3006 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3008 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3009 /* FIXME: an alternate test might be
3011 * match = dev->phys_disk[i]->ioaccel_handle
3012 * == c2->scsi_nexus; */
3013 match = dev->phys_disk[i] == c->phys_disk;
3019 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3020 match = dev->phys_disk[i]->ioaccel_handle ==
3021 le32_to_cpu(ac->it_nexus);
3025 case 0: /* The command is in the middle of being initialized. */
3030 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3038 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3039 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
3044 /* We can really only handle one reset at a time */
3045 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3046 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3050 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
3052 for (i = 0; i < h->nr_cmds; i++) {
3053 struct CommandList *c = h->cmd_pool + i;
3054 int refcount = atomic_inc_return(&c->refcount);
3056 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
3057 unsigned long flags;
3060 * Mark the target command as having a reset pending,
3061 * then take h->lock so that the command cannot complete
3062 * while we're considering it. If the command is not
3063 * idle then count it; otherwise revoke the event.
3065 c->reset_pending = dev;
3066 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
3067 if (!hpsa_is_cmd_idle(c))
3068 atomic_inc(&dev->reset_cmds_out);
3070 c->reset_pending = NULL;
3071 spin_unlock_irqrestore(&h->lock, flags);
3077 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
3079 wait_event(h->event_sync_wait_queue,
3080 atomic_read(&dev->reset_cmds_out) == 0 ||
3081 lockup_detected(h));
3083 if (unlikely(lockup_detected(h))) {
3084 dev_warn(&h->pdev->dev,
3085 "Controller lockup detected during reset wait\n");
3090 atomic_set(&dev->reset_cmds_out, 0);
3092 wait_for_device_to_become_ready(h, scsi3addr, 0);
3094 mutex_unlock(&h->reset_mutex);
3098 static void hpsa_get_raid_level(struct ctlr_info *h,
3099 unsigned char *scsi3addr, unsigned char *raid_level)
3104 *raid_level = RAID_UNKNOWN;
3105 buf = kzalloc(64, GFP_KERNEL);
3109 if (!hpsa_vpd_page_supported(h, scsi3addr,
3110 HPSA_VPD_LV_DEVICE_GEOMETRY))
3113 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3114 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3117 *raid_level = buf[8];
3118 if (*raid_level > RAID_UNKNOWN)
3119 *raid_level = RAID_UNKNOWN;
3125 #define HPSA_MAP_DEBUG
3126 #ifdef HPSA_MAP_DEBUG
3127 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3128 struct raid_map_data *map_buff)
3130 struct raid_map_disk_data *dd = &map_buff->data[0];
3132 u16 map_cnt, row_cnt, disks_per_row;
3137 /* Show details only if debugging has been activated. */
3138 if (h->raid_offload_debug < 2)
3141 dev_info(&h->pdev->dev, "structure_size = %u\n",
3142 le32_to_cpu(map_buff->structure_size));
3143 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3144 le32_to_cpu(map_buff->volume_blk_size));
3145 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3146 le64_to_cpu(map_buff->volume_blk_cnt));
3147 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3148 map_buff->phys_blk_shift);
3149 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3150 map_buff->parity_rotation_shift);
3151 dev_info(&h->pdev->dev, "strip_size = %u\n",
3152 le16_to_cpu(map_buff->strip_size));
3153 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3154 le64_to_cpu(map_buff->disk_starting_blk));
3155 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3156 le64_to_cpu(map_buff->disk_blk_cnt));
3157 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3158 le16_to_cpu(map_buff->data_disks_per_row));
3159 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3160 le16_to_cpu(map_buff->metadata_disks_per_row));
3161 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3162 le16_to_cpu(map_buff->row_cnt));
3163 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3164 le16_to_cpu(map_buff->layout_map_count));
3165 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3166 le16_to_cpu(map_buff->flags));
3167 dev_info(&h->pdev->dev, "encryption = %s\n",
3168 le16_to_cpu(map_buff->flags) &
3169 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3170 dev_info(&h->pdev->dev, "dekindex = %u\n",
3171 le16_to_cpu(map_buff->dekindex));
3172 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3173 for (map = 0; map < map_cnt; map++) {
3174 dev_info(&h->pdev->dev, "Map%u:\n", map);
3175 row_cnt = le16_to_cpu(map_buff->row_cnt);
3176 for (row = 0; row < row_cnt; row++) {
3177 dev_info(&h->pdev->dev, " Row%u:\n", row);
3179 le16_to_cpu(map_buff->data_disks_per_row);
3180 for (col = 0; col < disks_per_row; col++, dd++)
3181 dev_info(&h->pdev->dev,
3182 " D%02u: h=0x%04x xor=%u,%u\n",
3183 col, dd->ioaccel_handle,
3184 dd->xor_mult[0], dd->xor_mult[1]);
3186 le16_to_cpu(map_buff->metadata_disks_per_row);
3187 for (col = 0; col < disks_per_row; col++, dd++)
3188 dev_info(&h->pdev->dev,
3189 " M%02u: h=0x%04x xor=%u,%u\n",
3190 col, dd->ioaccel_handle,
3191 dd->xor_mult[0], dd->xor_mult[1]);
3196 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3197 __attribute__((unused)) int rc,
3198 __attribute__((unused)) struct raid_map_data *map_buff)
3203 static int hpsa_get_raid_map(struct ctlr_info *h,
3204 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3207 struct CommandList *c;
3208 struct ErrorInfo *ei;
3212 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3213 sizeof(this_device->raid_map), 0,
3214 scsi3addr, TYPE_CMD)) {
3215 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3219 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3220 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3224 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3225 hpsa_scsi_interpret_error(h, c);
3231 /* @todo in the future, dynamically allocate RAID map memory */
3232 if (le32_to_cpu(this_device->raid_map.structure_size) >
3233 sizeof(this_device->raid_map)) {
3234 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3237 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3244 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3245 unsigned char scsi3addr[], u16 bmic_device_index,
3246 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3249 struct CommandList *c;
3250 struct ErrorInfo *ei;
3254 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3255 0, RAID_CTLR_LUNID, TYPE_CMD);
3259 c->Request.CDB[2] = bmic_device_index & 0xff;
3260 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3262 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3263 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3267 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3268 hpsa_scsi_interpret_error(h, c);
3276 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3277 struct bmic_identify_controller *buf, size_t bufsize)
3280 struct CommandList *c;
3281 struct ErrorInfo *ei;
3285 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3286 0, RAID_CTLR_LUNID, TYPE_CMD);
3290 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3291 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3295 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3296 hpsa_scsi_interpret_error(h, c);
3304 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3305 unsigned char scsi3addr[], u16 bmic_device_index,
3306 struct bmic_identify_physical_device *buf, size_t bufsize)
3309 struct CommandList *c;
3310 struct ErrorInfo *ei;
3313 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3314 0, RAID_CTLR_LUNID, TYPE_CMD);
3318 c->Request.CDB[2] = bmic_device_index & 0xff;
3319 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3321 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3324 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3325 hpsa_scsi_interpret_error(h, c);
3335 * get enclosure information
3336 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3337 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3338 * Uses id_physical_device to determine the box_index.
3340 static void hpsa_get_enclosure_info(struct ctlr_info *h,
3341 unsigned char *scsi3addr,
3342 struct ReportExtendedLUNdata *rlep, int rle_index,
3343 struct hpsa_scsi_dev_t *encl_dev)
3346 struct CommandList *c = NULL;
3347 struct ErrorInfo *ei = NULL;
3348 struct bmic_sense_storage_box_params *bssbp = NULL;
3349 struct bmic_identify_physical_device *id_phys = NULL;
3350 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3351 u16 bmic_device_index = 0;
3353 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3355 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3360 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3364 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3368 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3369 id_phys, sizeof(*id_phys));
3371 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3372 __func__, encl_dev->external, bmic_device_index);
3378 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3379 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3384 if (id_phys->phys_connector[1] == 'E')
3385 c->Request.CDB[5] = id_phys->box_index;
3387 c->Request.CDB[5] = 0;
3389 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3395 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3400 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3401 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3402 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3413 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3414 "Error, could not get enclosure information\n");
3417 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3418 unsigned char *scsi3addr)
3420 struct ReportExtendedLUNdata *physdev;
3425 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3429 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3430 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3434 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3436 for (i = 0; i < nphysicals; i++)
3437 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3438 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3447 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3448 struct hpsa_scsi_dev_t *dev)
3453 if (is_hba_lunid(scsi3addr)) {
3454 struct bmic_sense_subsystem_info *ssi;
3456 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3458 dev_warn(&h->pdev->dev,
3459 "%s: out of memory\n", __func__);
3463 rc = hpsa_bmic_sense_subsystem_information(h,
3464 scsi3addr, 0, ssi, sizeof(*ssi));
3466 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3467 h->sas_address = sa;
3472 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3474 dev->sas_address = sa;
3477 /* Get a device id from inquiry page 0x83 */
3478 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3479 unsigned char scsi3addr[], u8 page)
3484 unsigned char *buf, bufsize;
3486 buf = kzalloc(256, GFP_KERNEL);
3490 /* Get the size of the page list first */
3491 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3492 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3493 buf, HPSA_VPD_HEADER_SZ);
3495 goto exit_unsupported;
3497 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3498 bufsize = pages + HPSA_VPD_HEADER_SZ;
3502 /* Get the whole VPD page list */
3503 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3504 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3507 goto exit_unsupported;
3510 for (i = 1; i <= pages; i++)
3511 if (buf[3 + i] == page)
3512 goto exit_supported;
3521 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3522 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3528 this_device->offload_config = 0;
3529 this_device->offload_enabled = 0;
3530 this_device->offload_to_be_enabled = 0;
3532 buf = kzalloc(64, GFP_KERNEL);
3535 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3537 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3538 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3542 #define IOACCEL_STATUS_BYTE 4
3543 #define OFFLOAD_CONFIGURED_BIT 0x01
3544 #define OFFLOAD_ENABLED_BIT 0x02
3545 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3546 this_device->offload_config =
3547 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3548 if (this_device->offload_config) {
3549 this_device->offload_enabled =
3550 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3551 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3552 this_device->offload_enabled = 0;
3554 this_device->offload_to_be_enabled = this_device->offload_enabled;
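/*
 * Example: ioaccel_status == 0x03 means the volume is configured
 * for offload (bit 0) and enabled (bit 1); the enable is still
 * revoked if the RAID map cannot be read, since request mapping
 * depends on it.
 */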
3560 /* Get the device id from inquiry page 0x83 */
3561 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3562 unsigned char *device_id, int index, int buflen)
3567 /* Does controller have VPD for device id? */
3568 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3569 return 1; /* not supported */
3571 buf = kzalloc(64, GFP_KERNEL);
3575 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3576 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3580 memcpy(device_id, &buf[8], buflen);
3585 return rc; /* 0: got id; otherwise, didn't */
3588 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3589 void *buf, int bufsize,
3590 int extended_response)
3593 struct CommandList *c;
3594 unsigned char scsi3addr[8];
3595 struct ErrorInfo *ei;
3599 /* address the controller */
3600 memset(scsi3addr, 0, sizeof(scsi3addr));
3601 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3602 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3606 if (extended_response)
3607 c->Request.CDB[1] = extended_response;
3608 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3609 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3613 if (ei->CommandStatus != 0 &&
3614 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3615 hpsa_scsi_interpret_error(h, c);
3618 struct ReportLUNdata *rld = buf;
3620 if (rld->extended_response_flag != extended_response) {
3621 dev_err(&h->pdev->dev,
3622 "report luns requested format %u, got %u\n",
3624 rld->extended_response_flag);
3633 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3634 struct ReportExtendedLUNdata *buf, int bufsize)
3637 struct ReportLUNdata *lbuf;
3639 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3640 HPSA_REPORT_PHYS_EXTENDED);
3641 if (!rc || !hpsa_allow_any)
3644 /* REPORT PHYS EXTENDED is not supported */
3645 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3649 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3654 /* Copy ReportLUNdata header */
3655 memcpy(buf, lbuf, 8);
3656 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3657 for (i = 0; i < nphys; i++)
3658 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3664 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3665 struct ReportLUNdata *buf, int bufsize)
3667 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3670 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3671 int bus, int target, int lun)
3674 device->target = target;
3678 /* Use VPD inquiry to get details of volume status */
3679 static int hpsa_get_volume_status(struct ctlr_info *h,
3680 unsigned char scsi3addr[])
3687 buf = kzalloc(64, GFP_KERNEL);
3689 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3691 /* Does controller have VPD for logical volume status? */
3692 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3695 /* Get the size of the VPD return buffer */
3696 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3697 buf, HPSA_VPD_HEADER_SZ);
3702 /* Now get the whole VPD buffer */
3703 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3704 buf, size + HPSA_VPD_HEADER_SZ);
3707 status = buf[4]; /* status byte */
3713 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3716 /* Determine offline status of a volume.
3717 * Return either:
3718 * 0 (not offline)
3719 * 0xff (offline for unknown reasons)
3720 * # (integer code indicating one of several NOT READY states
3721 * describing why a volume is to be kept offline)
3723 static int hpsa_volume_offline(struct ctlr_info *h,
3724 unsigned char scsi3addr[])
3726 struct CommandList *c;
3727 unsigned char *sense;
3728 u8 sense_key, asc, ascq;
3733 #define ASC_LUN_NOT_READY 0x04
3734 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3735 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3739 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3740 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3746 sense = c->err_info->SenseInfo;
3747 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3748 sense_len = sizeof(c->err_info->SenseInfo);
3750 sense_len = c->err_info->SenseLen;
3751 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3752 cmd_status = c->err_info->CommandStatus;
3753 scsi_status = c->err_info->ScsiStatus;
3755 /* Is the volume 'not ready'? */
3756 if (cmd_status != CMD_TARGET_STATUS ||
3757 scsi_status != SAM_STAT_CHECK_CONDITION ||
3758 sense_key != NOT_READY ||
3759 asc != ASC_LUN_NOT_READY) {
3763 /* Determine the reason for not ready state */
3764 ldstat = hpsa_get_volume_status(h, scsi3addr);
3766 /* Keep volume offline in certain cases: */
3768 case HPSA_LV_UNDERGOING_ERASE:
3769 case HPSA_LV_NOT_AVAILABLE:
3770 case HPSA_LV_UNDERGOING_RPI:
3771 case HPSA_LV_PENDING_RPI:
3772 case HPSA_LV_ENCRYPTED_NO_KEY:
3773 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3774 case HPSA_LV_UNDERGOING_ENCRYPTION:
3775 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3776 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3778 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3779 /* If VPD status page isn't available,
3780 * use ASC/ASCQ to determine state
3782 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3783 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3793 * Find out if a logical device supports aborts by simply trying one.
3794 * Smart Array may claim not to support aborts on logical drives, but
3795 * if an MSA2000 is connected, the drives on that will be presented
3796 * by the Smart Array as logical drives, and aborts may be sent to
3797 * those devices successfully. So the simplest way to find out is
3798 * to simply try an abort and see how the device responds.
3800 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3801 unsigned char *scsi3addr)
3803 struct CommandList *c;
3804 struct ErrorInfo *ei;
3807 u64 tag = (u64) -1; /* bogus tag */
3809 /* Assume that physical devices support aborts */
3810 if (!is_logical_dev_addr_mode(scsi3addr))
3815 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3816 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3818 /* no unmap needed here because no data xfer. */
3820 switch (ei->CommandStatus) {
3824 case CMD_UNABORTABLE:
3825 case CMD_ABORT_FAILED:
3828 case CMD_TMF_STATUS:
3829 rc = hpsa_evaluate_tmf_status(h, c);
3839 static int hpsa_update_device_info(struct ctlr_info *h,
3840 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3841 unsigned char *is_OBDR_device)
3844 #define OBDR_SIG_OFFSET 43
3845 #define OBDR_TAPE_SIG "$DR-10"
3846 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3847 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3849 unsigned char *inq_buff;
3850 unsigned char *obdr_sig;
3853 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3859 /* Do an inquiry to the device to see what it is. */
3860 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3861 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3862 /* Inquiry failed (msg printed already) */
3863 dev_err(&h->pdev->dev,
3864 "hpsa_update_device_info: inquiry failed\n");
3869 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3870 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3872 this_device->devtype = (inq_buff[0] & 0x1f);
3873 memcpy(this_device->scsi3addr, scsi3addr, 8);
3874 memcpy(this_device->vendor, &inq_buff[8],
3875 sizeof(this_device->vendor));
3876 memcpy(this_device->model, &inq_buff[16],
3877 sizeof(this_device->model));
3878 this_device->rev = inq_buff[2];
3879 memset(this_device->device_id, 0,
3880 sizeof(this_device->device_id));
3881 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3882 sizeof(this_device->device_id)))
3883 dev_err(&h->pdev->dev,
3884 "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
3886 h->scsi_host->host_no,
3887 this_device->target, this_device->lun,
3888 scsi_device_type(this_device->devtype),
3889 this_device->model);
3891 if ((this_device->devtype == TYPE_DISK ||
3892 this_device->devtype == TYPE_ZBC) &&
3893 is_logical_dev_addr_mode(scsi3addr)) {
3896 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3897 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3898 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3899 volume_offline = hpsa_volume_offline(h, scsi3addr);
3900 if (volume_offline < 0 || volume_offline > 0xff)
3901 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3902 this_device->volume_offline = volume_offline & 0xff;
3904 this_device->raid_level = RAID_UNKNOWN;
3905 this_device->offload_config = 0;
3906 this_device->offload_enabled = 0;
3907 this_device->offload_to_be_enabled = 0;
3908 this_device->hba_ioaccel_enabled = 0;
3909 this_device->volume_offline = 0;
3910 this_device->queue_depth = h->nr_cmds;
3913 if (is_OBDR_device) {
3914 /* See if this is a One-Button-Disaster-Recovery device
3915 * by looking for "$DR-10" at offset 43 in inquiry data.
3917 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3918 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3919 strncmp(obdr_sig, OBDR_TAPE_SIG,
3920 OBDR_SIG_LEN) == 0);
3930 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3931 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3933 unsigned long flags;
3936 * See if this device supports aborts. If we already know
3937 * the device, we already know whether it supports aborts;
3938 * otherwise we have to find out by trying one.
3940 spin_lock_irqsave(&h->devlock, flags);
3941 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3942 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3943 entry >= 0 && entry < h->ndevices) {
3944 dev->supports_aborts = h->dev[entry]->supports_aborts;
3945 spin_unlock_irqrestore(&h->devlock, flags);
3947 spin_unlock_irqrestore(&h->devlock, flags);
3948 dev->supports_aborts =
3949 hpsa_device_supports_aborts(h, scsi3addr);
3950 if (dev->supports_aborts < 0)
3951 dev->supports_aborts = 0;
3956 * Helper function to assign bus, target, lun mapping of devices.
3957 * Logical drive target and lun are assigned at this time, but
3958 * physical device lun and target assignment are deferred (assigned
3959 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3961 static void figure_bus_target_lun(struct ctlr_info *h,
3962 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3964 u32 lunid = get_unaligned_le32(lunaddrbytes);
3966 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3967 /* physical device, target and lun filled in later */
3968 if (is_hba_lunid(lunaddrbytes)) {
3969 int bus = HPSA_HBA_BUS;
3972 bus = HPSA_LEGACY_HBA_BUS;
3973 hpsa_set_bus_target_lun(device,
3974 bus, 0, lunid & 0x3fff);
3976 /* defer target, lun assignment for physical devices */
3977 hpsa_set_bus_target_lun(device,
3978 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3981 /* It's a logical device */
3982 if (device->external) {
3983 hpsa_set_bus_target_lun(device,
3984 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3988 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
3994 * Get address of physical disk used for an ioaccel2 mode command:
3995 * 1. Extract ioaccel2 handle from the command.
3996 * 2. Find a matching ioaccel2 handle from list of physical disks.
3997 * 3. Return:
3998 * 1 and set scsi3addr to address of the matching physical disk, or
3999 * 0 if no matching physical disk was found.
4001 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
4002 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
4004 struct io_accel2_cmd *c2 =
4005 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
4006 unsigned long flags;
4009 spin_lock_irqsave(&h->devlock, flags);
4010 for (i = 0; i < h->ndevices; i++)
4011 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
4012 memcpy(scsi3addr, h->dev[i]->scsi3addr,
4013 sizeof(h->dev[i]->scsi3addr));
4014 spin_unlock_irqrestore(&h->devlock, flags);
4017 spin_unlock_irqrestore(&h->devlock, flags);
4021 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4022 int i, int nphysicals, int nlocal_logicals)
4024 /* In report logicals, local logicals are listed first,
4025 * then any externals.
4027 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4029 if (i == raid_ctlr_position)
4032 if (i < logicals_start)
4035 /* i is in logicals range, but still within local logicals */
4036 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4039 return 1; /* it's an external lun */
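/*
 * Worked example: with raid_ctlr_position == 0, nphysicals == 3 and
 * nlocal_logicals == 2, index 0 is the controller, 1-3 are physical,
 * 4-5 are local logicals, and 6 onward are external.
 */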
4043 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
4044 * logdev. The number of luns in physdev and logdev are returned in
4045 * *nphysicals and *nlogicals, respectively.
4046 * Returns 0 on success, -1 otherwise.
4048 static int hpsa_gather_lun_info(struct ctlr_info *h,
4049 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4050 struct ReportLUNdata *logdev, u32 *nlogicals)
4052 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4053 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4056 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4057 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4058 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4059 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4060 *nphysicals = HPSA_MAX_PHYS_LUN;
4062 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4063 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4066 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4067 /* Reject Logicals in excess of our max capability. */
4068 if (*nlogicals > HPSA_MAX_LUN) {
4069 dev_warn(&h->pdev->dev,
4070 "maximum logical LUNs (%d) exceeded. "
4071 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4072 *nlogicals - HPSA_MAX_LUN);
4073 *nlogicals = HPSA_MAX_LUN;
4075 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4076 dev_warn(&h->pdev->dev,
4077 "maximum logical + physical LUNs (%d) exceeded. "
4078 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4079 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4080 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
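/*
 * Example of the final clamp: assuming HPSA_MAX_PHYS_LUN is 1024
 * (per the driver header), 1000 physicals plus 100 logicals would
 * be trimmed to 1000 + 24 so the combined list still fits.
 */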
4085 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4086 int i, int nphysicals, int nlogicals,
4087 struct ReportExtendedLUNdata *physdev_list,
4088 struct ReportLUNdata *logdev_list)
4090 /* Helper function: figure out where the LUN ID info is coming from,
4091 * given index i, the lists of physical and logical devices, and where
4092 * in the list the raid controller is supposed to appear (first or last).
4095 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4096 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4098 if (i == raid_ctlr_position)
4099 return RAID_CTLR_LUNID;
4101 if (i < logicals_start)
4102 return &physdev_list->LUN[i -
4103 (raid_ctlr_position == 0)].lunid[0];
4105 if (i < last_device)
4106 return &logdev_list->LUN[i - nphysicals -
4107 (raid_ctlr_position == 0)][0];
4112 /* get physical drive ioaccel handle and queue depth */
4113 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4114 struct hpsa_scsi_dev_t *dev,
4115 struct ReportExtendedLUNdata *rlep, int rle_index,
4116 struct bmic_identify_physical_device *id_phys)
4119 struct ext_report_lun_entry *rle;
4122 * external targets don't support BMIC
4124 if (dev->external) {
4125 dev->queue_depth = 7;
4129 rle = &rlep->LUN[rle_index];
4131 dev->ioaccel_handle = rle->ioaccel_handle;
4132 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4133 dev->hba_ioaccel_enabled = 1;
4134 memset(id_phys, 0, sizeof(*id_phys));
4135 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4136 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4139 /* Reserve space for FW operations */
4140 #define DRIVE_CMDS_RESERVED_FOR_FW 2
4141 #define DRIVE_QUEUE_DEPTH 7
4143 le16_to_cpu(id_phys->current_queue_depth_limit) -
4144 DRIVE_CMDS_RESERVED_FOR_FW;
4146 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
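/*
 * Example: a drive whose BMIC identify reports
 * current_queue_depth_limit == 32 ends up with queue_depth == 30
 * (DRIVE_CMDS_RESERVED_FOR_FW held back); a failed BMIC query falls
 * back to the conservative depth of 7.
 */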
4149 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4150 struct ReportExtendedLUNdata *rlep, int rle_index,
4151 struct bmic_identify_physical_device *id_phys)
4153 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4155 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4156 this_device->hba_ioaccel_enabled = 1;
4158 memcpy(&this_device->active_path_index,
4159 &id_phys->active_path_number,
4160 sizeof(this_device->active_path_index));
4161 memcpy(&this_device->path_map,
4162 &id_phys->redundant_path_present_map,
4163 sizeof(this_device->path_map));
4164 memcpy(&this_device->box,
4165 &id_phys->alternate_paths_phys_box_on_port,
4166 sizeof(this_device->box));
4167 memcpy(&this_device->phys_connector,
4168 &id_phys->alternate_paths_phys_connector,
4169 sizeof(this_device->phys_connector));
4170 memcpy(&this_device->bay,
4171 &id_phys->phys_bay_in_box,
4172 sizeof(this_device->bay));
4175 /* get number of local logical disks. */
4176 static int hpsa_set_local_logical_count(struct ctlr_info *h,
4177 struct bmic_identify_controller *id_ctlr,
4183 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4187 memset(id_ctlr, 0, sizeof(*id_ctlr));
4188 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4190 if (id_ctlr->configured_logical_drive_count < 256)
4191 *nlocals = id_ctlr->configured_logical_drive_count;
4193 *nlocals = le16_to_cpu(
4194 id_ctlr->extended_logical_unit_count);
4200 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4202 struct bmic_identify_physical_device *id_phys;
4203 bool is_spare = false;
4206 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4210 rc = hpsa_bmic_id_physical_device(h,
4212 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4213 id_phys, sizeof(*id_phys));
4215 is_spare = (id_phys->more_flags >> 6) & 0x01;
4221 #define RPL_DEV_FLAG_NON_DISK 0x1
4222 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4223 #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4225 #define BMIC_DEVICE_TYPE_ENCLOSURE 6
4227 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4228 struct ext_report_lun_entry *rle)
4233 if (!MASKED_DEVICE(lunaddrbytes))
4236 device_flags = rle->device_flags;
4237 device_type = rle->device_type;
4239 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4240 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4245 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4248 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4252 * Spares may be spun down, and we do not want to
4253 * send an Inquiry to a RAID set spare drive, as
4254 * that would spin it up; that is a
4255 * performance hit because I/O to the RAID device
4256 * stops while the spin up occurs, which can take
4259 if (hpsa_is_disk_spare(h, lunaddrbytes))
4265 static void hpsa_update_scsi_devices(struct ctlr_info *h)
4267 /* The idea here is that we could get notified
4268 * that some devices have changed, so we issue a report
4269 * physical LUNs and a report logical LUNs command, and adjust
4270 * our list of devices accordingly.
4272 * The scsi3addr's of devices won't change so long as the
4273 * adapter is not reset. That means we can rescan and
4274 * tell which devices we already know about, vs. new
4275 * devices, vs. disappearing devices.
4277 struct ReportExtendedLUNdata *physdev_list = NULL;
4278 struct ReportLUNdata *logdev_list = NULL;
4279 struct bmic_identify_physical_device *id_phys = NULL;
4280 struct bmic_identify_controller *id_ctlr = NULL;
4283 u32 nlocal_logicals = 0;
4284 u32 ndev_allocated = 0;
4285 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4287 int i, n_ext_target_devs, ndevs_to_allocate;
4288 int raid_ctlr_position;
4289 bool physical_device;
4290 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4292 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
4293 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4294 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4295 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4296 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4297 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4299 if (!currentsd || !physdev_list || !logdev_list ||
4300 !tmpdevice || !id_phys || !id_ctlr) {
4301 dev_err(&h->pdev->dev, "out of memory\n");
4304 memset(lunzerobits, 0, sizeof(lunzerobits));
4306 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4308 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4309 logdev_list, &nlogicals)) {
4310 h->drv_req_rescan = 1;
4314 /* Set number of local logicals (non PTRAID) */
4315 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4316 dev_warn(&h->pdev->dev,
4317 "%s: Can't determine number of local logical devices.\n",
4321 /* We might see up to the maximum number of logical and physical disks
4322 * plus external target devices, and a device for the local RAID controller.
4325 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4327 /* Allocate the per device structures */
4328 for (i = 0; i < ndevs_to_allocate; i++) {
4329 if (i >= HPSA_MAX_DEVICES) {
4330 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4331 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4332 ndevs_to_allocate - HPSA_MAX_DEVICES);
4336 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4337 if (!currentsd[i]) {
4338 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
4339 __FILE__, __LINE__);
4340 h->drv_req_rescan = 1;
4346 if (is_scsi_rev_5(h))
4347 raid_ctlr_position = 0;
4349 raid_ctlr_position = nphysicals + nlogicals;
4351 /* adjust our table of devices */
4352 n_ext_target_devs = 0;
4353 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4354 u8 *lunaddrbytes, is_OBDR = 0;
4356 int phys_dev_index = i - (raid_ctlr_position == 0);
4357 bool skip_device = false;
4359 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4361 /* Figure out where the LUN ID info is coming from */
4362 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4363 i, nphysicals, nlogicals, physdev_list, logdev_list);
4365 /* Determine if this is a lun from an external target array */
4366 tmpdevice->external =
4367 figure_external_status(h, raid_ctlr_position, i,
4368 nphysicals, nlocal_logicals);
4371 * Skip over some devices such as a spare.
4373 if (!tmpdevice->external && physical_device) {
4374 skip_device = hpsa_skip_device(h, lunaddrbytes,
4375 &physdev_list->LUN[phys_dev_index]);
4380 /* Get device type, vendor, model, device id */
4381 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4383 if (rc == -ENOMEM) {
4384 dev_warn(&h->pdev->dev,
4385 "Out of memory, rescan deferred.\n");
4386 h->drv_req_rescan = 1;
4390 dev_warn(&h->pdev->dev,
4391 "Inquiry failed, skipping device.\n");
4395 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4396 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
4397 this_device = currentsd[ncurrent];
4399 /* Turn on discovery_polling if there are ext target devices.
4400 * Event-based change notification is unreliable for those.
4402 if (!h->discovery_polling) {
4403 if (tmpdevice->external) {
4404 h->discovery_polling = 1;
4405 dev_info(&h->pdev->dev,
4406 "External target, activate discovery polling.\n");
4411 *this_device = *tmpdevice;
4412 this_device->physical_device = physical_device;
4415 * Expose all devices except for physical devices that are masked.
4418 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4419 this_device->expose_device = 0;
4421 this_device->expose_device = 1;
4425 * Get the SAS address for physical devices that are exposed.
4427 if (this_device->physical_device && this_device->expose_device)
4428 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4430 switch (this_device->devtype) {
4432 /* We don't *really* support actual CD-ROM devices,
4433 * just "One Button Disaster Recovery" tape drive
4434 * which temporarily pretends to be a CD-ROM drive.
4435 * So we check that the device is really an OBDR tape
4436 * device by checking for "$DR-10" in bytes 43-48 of the inquiry data.
4444 if (this_device->physical_device) {
4445 /* The disk is in HBA mode. */
4446 /* Never use RAID mapper in HBA mode. */
4447 this_device->offload_enabled = 0;
4448 hpsa_get_ioaccel_drive_info(h, this_device,
4449 physdev_list, phys_dev_index, id_phys);
4450 hpsa_get_path_info(this_device,
4451 physdev_list, phys_dev_index, id_phys);
4456 case TYPE_MEDIUM_CHANGER:
4459 case TYPE_ENCLOSURE:
4460 if (!this_device->external)
4461 hpsa_get_enclosure_info(h, lunaddrbytes,
4462 physdev_list, phys_dev_index,
4467 /* Only present the Smartarray HBA as a RAID controller.
4468 * If it's a RAID controller other than the HBA itself
4469 * (an external RAID controller, MSA500 or similar), don't present it.
4472 if (!is_hba_lunid(lunaddrbytes))
4479 if (ncurrent >= HPSA_MAX_DEVICES)
4483 if (h->sas_host == NULL) {
4486 rc = hpsa_add_sas_host(h);
4488 dev_warn(&h->pdev->dev,
4489 "Could not add sas host %d\n", rc);
4494 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4497 for (i = 0; i < ndev_allocated; i++)
4498 kfree(currentsd[i]);
4500 kfree(physdev_list);
4506 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4507 struct scatterlist *sg)
4509 u64 addr64 = (u64) sg_dma_address(sg);
4510 unsigned int len = sg_dma_len(sg);
4512 desc->Addr = cpu_to_le64(addr64);
4513 desc->Len = cpu_to_le32(len);
4518 * hpsa_scatter_gather takes a struct scsi_cmnd (cmd), does the pci
4519 * dma mapping, and fills in the scatter gather entries of the hpsa command, cp.
4522 static int hpsa_scatter_gather(struct ctlr_info *h,
4523 struct CommandList *cp,
4524 struct scsi_cmnd *cmd)
4526 struct scatterlist *sg;
4527 int use_sg, i, sg_limit, chained, last_sg;
4528 struct SGDescriptor *curr_sg;
4530 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4532 use_sg = scsi_dma_map(cmd);
4537 goto sglist_finished;
4540 * If the number of entries is greater than the max for a single list,
4541 * then we have a chained list; we will set up all but one entry in the
4542 * first list (the last entry is saved for link information);
4543 * otherwise, we don't have a chained list and we'll set up each of
4544 * the entries in the one list.
4547 chained = use_sg > h->max_cmd_sg_entries;
4548 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4549 last_sg = scsi_sg_count(cmd) - 1;
4550 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4551 hpsa_set_sg_descriptor(curr_sg, sg);
4557 * Continue with the chained list. Set curr_sg to the chained
4558 * list. Modify the limit to the total count less the entries
4559 * we've already set up. Resume the scan at the list entry
4560 * where the previous loop left off.
4562 curr_sg = h->cmd_sg_list[cp->cmdindex];
4563 sg_limit = use_sg - sg_limit;
4564 for_each_sg(sg, sg, sg_limit, i) {
4565 hpsa_set_sg_descriptor(curr_sg, sg);
4570 /* Back the pointer up to the last entry and mark it as "last". */
4571 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4573 if (use_sg + chained > h->maxSG)
4574 h->maxSG = use_sg + chained;
4577 cp->Header.SGList = h->max_cmd_sg_entries;
4578 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4579 if (hpsa_map_sg_chain_block(h, cp)) {
4580 scsi_dma_unmap(cmd);
4588 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
4589 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
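/*
 * Chaining sketch for the logic above (sizes are hypothetical): with
 * max_cmd_sg_entries == 32, a 40-element mapping places 31
 * descriptors in the command itself, uses the 32nd slot as the chain
 * link, and puts the remaining 9 descriptors in the per-command chain
 * block; Header.SGList then advertises 32 while Header.SGTotal is
 * 41 (use_sg plus the chain entry).
 */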
4593 #define IO_ACCEL_INELIGIBLE (1)
4594 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4600 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
4607 if (*cdb_len == 6) {
4608 block = (((cdb[1] & 0x1F) << 16) |
4615 BUG_ON(*cdb_len != 12);
4616 block = get_unaligned_be32(&cdb[2]);
4617 block_cnt = get_unaligned_be32(&cdb[6]);
4619 if (block_cnt > 0xffff)
4620 return IO_ACCEL_INELIGIBLE;
4622 cdb[0] = is_write ? WRITE_10 : READ_10;
4624 cdb[2] = (u8) (block >> 24);
4625 cdb[3] = (u8) (block >> 16);
4626 cdb[4] = (u8) (block >> 8);
4627 cdb[5] = (u8) (block);
4629 cdb[7] = (u8) (block_cnt >> 8);
4630 cdb[8] = (u8) (block_cnt);
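/*
 * Worked example of the rewrite above (bytes are illustrative): a
 * READ(6) CDB of 08 01 23 45 10 00 encodes LBA 0x012345 and a
 * transfer length of 0x10 blocks; it becomes the READ(10) CDB
 * 28 00 00 01 23 45 00 00 10 00, with the LBA in bytes 2-5 and the
 * block count in bytes 7-8.  Counts that will not fit the 16-bit
 * READ/WRITE(10) field stay on the normal I/O path via
 * IO_ACCEL_INELIGIBLE.
 */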
4638 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4639 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4640 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4642 struct scsi_cmnd *cmd = c->scsi_cmd;
4643 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4645 unsigned int total_len = 0;
4646 struct scatterlist *sg;
4649 struct SGDescriptor *curr_sg;
4650 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4652 /* TODO: implement chaining support */
4653 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4654 atomic_dec(&phys_disk->ioaccel_cmds_out);
4655 return IO_ACCEL_INELIGIBLE;
4658 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4660 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4661 atomic_dec(&phys_disk->ioaccel_cmds_out);
4662 return IO_ACCEL_INELIGIBLE;
4665 c->cmd_type = CMD_IOACCEL1;
4667 /* Adjust the DMA address to point to the accelerated command buffer */
4668 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4669 (c->cmdindex * sizeof(*cp));
4670 BUG_ON(c->busaddr & 0x0000007F);
4672 use_sg = scsi_dma_map(cmd);
4674 atomic_dec(&phys_disk->ioaccel_cmds_out);
4680 scsi_for_each_sg(cmd, sg, use_sg, i) {
4681 addr64 = (u64) sg_dma_address(sg);
4682 len = sg_dma_len(sg);
4684 curr_sg->Addr = cpu_to_le64(addr64);
4685 curr_sg->Len = cpu_to_le32(len);
4686 curr_sg->Ext = cpu_to_le32(0);
4689 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4691 switch (cmd->sc_data_direction) {
4693 control |= IOACCEL1_CONTROL_DATA_OUT;
4695 case DMA_FROM_DEVICE:
4696 control |= IOACCEL1_CONTROL_DATA_IN;
4699 control |= IOACCEL1_CONTROL_NODATAXFER;
4702 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4703 cmd->sc_data_direction);
4708 control |= IOACCEL1_CONTROL_NODATAXFER;
4711 c->Header.SGList = use_sg;
4712 /* Fill out the command structure to submit */
4713 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4714 cp->transfer_len = cpu_to_le32(total_len);
4715 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4716 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4717 cp->control = cpu_to_le32(control);
4718 memcpy(cp->CDB, cdb, cdb_len);
4719 memcpy(cp->CISS_LUN, scsi3addr, 8);
4720 /* Tag was already set at init time. */
4721 enqueue_cmd_and_start_io(h, c);
4726 * Queue a command directly to a device behind the controller using the
4727 * I/O accelerator path.
4729 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4730 struct CommandList *c)
4732 struct scsi_cmnd *cmd = c->scsi_cmd;
4733 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4740 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4741 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4745 * Set encryption parameters for the ioaccel2 request
4747 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4748 struct CommandList *c, struct io_accel2_cmd *cp)
4750 struct scsi_cmnd *cmd = c->scsi_cmd;
4751 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4752 struct raid_map_data *map = &dev->raid_map;
4755 /* Are we doing encryption on this device */
4756 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4758 /* Set the data encryption key index. */
4759 cp->dekindex = map->dekindex;
4761 /* Set the encryption enable flag, encoded into direction field. */
4762 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4764 /* Set encryption tweak values based on logical block address.
4765 * If block size is 512, tweak value is LBA.
4766 * For other block sizes, tweak is (LBA * block size) / 512.
4768 switch (cmd->cmnd[0]) {
4769 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4772 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4773 (cmd->cmnd[2] << 8) |
4778 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4781 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4785 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4788 dev_err(&h->pdev->dev,
4789 "ERROR: %s: size (0x%x) not supported for encryption\n",
4790 __func__, cmd->cmnd[0]);
4795 if (le32_to_cpu(map->volume_blk_size) != 512)
4796 first_block = first_block *
4797 le32_to_cpu(map->volume_blk_size)/512;
4799 cp->tweak_lower = cpu_to_le32(first_block);
4800 cp->tweak_upper = cpu_to_le32(first_block >> 32);
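/*
 * Tweak example for the scaling above (hypothetical volume): with a
 * 4096-byte volume block size, an I/O at volume LBA 100 produces a
 * tweak of 100 * 4096 / 512 = 800, i.e. the equivalent 512-byte
 * sector index; 512-byte volumes use the LBA unchanged.
 */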
4803 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4804 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4805 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4807 struct scsi_cmnd *cmd = c->scsi_cmd;
4808 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4809 struct ioaccel2_sg_element *curr_sg;
4811 struct scatterlist *sg;
4819 if (!cmd->device->hostdata)
4822 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4824 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4825 atomic_dec(&phys_disk->ioaccel_cmds_out);
4826 return IO_ACCEL_INELIGIBLE;
4829 c->cmd_type = CMD_IOACCEL2;
4830 /* Adjust the DMA address to point to the accelerated command buffer */
4831 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4832 (c->cmdindex * sizeof(*cp));
4833 BUG_ON(c->busaddr & 0x0000007F);
4835 memset(cp, 0, sizeof(*cp));
4836 cp->IU_type = IOACCEL2_IU_TYPE;
4838 use_sg = scsi_dma_map(cmd);
4840 atomic_dec(&phys_disk->ioaccel_cmds_out);
4846 if (use_sg > h->ioaccel_maxsg) {
4847 addr64 = le64_to_cpu(
4848 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4849 curr_sg->address = cpu_to_le64(addr64);
4850 curr_sg->length = 0;
4851 curr_sg->reserved[0] = 0;
4852 curr_sg->reserved[1] = 0;
4853 curr_sg->reserved[2] = 0;
4854 curr_sg->chain_indicator = 0x80;
4856 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4858 scsi_for_each_sg(cmd, sg, use_sg, i) {
4859 addr64 = (u64) sg_dma_address(sg);
4860 len = sg_dma_len(sg);
4862 curr_sg->address = cpu_to_le64(addr64);
4863 curr_sg->length = cpu_to_le32(len);
4864 curr_sg->reserved[0] = 0;
4865 curr_sg->reserved[1] = 0;
4866 curr_sg->reserved[2] = 0;
4867 curr_sg->chain_indicator = 0;
4871 switch (cmd->sc_data_direction) {
4873 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4874 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4876 case DMA_FROM_DEVICE:
4877 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4878 cp->direction |= IOACCEL2_DIR_DATA_IN;
4881 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4882 cp->direction |= IOACCEL2_DIR_NO_DATA;
4885 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4886 cmd->sc_data_direction);
4891 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4892 cp->direction |= IOACCEL2_DIR_NO_DATA;
4895 /* Set encryption parameters, if necessary */
4896 set_encrypt_ioaccel2(h, c, cp);
4898 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4899 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4900 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4902 cp->data_len = cpu_to_le32(total_len);
4903 cp->err_ptr = cpu_to_le64(c->busaddr +
4904 offsetof(struct io_accel2_cmd, error_data));
4905 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4907 /* fill in sg elements */
4908 if (use_sg > h->ioaccel_maxsg) {
4910 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4911 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4912 atomic_dec(&phys_disk->ioaccel_cmds_out);
4913 scsi_dma_unmap(cmd);
4917 cp->sg_count = (u8) use_sg;
4919 enqueue_cmd_and_start_io(h, c);
4924 * Queue a command to the correct I/O accelerator path.
4926 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4927 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4928 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4930 if (!c->scsi_cmd->device)
4933 if (!c->scsi_cmd->device->hostdata)
4936 /* Try to honor the device's queue depth */
4937 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4938 phys_disk->queue_depth) {
4939 atomic_dec(&phys_disk->ioaccel_cmds_out);
4940 return IO_ACCEL_INELIGIBLE;
4942 if (h->transMethod & CFGTBL_Trans_io_accel1)
4943 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4944 cdb, cdb_len, scsi3addr,
4947 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4948 cdb, cdb_len, scsi3addr,
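/*
 * The atomic_inc_return()/atomic_dec() pair above acts as a lock-free
 * admission gate: with a (hypothetical) queue_depth of 30, the 31st
 * concurrent ioaccel command sees the counter exceed the limit,
 * undoes its increment, and is bounced back to the RAID path as
 * IO_ACCEL_INELIGIBLE instead of oversubscribing the drive.
 */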
4952 static void raid_map_helper(struct raid_map_data *map,
4953 int offload_to_mirror, u32 *map_index, u32 *current_group)
4955 if (offload_to_mirror == 0) {
4956 /* use physical disk in the first mirrored group. */
4957 *map_index %= le16_to_cpu(map->data_disks_per_row);
4961 /* determine mirror group that *map_index indicates */
4962 *current_group = *map_index /
4963 le16_to_cpu(map->data_disks_per_row);
4964 if (offload_to_mirror == *current_group)
4966 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4967 /* select map index from next group */
4968 *map_index += le16_to_cpu(map->data_disks_per_row);
4971 /* select map index from first group */
4972 *map_index %= le16_to_cpu(map->data_disks_per_row);
4975 } while (offload_to_mirror != *current_group);
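/*
 * Rotation sketch for the helper above (hypothetical 3-way mirror,
 * layout_map_count == 3, data_disks_per_row == 4): map_index 6 sits
 * in mirror group 1; offload_to_mirror == 0 folds it to 6 % 4 = 2 in
 * group 0, while offload_to_mirror == 2 advances it by
 * data_disks_per_row to 10, which lands in group 2.
 */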
4979 * Attempt to perform offload RAID mapping for a logical volume I/O.
4981 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4982 struct CommandList *c)
4984 struct scsi_cmnd *cmd = c->scsi_cmd;
4985 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4986 struct raid_map_data *map = &dev->raid_map;
4987 struct raid_map_disk_data *dd = &map->data[0];
4990 u64 first_block, last_block;
4993 u64 first_row, last_row;
4994 u32 first_row_offset, last_row_offset;
4995 u32 first_column, last_column;
4996 u64 r0_first_row, r0_last_row;
4997 u32 r5or6_blocks_per_row;
4998 u64 r5or6_first_row, r5or6_last_row;
4999 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5000 u32 r5or6_first_column, r5or6_last_column;
5001 u32 total_disks_per_row;
5003 u32 first_group, last_group, current_group;
5011 #if BITS_PER_LONG == 32
5014 int offload_to_mirror;
5019 /* check for valid opcode, get LBA and block count */
5020 switch (cmd->cmnd[0]) {
5024 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5025 (cmd->cmnd[2] << 8) |
5027 block_cnt = cmd->cmnd[4];
5035 (((u64) cmd->cmnd[2]) << 24) |
5036 (((u64) cmd->cmnd[3]) << 16) |
5037 (((u64) cmd->cmnd[4]) << 8) |
5040 (((u32) cmd->cmnd[7]) << 8) |
5047 (((u64) cmd->cmnd[2]) << 24) |
5048 (((u64) cmd->cmnd[3]) << 16) |
5049 (((u64) cmd->cmnd[4]) << 8) |
5052 (((u32) cmd->cmnd[6]) << 24) |
5053 (((u32) cmd->cmnd[7]) << 16) |
5054 (((u32) cmd->cmnd[8]) << 8) |
5061 (((u64) cmd->cmnd[2]) << 56) |
5062 (((u64) cmd->cmnd[3]) << 48) |
5063 (((u64) cmd->cmnd[4]) << 40) |
5064 (((u64) cmd->cmnd[5]) << 32) |
5065 (((u64) cmd->cmnd[6]) << 24) |
5066 (((u64) cmd->cmnd[7]) << 16) |
5067 (((u64) cmd->cmnd[8]) << 8) |
5070 (((u32) cmd->cmnd[10]) << 24) |
5071 (((u32) cmd->cmnd[11]) << 16) |
5072 (((u32) cmd->cmnd[12]) << 8) |
5076 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5078 last_block = first_block + block_cnt - 1;
5080 /* check for write to non-RAID-0 */
5081 if (is_write && dev->raid_level != 0)
5082 return IO_ACCEL_INELIGIBLE;
5084 /* check for invalid block or wraparound */
5085 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5086 last_block < first_block)
5087 return IO_ACCEL_INELIGIBLE;
5089 /* calculate stripe information for the request */
5090 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5091 le16_to_cpu(map->strip_size);
5092 strip_size = le16_to_cpu(map->strip_size);
5093 #if BITS_PER_LONG == 32
5094 tmpdiv = first_block;
5095 (void) do_div(tmpdiv, blocks_per_row);
5097 tmpdiv = last_block;
5098 (void) do_div(tmpdiv, blocks_per_row);
5100 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5101 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5102 tmpdiv = first_row_offset;
5103 (void) do_div(tmpdiv, strip_size);
5104 first_column = tmpdiv;
5105 tmpdiv = last_row_offset;
5106 (void) do_div(tmpdiv, strip_size);
5107 last_column = tmpdiv;
5109 first_row = first_block / blocks_per_row;
5110 last_row = last_block / blocks_per_row;
5111 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5112 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5113 first_column = first_row_offset / strip_size;
5114 last_column = last_row_offset / strip_size;
5117 /* if this isn't a single row/column then give to the controller */
5118 if ((first_row != last_row) || (first_column != last_column))
5119 return IO_ACCEL_INELIGIBLE;
5121 /* proceeding with driver mapping */
5122 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5123 le16_to_cpu(map->metadata_disks_per_row);
5124 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5125 le16_to_cpu(map->row_cnt);
5126 map_index = (map_row * total_disks_per_row) + first_column;
5128 switch (dev->raid_level) {
5130 break; /* nothing special to do */
5132 /* Handles load balance across RAID 1 members.
5133 * (2-drive R1 and R10 with even # of drives.)
5134 * Appropriate for SSDs, not optimal for HDDs
5136 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5137 if (dev->offload_to_mirror)
5138 map_index += le16_to_cpu(map->data_disks_per_row);
5139 dev->offload_to_mirror = !dev->offload_to_mirror;
5142 /* Handles N-way mirrors (R1-ADM)
5143 * and R10 with # of drives divisible by 3.
5145 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5147 offload_to_mirror = dev->offload_to_mirror;
5148 raid_map_helper(map, offload_to_mirror,
5149 &map_index, &current_group);
5150 /* set mirror group to use next time */
5152 (offload_to_mirror >=
5153 le16_to_cpu(map->layout_map_count) - 1)
5154 ? 0 : offload_to_mirror + 1;
5155 dev->offload_to_mirror = offload_to_mirror;
5156 /* Avoid direct use of dev->offload_to_mirror within this
5157 * function since multiple threads might simultaneously
5158 * increment it beyond the range of map->layout_map_count - 1.
5163 if (le16_to_cpu(map->layout_map_count) <= 1)
5166 /* Verify first and last block are in same RAID group */
5167 r5or6_blocks_per_row =
5168 le16_to_cpu(map->strip_size) *
5169 le16_to_cpu(map->data_disks_per_row);
5170 BUG_ON(r5or6_blocks_per_row == 0);
5171 stripesize = r5or6_blocks_per_row *
5172 le16_to_cpu(map->layout_map_count);
5173 #if BITS_PER_LONG == 32
5174 tmpdiv = first_block;
5175 first_group = do_div(tmpdiv, stripesize);
5176 tmpdiv = first_group;
5177 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5178 first_group = tmpdiv;
5179 tmpdiv = last_block;
5180 last_group = do_div(tmpdiv, stripesize);
5181 tmpdiv = last_group;
5182 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5183 last_group = tmpdiv;
5185 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5186 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5188 if (first_group != last_group)
5189 return IO_ACCEL_INELIGIBLE;
5191 /* Verify request is in a single row of RAID 5/6 */
5192 #if BITS_PER_LONG == 32
5193 tmpdiv = first_block;
5194 (void) do_div(tmpdiv, stripesize);
5195 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5196 tmpdiv = last_block;
5197 (void) do_div(tmpdiv, stripesize);
5198 r5or6_last_row = r0_last_row = tmpdiv;
5200 first_row = r5or6_first_row = r0_first_row =
5201 first_block / stripesize;
5202 r5or6_last_row = r0_last_row = last_block / stripesize;
5204 if (r5or6_first_row != r5or6_last_row)
5205 return IO_ACCEL_INELIGIBLE;
5208 /* Verify request is in a single column */
5209 #if BITS_PER_LONG == 32
5210 tmpdiv = first_block;
5211 first_row_offset = do_div(tmpdiv, stripesize);
5212 tmpdiv = first_row_offset;
5213 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5214 r5or6_first_row_offset = first_row_offset;
5215 tmpdiv = last_block;
5216 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5217 tmpdiv = r5or6_last_row_offset;
5218 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5219 tmpdiv = r5or6_first_row_offset;
5220 (void) do_div(tmpdiv, map->strip_size);
5221 first_column = r5or6_first_column = tmpdiv;
5222 tmpdiv = r5or6_last_row_offset;
5223 (void) do_div(tmpdiv, map->strip_size);
5224 r5or6_last_column = tmpdiv;
5226 first_row_offset = r5or6_first_row_offset =
5227 (u32)((first_block % stripesize) %
5228 r5or6_blocks_per_row);
5230 r5or6_last_row_offset =
5231 (u32)((last_block % stripesize) %
5232 r5or6_blocks_per_row);
5234 first_column = r5or6_first_column =
5235 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5237 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5239 if (r5or6_first_column != r5or6_last_column)
5240 return IO_ACCEL_INELIGIBLE;
5242 /* Request is eligible */
5243 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5244 le16_to_cpu(map->row_cnt);
5246 map_index = (first_group *
5247 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5248 (map_row * total_disks_per_row) + first_column;
5251 return IO_ACCEL_INELIGIBLE;
5254 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5255 return IO_ACCEL_INELIGIBLE;
5257 c->phys_disk = dev->phys_disk[map_index];
5259 return IO_ACCEL_INELIGIBLE;
5261 disk_handle = dd[map_index].ioaccel_handle;
5262 disk_block = le64_to_cpu(map->disk_starting_blk) +
5263 first_row * le16_to_cpu(map->strip_size) +
5264 (first_row_offset - first_column *
5265 le16_to_cpu(map->strip_size));
5266 disk_block_cnt = block_cnt;
5268 /* handle differing logical/physical block sizes */
5269 if (map->phys_blk_shift) {
5270 disk_block <<= map->phys_blk_shift;
5271 disk_block_cnt <<= map->phys_blk_shift;
5273 BUG_ON(disk_block_cnt > 0xffff);
5275 /* build the new CDB for the physical disk I/O */
5276 if (disk_block > 0xffffffff) {
5277 cdb[0] = is_write ? WRITE_16 : READ_16;
5279 cdb[2] = (u8) (disk_block >> 56);
5280 cdb[3] = (u8) (disk_block >> 48);
5281 cdb[4] = (u8) (disk_block >> 40);
5282 cdb[5] = (u8) (disk_block >> 32);
5283 cdb[6] = (u8) (disk_block >> 24);
5284 cdb[7] = (u8) (disk_block >> 16);
5285 cdb[8] = (u8) (disk_block >> 8);
5286 cdb[9] = (u8) (disk_block);
5287 cdb[10] = (u8) (disk_block_cnt >> 24);
5288 cdb[11] = (u8) (disk_block_cnt >> 16);
5289 cdb[12] = (u8) (disk_block_cnt >> 8);
5290 cdb[13] = (u8) (disk_block_cnt);
5295 cdb[0] = is_write ? WRITE_10 : READ_10;
5297 cdb[2] = (u8) (disk_block >> 24);
5298 cdb[3] = (u8) (disk_block >> 16);
5299 cdb[4] = (u8) (disk_block >> 8);
5300 cdb[5] = (u8) (disk_block);
5302 cdb[7] = (u8) (disk_block_cnt >> 8);
5303 cdb[8] = (u8) (disk_block_cnt);
5307 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5309 dev->phys_disk[map_index]);
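/*
 * Worked example of the mapping arithmetic above (hypothetical
 * RAID 0 volume, strip_size == 128, data_disks_per_row == 4, so
 * blocks_per_row == 512): a 16-block read at volume LBA 1000 yields
 * first_row = last_row = 1, first_row_offset = 488 and first_column
 * = last_column = 3, so the request fits a single strip and is
 * eligible; the disk-relative LBA becomes disk_starting_blk +
 * 1 * 128 + (488 - 3 * 128) = disk_starting_blk + 232.
 */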
5313 * Submit commands down the "normal" RAID stack path
5314 * All callers to hpsa_ciss_submit must check lockup_detected
5315 * beforehand: before (optionally) and after calling cmd_alloc.
5317 static int hpsa_ciss_submit(struct ctlr_info *h,
5318 struct CommandList *c, struct scsi_cmnd *cmd,
5319 unsigned char scsi3addr[])
5321 cmd->host_scribble = (unsigned char *) c;
5322 c->cmd_type = CMD_SCSI;
5324 c->Header.ReplyQueue = 0; /* unused in simple mode */
5325 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
5326 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5328 /* Fill in the request block... */
5330 c->Request.Timeout = 0;
5331 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5332 c->Request.CDBLen = cmd->cmd_len;
5333 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5334 switch (cmd->sc_data_direction) {
5336 c->Request.type_attr_dir =
5337 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5339 case DMA_FROM_DEVICE:
5340 c->Request.type_attr_dir =
5341 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5344 c->Request.type_attr_dir =
5345 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5347 case DMA_BIDIRECTIONAL:
5348 /* This can happen if a buggy application does a scsi passthru
5349 * and sets both inlen and outlen to non-zero. (See
5350 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
5353 c->Request.type_attr_dir =
5354 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5355 /* This is technically wrong, and hpsa controllers should
5356 * reject it with CMD_INVALID, which is the most correct
5357 * response, but non-fibre backends appear to let it
5358 * slide by, and give the same results as if this field
5359 * were set correctly. Either way is acceptable for
5360 * our purposes here.
5366 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5367 cmd->sc_data_direction);
5372 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5373 hpsa_cmd_resolve_and_free(h, c);
5374 return SCSI_MLQUEUE_HOST_BUSY;
5376 enqueue_cmd_and_start_io(h, c);
5377 /* the cmd'll come back via intr handler in complete_scsi_command() */
5381 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5382 struct CommandList *c)
5384 dma_addr_t cmd_dma_handle, err_dma_handle;
5386 /* Zero out all of commandlist except the last field, refcount */
5387 memset(c, 0, offsetof(struct CommandList, refcount));
5388 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5389 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5390 c->err_info = h->errinfo_pool + index;
5391 memset(c->err_info, 0, sizeof(*c->err_info));
5392 err_dma_handle = h->errinfo_pool_dhandle
5393 + index * sizeof(*c->err_info);
5394 c->cmdindex = index;
5395 c->busaddr = (u32) cmd_dma_handle;
5396 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5397 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5399 c->scsi_cmd = SCSI_CMD_IDLE;
5402 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5406 for (i = 0; i < h->nr_cmds; i++) {
5407 struct CommandList *c = h->cmd_pool + i;
5409 hpsa_cmd_init(h, i, c);
5410 atomic_set(&c->refcount, 0);
5414 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5415 struct CommandList *c)
5417 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5419 BUG_ON(c->cmdindex != index);
5421 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5422 memset(c->err_info, 0, sizeof(*c->err_info));
5423 c->busaddr = (u32) cmd_dma_handle;
5426 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5427 struct CommandList *c, struct scsi_cmnd *cmd,
5428 unsigned char *scsi3addr)
5430 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5431 int rc = IO_ACCEL_INELIGIBLE;
5434 return SCSI_MLQUEUE_HOST_BUSY;
5436 cmd->host_scribble = (unsigned char *) c;
5438 if (dev->offload_enabled) {
5439 hpsa_cmd_init(h, c->cmdindex, c);
5440 c->cmd_type = CMD_SCSI;
5442 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5443 if (rc < 0) /* scsi_dma_map failed. */
5444 rc = SCSI_MLQUEUE_HOST_BUSY;
5445 } else if (dev->hba_ioaccel_enabled) {
5446 hpsa_cmd_init(h, c->cmdindex, c);
5447 c->cmd_type = CMD_SCSI;
5449 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5450 if (rc < 0) /* scsi_dma_map failed. */
5451 rc = SCSI_MLQUEUE_HOST_BUSY;
5456 static void hpsa_command_resubmit_worker(struct work_struct *work)
5458 struct scsi_cmnd *cmd;
5459 struct hpsa_scsi_dev_t *dev;
5460 struct CommandList *c = container_of(work, struct CommandList, work);
5463 dev = cmd->device->hostdata;
5465 cmd->result = DID_NO_CONNECT << 16;
5466 return hpsa_cmd_free_and_done(c->h, c, cmd);
5468 if (c->reset_pending)
5469 return hpsa_cmd_resolve_and_free(c->h, c);
5470 if (c->abort_pending)
5471 return hpsa_cmd_abort_and_free(c->h, c, cmd);
5472 if (c->cmd_type == CMD_IOACCEL2) {
5473 struct ctlr_info *h = c->h;
5474 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5477 if (c2->error_data.serv_response ==
5478 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5479 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5482 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5484 * If we get here, it means dma mapping failed.
5485 * Try again via scsi mid layer, which will
5486 * then get SCSI_MLQUEUE_HOST_BUSY.
5488 cmd->result = DID_IMM_RETRY << 16;
5489 return hpsa_cmd_free_and_done(h, c, cmd);
5491 /* else, fall thru and resubmit down CISS path */
5494 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5495 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5497 * If we get here, it means dma mapping failed. Try
5498 * again via scsi mid layer, which will then get
5499 * SCSI_MLQUEUE_HOST_BUSY.
5501 * hpsa_ciss_submit will have already freed c
5502 * if it encountered a dma mapping failure.
5504 cmd->result = DID_IMM_RETRY << 16;
5505 cmd->scsi_done(cmd);
5509 /* Running without struct Scsi_Host->host_lock held ("lockless" mode) */
5510 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5512 struct ctlr_info *h;
5513 struct hpsa_scsi_dev_t *dev;
5514 unsigned char scsi3addr[8];
5515 struct CommandList *c;
5518 /* Get the ptr to our adapter structure out of cmd->host. */
5519 h = sdev_to_hba(cmd->device);
5521 BUG_ON(cmd->request->tag < 0);
5523 dev = cmd->device->hostdata;
5525 cmd->result = DID_NO_CONNECT << 16;
5526 cmd->scsi_done(cmd);
5531 cmd->result = DID_NO_CONNECT << 16;
5532 cmd->scsi_done(cmd);
5536 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5538 if (unlikely(lockup_detected(h))) {
5539 cmd->result = DID_NO_CONNECT << 16;
5540 cmd->scsi_done(cmd);
5543 c = cmd_tagged_alloc(h, cmd);
5546 * Call alternate submit routine for I/O accelerated commands.
5547 * Retries always go down the normal I/O path.
5549 if (likely(cmd->retries == 0 &&
5550 cmd->request->cmd_type == REQ_TYPE_FS &&
5551 h->acciopath_status)) {
5552 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5555 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5556 hpsa_cmd_resolve_and_free(h, c);
5557 return SCSI_MLQUEUE_HOST_BUSY;
5560 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5563 static void hpsa_scan_complete(struct ctlr_info *h)
5565 unsigned long flags;
5567 spin_lock_irqsave(&h->scan_lock, flags);
5568 h->scan_finished = 1;
5569 wake_up_all(&h->scan_wait_queue);
5570 spin_unlock_irqrestore(&h->scan_lock, flags);
5573 static void hpsa_scan_start(struct Scsi_Host *sh)
5575 struct ctlr_info *h = shost_to_hba(sh);
5576 unsigned long flags;
5579 * Don't let rescans be initiated on a controller known to be locked
5580 * up. If the controller locks up *during* a rescan, that thread is
5581 * probably hosed, but at least we can prevent new rescan threads from
5582 * piling up on a locked up controller.
5584 if (unlikely(lockup_detected(h)))
5585 return hpsa_scan_complete(h);
5587 /* wait until any scan already in progress is finished. */
5589 spin_lock_irqsave(&h->scan_lock, flags);
5590 if (h->scan_finished)
5592 spin_unlock_irqrestore(&h->scan_lock, flags);
5593 wait_event(h->scan_wait_queue, h->scan_finished);
5594 /* Note: We don't need to worry about a race between this
5595 * thread and driver unload because the midlayer will
5596 * have incremented the reference count, so unload won't
5597 * happen if we're in here.
5600 h->scan_finished = 0; /* mark scan as in progress */
5601 spin_unlock_irqrestore(&h->scan_lock, flags);
5603 if (unlikely(lockup_detected(h)))
5604 return hpsa_scan_complete(h);
5607 * Do the scan after a reset completion
5609 if (h->reset_in_progress) {
5610 h->drv_req_rescan = 1;
5614 hpsa_update_scsi_devices(h);
5616 hpsa_scan_complete(h);
5619 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5621 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5628 else if (qdepth > logical_drive->queue_depth)
5629 qdepth = logical_drive->queue_depth;
5631 return scsi_change_queue_depth(sdev, qdepth);
5634 static int hpsa_scan_finished(struct Scsi_Host *sh,
5635 unsigned long elapsed_time)
5637 struct ctlr_info *h = shost_to_hba(sh);
5638 unsigned long flags;
5641 spin_lock_irqsave(&h->scan_lock, flags);
5642 finished = h->scan_finished;
5643 spin_unlock_irqrestore(&h->scan_lock, flags);
5647 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5649 struct Scsi_Host *sh;
5651 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5653 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5660 sh->max_channel = 3;
5661 sh->max_cmd_len = MAX_COMMAND_SIZE;
5662 sh->max_lun = HPSA_MAX_LUN;
5663 sh->max_id = HPSA_MAX_LUN;
5664 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5665 sh->cmd_per_lun = sh->can_queue;
5666 sh->sg_tablesize = h->maxsgentries;
5667 sh->transportt = hpsa_sas_transport_template;
5668 sh->hostdata[0] = (unsigned long) h;
5669 sh->irq = pci_irq_vector(h->pdev, 0);
5670 sh->unique_id = sh->irq;
5676 static int hpsa_scsi_add_host(struct ctlr_info *h)
5680 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5682 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5685 scsi_scan_host(h->scsi_host);
5690 * The block layer has already gone to the trouble of picking out a unique,
5691 * small-integer tag for this request. We use an offset from that value as
5692 * an index to select our command block. (The offset allows us to reserve the
5693 * low-numbered entries for our own uses.)
5695 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5697 int idx = scmd->request->tag;
5702 /* Offset to leave space for internal cmds. */
5703 return idx += HPSA_NRESERVED_CMDS;
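/*
 * Example of the offset above: block-layer tag 0 maps to command
 * block HPSA_NRESERVED_CMDS, tag 1 to HPSA_NRESERVED_CMDS + 1, and
 * so on, leaving indexes 0 .. HPSA_NRESERVED_CMDS - 1 for
 * driver-internal commands obtained through cmd_alloc().
 */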
5707 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5708 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5710 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5711 struct CommandList *c, unsigned char lunaddr[],
5716 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5717 (void) fill_cmd(c, TEST_UNIT_READY, h,
5718 NULL, 0, 0, lunaddr, TYPE_CMD);
5719 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5722 /* no unmap needed here because no data xfer. */
5724 /* Check if the unit is already ready. */
5725 if (c->err_info->CommandStatus == CMD_SUCCESS)
5729 * The first command sent after reset will receive "unit attention" to
5730 * indicate that the LUN has been reset...this is actually what we're
5731 * looking for (but, success is good too).
5733 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5734 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5735 (c->err_info->SenseInfo[2] == NO_SENSE ||
5736 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5743 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5744 * returns zero when the unit is ready, and non-zero when giving up.
5746 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5747 struct CommandList *c,
5748 unsigned char lunaddr[], int reply_queue)
5752 int waittime = 1; /* seconds */
5754 /* Send test unit ready until device ready, or give up. */
5755 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5758 * Wait for a bit. Do this first, because if we send
5759 * the TUR right away, the reset will just abort it.
5761 msleep(1000 * waittime);
5763 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5767 /* Increase wait time with each try, up to a point. */
5768 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5771 dev_warn(&h->pdev->dev,
5772 "waiting %d secs for device to become ready.\n",
5779 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5780 unsigned char lunaddr[],
5787 struct CommandList *c;
5792 * If no specific reply queue was requested, then send the TUR
5793 * repeatedly, requesting a reply on each reply queue; otherwise execute
5794 * the loop exactly once using only the specified queue.
5796 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5798 last_queue = h->nreply_queues - 1;
5800 first_queue = reply_queue;
5801 last_queue = reply_queue;
5804 for (rq = first_queue; rq <= last_queue; rq++) {
5805 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5811 dev_warn(&h->pdev->dev, "giving up on device.\n");
5813 dev_warn(&h->pdev->dev, "device is ready.\n");
5819 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5820 * complaining. Doing a host- or bus-reset can't do anything good here.
5822 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5825 struct ctlr_info *h;
5826 struct hpsa_scsi_dev_t *dev;
5830 /* find the controller to which the command to be aborted was sent */
5831 h = sdev_to_hba(scsicmd->device);
5832 if (h == NULL) /* paranoia */
5835 if (lockup_detected(h))
5838 dev = scsicmd->device->hostdata;
5840 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5844 /* if controller locked up, we can guarantee command won't complete */
5845 if (lockup_detected(h)) {
5846 snprintf(msg, sizeof(msg),
5847 "cmd %d RESET FAILED, lockup detected",
5848 hpsa_get_cmd_index(scsicmd));
5849 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5853 /* this reset request might be the result of a lockup; check */
5854 if (detect_controller_lockup(h)) {
5855 snprintf(msg, sizeof(msg),
5856 "cmd %d RESET FAILED, new lockup detected",
5857 hpsa_get_cmd_index(scsicmd));
5858 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5862 /* Do not attempt on controller */
5863 if (is_hba_lunid(dev->scsi3addr))
5866 if (is_logical_dev_addr_mode(dev->scsi3addr))
5867 reset_type = HPSA_DEVICE_RESET_MSG;
5869 reset_type = HPSA_PHYS_TARGET_RESET;
5871 sprintf(msg, "resetting %s",
5872 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5873 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5875 h->reset_in_progress = 1;
5877 /* send a reset to the SCSI LUN which the command was sent to */
5878 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5879 DEFAULT_REPLY_QUEUE);
5880 sprintf(msg, "reset %s %s",
5881 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5882 rc == 0 ? "completed successfully" : "failed");
5883 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5884 h->reset_in_progress = 0;
5885 return rc == 0 ? SUCCESS : FAILED;
5888 static void swizzle_abort_tag(u8 *tag)
5892 memcpy(original_tag, tag, 8);
5893 tag[0] = original_tag[3];
5894 tag[1] = original_tag[2];
5895 tag[2] = original_tag[1];
5896 tag[3] = original_tag[0];
5897 tag[4] = original_tag[7];
5898 tag[5] = original_tag[6];
5899 tag[6] = original_tag[5];
5900 tag[7] = original_tag[4];
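/*
 * Swizzle example: the transform above byte-reverses each 32-bit half
 * of the 8-byte tag, so 00 01 02 03 04 05 06 07 becomes
 * 03 02 01 00 07 06 05 04 - an endianness flip of two u32s for
 * firmware that expects the abort tag in the opposite byte order.
 */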
5903 static void hpsa_get_tag(struct ctlr_info *h,
5904 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5907 if (c->cmd_type == CMD_IOACCEL1) {
5908 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5909 &h->ioaccel_cmd_pool[c->cmdindex];
5910 tag = le64_to_cpu(cm1->tag);
5911 *tagupper = cpu_to_le32(tag >> 32);
5912 *taglower = cpu_to_le32(tag);
5915 if (c->cmd_type == CMD_IOACCEL2) {
5916 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5917 &h->ioaccel2_cmd_pool[c->cmdindex];
5918 /* upper tag not used in ioaccel2 mode */
5919 memset(tagupper, 0, sizeof(*tagupper));
5920 *taglower = cm2->Tag;
5923 tag = le64_to_cpu(c->Header.tag);
5924 *tagupper = cpu_to_le32(tag >> 32);
5925 *taglower = cpu_to_le32(tag);
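/*
 * Split example (hypothetical tag): a 64-bit CISS tag of
 * 0x0000000100000040 is reported as tagupper 0x00000001 and taglower
 * 0x00000040; ioaccel2 commands carry only a 32-bit tag, so tagupper
 * is always reported as zero for them.
 */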
5928 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5929 struct CommandList *abort, int reply_queue)
5932 struct CommandList *c;
5933 struct ErrorInfo *ei;
5934 __le32 tagupper, taglower;
5938 /* fill_cmd can't fail here, no buffer to map */
5939 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5940 0, 0, scsi3addr, TYPE_MSG);
5941 if (h->needs_abort_tags_swizzled)
5942 swizzle_abort_tag(&c->Request.CDB[4]);
5943 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5944 hpsa_get_tag(h, abort, &taglower, &tagupper);
5945 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5946 __func__, tagupper, taglower);
5947 /* no unmap needed here because no data xfer. */
5950 switch (ei->CommandStatus) {
5953 case CMD_TMF_STATUS:
5954 rc = hpsa_evaluate_tmf_status(h, c);
5956 case CMD_UNABORTABLE: /* Very common, don't make noise. */
5960 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5961 __func__, tagupper, taglower);
5962 hpsa_scsi_interpret_error(h, c);
5967 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5968 __func__, tagupper, taglower);
5972 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5973 struct CommandList *command_to_abort, int reply_queue)
5975 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5976 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5977 struct io_accel2_cmd *c2a =
5978 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5979 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5980 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5986 * We're overlaying struct hpsa_tmf_struct on top of something which
5987 * was allocated as a struct io_accel2_cmd, so we better be sure it
5988 * actually fits, and doesn't overrun the error info space.
5990 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5991 sizeof(struct io_accel2_cmd));
5992 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5993 offsetof(struct hpsa_tmf_struct, error_len) +
5994 sizeof(ac->error_len));
5996 c->cmd_type = IOACCEL2_TMF;
5997 c->scsi_cmd = SCSI_CMD_BUSY;
5999 /* Adjust the DMA address to point to the accelerated command buffer */
6000 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
6001 (c->cmdindex * sizeof(struct io_accel2_cmd));
6002 BUG_ON(c->busaddr & 0x0000007F);
6004 memset(ac, 0, sizeof(*c2)); /* yes this is correct */
6005 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
6006 ac->reply_queue = reply_queue;
6007 ac->tmf = IOACCEL2_TMF_ABORT;
6008 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
6009 memset(ac->lun_id, 0, sizeof(ac->lun_id));
6010 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
6011 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
6012 ac->error_ptr = cpu_to_le64(c->busaddr +
6013 offsetof(struct io_accel2_cmd, error_data));
6014 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
6017 /* ioaccel2 path firmware cannot handle abort task requests.
6018 * Change abort requests to physical target reset, and send to the
6019 * address of the physical disk used for the ioaccel 2 command.
6020 * Return 0 on success (IO_OK)
6024 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
6025 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
6028 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
6029 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
6030 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
6031 unsigned char *psa = &phys_scsi3addr[0];
6033 /* Get a pointer to the hpsa logical device. */
6034 scmd = abort->scsi_cmd;
6035 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
6037 dev_warn(&h->pdev->dev,
6038 "Cannot abort: no device pointer for command.\n");
6039 return -1; /* not abortable */
6042 if (h->raid_offload_debug > 0)
6043 dev_info(&h->pdev->dev,
6044 "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
6045 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
6046 "Reset as abort", scsi3addr);
6048 if (!dev->offload_enabled) {
6049 dev_warn(&h->pdev->dev,
6050 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
6051 return -1; /* not abortable */
6054 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
6055 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
6056 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
6057 return -1; /* not abortable */
6060 /* send the reset */
6061 if (h->raid_offload_debug > 0)
6062 dev_info(&h->pdev->dev,
6063 "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
6065 rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
6067 dev_warn(&h->pdev->dev,
6068 "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
6070 return rc; /* failed to reset */
6073 /* wait for device to recover */
6074 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
6075 dev_warn(&h->pdev->dev,
6076 "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
6078 return -1; /* failed to recover */
6081 /* device recovered */
6082 dev_info(&h->pdev->dev,
6083 "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
6086 return rc; /* success */
6089 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
6090 struct CommandList *abort, int reply_queue)
6093 struct CommandList *c;
6094 __le32 taglower, tagupper;
6095 struct hpsa_scsi_dev_t *dev;
6096 struct io_accel2_cmd *c2;
6098 dev = abort->scsi_cmd->device->hostdata;
6102 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
6106 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
6107 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
6108 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
6109 hpsa_get_tag(h, abort, &taglower, &tagupper);
6110 dev_dbg(&h->pdev->dev,
6111 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
6112 __func__, tagupper, taglower);
6113 /* no unmap needed here because no data xfer. */
6115 dev_dbg(&h->pdev->dev,
6116 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
6117 __func__, tagupper, taglower, c2->error_data.serv_response);
6118 switch (c2->error_data.serv_response) {
6119 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
6120 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
6123 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
6124 case IOACCEL2_SERV_RESPONSE_FAILURE:
6125 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
6129 dev_warn(&h->pdev->dev,
6130 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
6131 __func__, tagupper, taglower,
6132 c2->error_data.serv_response);
6136 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
6137 tagupper, taglower);
6141 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
6142 struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
6145 * I/O accelerator mode 2 commands should be aborted via the
6146 * accelerated path, since the RAID path is unaware of these commands,
6147 * but not all underlying firmware can handle abort TMF.
6148 * Change abort to physical device reset when abort TMF is unsupported.
6150 if (abort->cmd_type == CMD_IOACCEL2) {
6151 if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
6152 dev->physical_device)
6153 return hpsa_send_abort_ioaccel2(h, abort,
6156 return hpsa_send_reset_as_abort_ioaccel2(h,
6158 abort, reply_queue);
6160 return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
6163 /* Find out which reply queue a command was meant to return on */
6164 static int hpsa_extract_reply_queue(struct ctlr_info *h,
6165 struct CommandList *c)
6167 if (c->cmd_type == CMD_IOACCEL2)
6168 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
6169 return c->Header.ReplyQueue;
6173 * Limit concurrency of abort commands to prevent
6174 * over-subscription of commands
6176 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
6178 #define ABORT_CMD_WAIT_MSECS 5000
6179 return !wait_event_timeout(h->abort_cmd_wait_queue,
6180 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
6181 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
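/*
 * The helper above bounds abort concurrency:
 * atomic_dec_if_positive() succeeds only while abort_cmds_available
 * is positive, so a caller that cannot claim a slot within
 * ABORT_CMD_WAIT_MSECS (5 seconds) gives up rather than piling
 * unbounded aborts onto the controller.
 */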
/* Send an abort for the specified command.
 *	If the device and controller support it,
 *		send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				__func__);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command", sc);

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
	if (abort->scsi_cmd != sc) {
		cmd_free(h, abort);
		return SUCCESS;
	}

	abort->abort_pending = true;
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml,
			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
			as->cmd_len, as->cmnd[0], as->cmnd[1],
			as->serial_number);
	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"%s FAILED, timeout waiting for an abort command to become available.\n",
			msg);
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
				"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
	wait_event(h->event_sync_wait_queue,
		   abort->scsi_cmd != sc || lockup_detected(h));
	cmd_free(h, abort);
	return !lockup_detected(h) ? SUCCESS : FAILED;
}
/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	atomic_inc(&c->refcount);
	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value.  Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		dev_err(&h->pdev->dev,
			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
			idx);
		if (c->scsi_cmd != NULL)
			scsi_print_command(c->scsi_cmd);
		scsi_print_command(scmd);
	}

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.  (There's no point
	 * in checking the result of the decrement, since we cannot guarantee
	 * that there isn't a concurrent abort which is also accessing it.)
	 */
	(void)atomic_dec(&c->refcount);
}
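/*
 * Illustrative sketch (not driver code): under the tagged scheme a command's
 * pool slot is fully determined by the block layer tag, so lookup is O(1)
 * with no locking:
 *
 *	int idx = hpsa_get_cmd_index(scmd);	// block request tag
 *	struct CommandList *c = h->cmd_pool + idx;
 *
 * The refcount exists only so that a concurrent abort holding a reference
 * cannot have the block freed out from under it; freeing is just a decrement.
 */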
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */

static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized.  Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}
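/*
 * Worked example of the bit arithmetic above (illustrative only): on a
 * 64-bit machine, slot i = 70 maps to word i / BITS_PER_LONG = 1 and bit
 * i & (BITS_PER_LONG - 1) = 6.  The mask form works because BITS_PER_LONG
 * is a power of two, so "& (BITS_PER_LONG - 1)" equals "% BITS_PER_LONG".
 * Note the refcount, not the bitmap, is the real arbiter of ownership; the
 * bitmap just makes the search for a free slot cheap.
 */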
/*
 * This is the complementary operation to cmd_alloc().  Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
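/*
 * Illustrative note (not driver code): the two *32 handlers above exist
 * because a 32-bit userland passes 32-bit pointers inside its ioctl
 * structures.  Each handler widens the struct, converts the embedded buffer
 * pointer, and re-issues the native ioctl against a scratch copy in
 * user-accessible memory, e.g.:
 *
 *	u32 cp;
 *	err |= get_user(cp, &arg32->buf);	// 32-bit user pointer
 *	arg64.buf = compat_ptr(cp);		// widen to a 64-bit __user ptr
 *
 * Everything else (GETPCIINFO, GETDRIVVER, ...) is layout-compatible between
 * 32- and 64-bit and is passed straight through to hpsa_ioctl().
 */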
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
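/*
 * Worked example (illustrative only): for HPSA_DRIVER_VERSION "3.4.16",
 * sscanf() yields vmaj = 3, vmin = 4, vsubmin = 16, and the packed value
 * returned to userland is
 *
 *	DriverVer = (3 << 16) | (4 << 8) | 16 = 0x00030410
 *
 * i.e. one byte per component, with the major number in the third byte.
 */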
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;

		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
					struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
				u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return;
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	u64 tag; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			/* Spec says this should be BMIC_WRITE */
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
			break;
		case BMIC_SET_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_SUBSYSTEM_INFORMATION:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_STORAGE_BOX_PARAMS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_CONTROLLER:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = 0;
			c->Request.CDB[3] = 0;
			c->Request.CDB[4] = 0;
			c->Request.CDB[5] = 0;
			c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
			return -1;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case HPSA_PHYS_TARGET_RESET:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = HPSA_RESET;
			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
			/* Physical target reset needs no control bytes 4-7 */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			memcpy(&tag, buff, sizeof(tag));
			dev_dbg(&h->pdev->dev,
				"Abort Tag:0x%016llx using rqst Tag:0x%016llx",
				tag, c->Header.tag);
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
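/*
 * Worked example (illustrative only): with 4 KiB pages, a BAR address of
 * base = 0xfebf0250 gives page_base = 0xfebf0000 and page_offs = 0x250;
 * we ioremap page_offs + size bytes starting at page_base and hand back
 * the remapped address plus the offset, so callers see "base" directly.
 */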
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
		complete(c->waiting);
}
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.)  Ignoring.\n");

	return 1;
}
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}
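/*
 * Illustrative sketch (not driver code): hpsa_request_irqs() stores
 * h->q[x] = x, so given queue == &h->q[x], (queue - *queue) == &h->q[0],
 * and container_of() recovers the enclosing ctlr_info:
 *
 *	u8 *queue = &h->q[3];		// *queue == 3
 *	u8 *base  = queue - *queue;	// == &h->q[0]
 *	struct ctlr_info *h2 =
 *		container_of(base, struct ctlr_info, q[0]);	// h2 == h
 */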
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
			unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
					sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	__le32 paddr32;
	u32 tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return err;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = cpu_to_le32(paddr64);

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr =
			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));

	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
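/*
 * Illustrative usage (not driver code): hpsa_noop(pdev) sends message
 * opcode 3 (a no-op) with type 0 through the simple-mode inbound post
 * register and polls the outbound reply register for the command's bus
 * address to come back, e.g. after a reset:
 *
 *	if (hpsa_noop(pdev) == 0)
 *		;	// controller is responding again
 */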
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
{

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(10000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller." */

		int rc = 0;

		dev_info(&pdev->dev, "using PCI PM to reset controller\n");

		/* enter the D3hot power management state */
		rc = pci_set_power_state(pdev, PCI_D3hot);
		if (rc)
			return rc;

		msleep(500);

		/* enter the D0 power management state */
		rc = pci_set_power_state(pdev, PCI_D0);
		if (rc)
			return rc;

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}
static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}
/* This does a hard reset of the controller using PCI power management
 * states or the using the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev,
				"Soft reset not supported. Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"Failed waiting for board to become ready after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
/* We cannot read the structure directly, for portability we must use
 *   the io functions.
 *   This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
	pci_free_irq_vectors(h->pdev);
	h->msix_vectors = 0;
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use legacy INTx mode.
 */
static int hpsa_interrupt_mode(struct ctlr_info *h)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	/* Some boards advertise MSI but don't really support it */
	switch (h->board_id) {
	case 0x40700E11:
	case 0x40800E11:
	case 0x40820E11:
	case 0x40830E11:
		break;
	default:
		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (ret > 0) {
			h->msix_vectors = ret;
			return 0;
		}

		flags |= PCI_IRQ_MSI;
		break;
	}

	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
	if (ret < 0)
		return ret;
	return 0;
}
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
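/*
 * Worked example (illustrative only): a controller with PCI subsystem
 * vendor 0x103C and subsystem device 0x3241 yields
 *
 *	board_id = (0x3241 << 16) | 0x103C = 0x3241103C
 *
 * which is the form the products[] table entries are keyed on.
 */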
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static void hpsa_free_cfgtables(struct ctlr_info *h)
{
	if (h->transtable) {
		iounmap(h->transtable);
		h->transtable = NULL;
	}
	if (h->cfgtable) {
		iounmap(h->cfgtable);
		h->cfgtable = NULL;
	}
}
/* Find and map CISS config table and transfer table
 * several items must be unmapped (freed) later
 */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable) {
		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
		hpsa_free_cfgtables(h);
		return -ENOMEM;
	}
	return 0;
}
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
#define MIN_MAX_COMMANDS 16
	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);

	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < MIN_MAX_COMMANDS) {
		dev_warn(&h->pdev->dev,
			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
			h->max_commands,
			MIN_MAX_COMMANDS);
		h->max_commands = MIN_MAX_COMMANDS;
	}
}

/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}
/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory. */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			goto done;
		/* delay and try again */
		msleep(CLEAR_EVENT_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}

static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (h->remove_in_progress)
			goto done;
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			goto done;
		/* delay and try again */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}
/* return -ENODEV or other reason on error, 0 on success */
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h))
		goto error;
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}
/* free items allocated or mapped by hpsa_pci_init */
static void hpsa_free_pci_init(struct ctlr_info *h)
{
	hpsa_free_cfgtables(h);			/* pci_init 4 */
	iounmap(h->vaddr);			/* pci_init 3 */
	h->vaddr = NULL;
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);		/* pci_init 1 */
	pci_release_regions(h->pdev);		/* pci_init 2 */
}
/* several items must be freed later */
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return prod_index;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	h->needs_abort_tags_swizzled =
		ctlr_needs_abort_tags_swizzled(h->board_id);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
		pci_disable_device(h->pdev);
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"failed to obtain PCI resources\n");
		pci_disable_device(h->pdev);
		return err;
	}

	pci_set_master(h->pdev);

	err = hpsa_interrupt_mode(h);
	if (err)
		goto clean1;
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto clean2;	/* intmode+region, pci */
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
		err = -ENOMEM;
		goto clean2;	/* intmode+region, pci */
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	err = hpsa_find_cfgtables(h);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	return 0;

clean4:	/* cfgtables, vaddr, intmode+region, pci */
	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
	iounmap(h->vaddr);
	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
	hpsa_disable_interrupt_mode(h);
clean1:
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state is
	 * the pci interface. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	h->cmd_pool_bits = NULL;
	if (h->cmd_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct CommandList),
				h->cmd_pool,
				h->cmd_pool_dhandle);
		h->cmd_pool = NULL;
		h->cmd_pool_dhandle = 0;
	}
	if (h->errinfo_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct ErrorInfo),
				h->errinfo_pool,
				h->errinfo_pool_dhandle);
		h->errinfo_pool = NULL;
		h->errinfo_pool_dhandle = 0;
	}
}

static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	hpsa_preinitialize_commands(h);
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
		h->q[h->intr_mode] = 0;
		return;
	}

	for (i = 0; i < h->msix_vectors; i++) {
		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
		h->q[i] = 0;
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}
/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vectors; i++) {
			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
					0, h->intrname[i],
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       pci_irq_vector(h->pdev, i), h->devname);
				for (j = 0; j < i; j++) {
					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
	} else {
		/* Use single reply pool */
		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
			sprintf(h->intrname[0], "%s-msi%s", h->devname,
				h->msix_vectors ? "x" : "");
			rc = request_irq(pci_irq_vector(h->pdev, 0),
				msixhandler, 0,
				h->intrname[0],
				&h->q[h->intr_mode]);
		} else {
			sprintf(h->intrname[h->intr_mode],
				"%s-intx", h->devname);
			rc = request_irq(pci_irq_vector(h->pdev, 0),
				intxhandler, IRQF_SHARED,
				h->intrname[0],
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
		       pci_irq_vector(h->pdev, 0), h->devname);
		hpsa_free_irqs(h);
		return -ENODEV;
	}
	return 0;
}
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	int rc;

	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return rc;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return rc;
	}

	return 0;
}

static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev,
					h->reply_queue_size,
					h->reply_queue[i].head,
					h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
	h->reply_queue_size = 0;
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */
	hpsa_free_pci_init(h);			/* init_one 2_5 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);				/* init_one 1 */
}
/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	int failcount = 0;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}

static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u32 *lockup_detected;

		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}
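/*
 * Illustrative sketch (not driver code): the write side above updates every
 * online CPU's copy so that the read side can check only its local copy,
 * lock-free, on the hot path.  Something like
 *
 *	u32 locked_up = *per_cpu_ptr(h->lockup_detected, raw_smp_processor_id());
 *
 * is roughly what the driver's lockup_detected() helper boils down to.
 */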
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}

static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++) {
			h->dev[i]->offload_enabled = 0;
			h->dev[i]->offload_to_be_enabled = 0;
		}
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}
	return;
}
/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan) {
		h->drv_req_rescan = 0;
		return 1;
	}

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}

/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}

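/*
 * Compare a fresh REPORT LUNS result against the cached copy in
 * h->lastlogicals; returns 1 if the logical drive list changed (or if
 * we cannot tell), 0 if it is unchanged.
 */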
static int hpsa_luns_changed(struct ctlr_info *h)
{
	int rc = 1; /* assume there are changes */
	struct ReportLUNdata *logdev = NULL;

	/* if we can't find out if lun data has changed,
	 * assume that it has.
	 */

	if (!h->lastlogicals)
		return rc;

	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
	if (!logdev) {
		dev_warn(&h->pdev->dev,
			"Out of memory, can't track lun changes.\n");
		return rc;
	}
	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
		dev_warn(&h->pdev->dev,
			"report luns failed, can't track lun changes.\n");
		goto out;
	}
	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
		dev_info(&h->pdev->dev,
			"Lun changes detected.\n");
		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
		goto out;
	} else
		rc = 0; /* no changes detected. */
out:
	kfree(logdev);
	return rc;
}

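/*
 * Delayed-work callback: rescan for device changes, then re-queue
 * itself every heartbeat_sample_interval until controller removal
 * begins.
 */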
static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, rescan_ctlr_work);

	if (h->remove_in_progress)
		return;

	/*
	 * Do the scan after the reset
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	} else if (h->discovery_polling) {
		hpsa_disable_rld_caching(h);
		if (hpsa_luns_changed(h)) {
			struct Scsi_Host *sh = NULL;

			dev_info(&h->pdev->dev,
				"driver discovery polling rescan.\n");
			sh = scsi_host_get(h->scsi_host);
			if (sh != NULL) {
				hpsa_scan_start(sh);
				scsi_host_put(sh);
			}
		}
	}
	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}

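/*
 * PCI probe entry point. Resources are acquired in numbered stages (the
 * "init_one N" comments throughout this file); on failure we fall
 * through the matching cleanN: labels in reverse order so each stage
 * frees only what was actually allocated.
 */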
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->abort_cmd_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the resubmit workqueue */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;
	/* Disable discovery polling.*/
	h->discovery_polling = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
	if (!h->lastlogicals)
		dev_info(&h->pdev->dev,
			"Can't track change to report lun data\n");

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4: /* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3: /* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5: /* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2: /* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}

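/*
 * Issue a CISS cache-flush command so the controller writes its
 * battery-backed cache out to disk; used on shutdown and remove.
 */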
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}

/* Make controller gather fresh report lun data each time we
 * send down a report luns request
 */
static void hpsa_disable_rld_caching(struct ctlr_info *h)
{
	u32 *options;
	struct CommandList *c;
	int rc;

	/* Don't bother trying to set diag options if locked up */
	if (unlikely(lockup_detected(h)))
		return;

	options = kzalloc(sizeof(*options), GFP_KERNEL);
	if (!options) {
		dev_err(&h->pdev->dev,
			"Error: failed to disable rld caching, during alloc.\n");
		return;
	}

	c = cmd_alloc(h);

	/* first, get the current diag options settings */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now, set the bit for disabling the RLD caching */
	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;

	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now verify that it got set: */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
		goto out;

errout:
	dev_err(&h->pdev->dev,
			"Error: failed to disable report lun data caching.\n");
out:
	cmd_free(h, c);
	kfree(options);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command to
	 * write all data in the battery-backed cache to disk.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	if (h->scsi_host)
		scsi_remove_host(h->scsi_host);		/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	kfree(h->hba_inquiry_data);			/* init_one 10 */
	h->hba_inquiry_data = NULL;			/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);			/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
	hpsa_free_cmd_pool(h);				/* init_one 5 */
	kfree(h->lastlogicals);

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);			/* init_one 3 */
	h->scsi_host = NULL;				/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);				/* init_one 2.5 */

	free_percpu(h->lockup_detected);		/* init_one 2 */
	h->lockup_detected = NULL;			/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */

	hpsa_delete_sas_host(h);

	kfree(h);					/* init_one 1 */
}

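/*
 * Power management is not implemented; these stubs merely satisfy the
 * pci_driver suspend/resume interface.
 */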
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return 0;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return 0;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}

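/*
 * Worked example of the mapping above: with the bft[] table used below
 * ({5, 6, 8, 10, ...}) and min_blocks = 4, a command with 2 SG entries
 * needs 2 + 4 = 6 sixteen-byte blocks, so bucket_map[2] = 1 (the
 * "6 blocks" bucket).
 */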
/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.   bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	}
	if (trans_support & CFGTBL_Trans_io_accel2) {
		access = SA5_ioaccel_mode2_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
				&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

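/*
 * Poll the command pool until no accelerated (ioaccel) commands remain
 * in flight; used to quiesce the ioaccel path before reconfiguring it.
 */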
static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	sas_phy_free(phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	kfree(hpsa_sas_phy);
}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}

static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_gendev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

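/*
 * The sas_function_template callbacks below are stubs; hpsa exposes
 * only a minimal SAS topology for sysfs, so none of these operations
 * are supported by the driver.
 */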
static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	return -ENXIO;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

/* SMP = Serial Management Protocol */
static int
hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
			struct request *req)
{
	return -EINVAL;
}

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
	.smp_handler = hpsa_sas_smp_handler,
};

/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);
	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

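/*
 * verify_offsets() is never called; it exists only so the
 * BUILD_BUG_ON()-based VERIFY_OFFSET checks below catch any drift in
 * the hardware command structure layouts at compile time.
 */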
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);