/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};
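/* Note on the terminating wildcard entry above: it matches any HP device
 * whose PCI class is PCI_CLASS_STORAGE_RAID; per the hpsa_allow_any
 * parameter description above, such otherwise-unknown boards are intended
 * to be claimed only when that module parameter is set. */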
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
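/* For example, board_id 0x3241103C is subsystem device ID 0x3241 joined
 * with subsystem vendor ID 0x103C -- the Smart Array P212 entry below. */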
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
#define VPD_PAGE (1 << 8)
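/* Illustrative note (an assumption about how the VPD_PAGE flag is consumed
 * downstream): a caller would pass e.g. (VPD_PAGE | 0xC1) as the page_code
 * to fill_cmd() so that a VPD inquiry page can be told apart from a plain
 * inquiry, since valid VPD page codes occupy only the low byte. */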
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.no_write_same = 1,
};
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
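/*
 * Illustrative example (not from the original source, but following the
 * normal performant-mode layout above and the code below): a command whose
 * header lists 3 scatter-gather entries would be submitted with
 *
 *	c->busaddr |= 1 | (h->blockFetchTable[3] << 1);
 *
 * i.e. bit 0 selects performant mode and bits 1-3 carry the block fetch
 * table entry for a 3-SG command.
 */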
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
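/* In other words, while a firmware flash command is outstanding the lockup
 * detector tolerates up to 240 seconds between heartbeat samples instead of
 * the usual 30 seconds (both values are expressed in jiffies via HZ). */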
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	else
		return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr.lower = 0;
		chain_sg->Addr.upper = 0;
		return -1;
	}
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
	return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT)
				break;
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}
static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	int raid_retry = 0;

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev_warn(&h->pdev->dev,
				"%s: Path is unavailable, retrying on standard path.\n",
				"HP SSD Smart Path");
		else
			dev_warn(&h->pdev->dev,
				"%s: Error 0x%02x, retrying on standard path.\n",
				"HP SSD Smart Path", c2->error_data.status);

		dev->offload_enabled = 0;
		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
		cmd->result = DID_SOFT_ERROR << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}
	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
	/* If error found, disable Smart Path, schedule a rescan,
	 * and force a retry on the standard path.
	 */
	if (raid_retry) {
		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
			"HP SSD Smart Path");
		dev->offload_enabled = 0; /* Disable Smart Path */
		h->drv_req_rescan = 1;	/* schedule controller rescan */
		cmd->result = DID_SOFT_ERROR << 16;
	}
	cmd_free(h, c);
	cmd->scsi_done(cmd);
}
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;

	unsigned char sense_key;
	unsigned char asc;	/* additional sense code */
	unsigned char ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(cp->Header.SGTotal > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}
	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.Tag.lower = c->Tag.lower;
		cp->Header.Tag.upper = c->Tag.upper;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			cmd->result = DID_SOFT_ERROR << 16;
			cmd_free(h, cp);
			cmd->scsi_done(cmd);
			return;
		}
	}
	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp))
				break;
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_dbg(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}
1837 /* Problem was not a check condition
1838 * Pass it up to the upper layers...
1840 if (ei->ScsiStatus) {
1841 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1842 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1843 "Returning result: 0x%x\n",
1845 sense_key, asc, ascq,
1847 } else { /* scsi status is zero??? How??? */
1848 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1849 "Returning no connection.\n", cp),
1851 /* Ordinarily, this case should never happen,
1852 * but there is a bug in some released firmware
1853 * revisions that allows it to happen if, for
1854 * example, a 4100 backplane loses power and
1855 * the tape drive is in it. We assume that
1856 * it's a fatal error of some kind because we
1857 * can't show that it wasn't. We will make it
1858 * look like selection timeout since that is
1859 * the most common reason for this to occur,
1860 * and it's severe enough.
1863 cmd->result = DID_NO_CONNECT << 16;
1867 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1869 case CMD_DATA_OVERRUN:
1870 dev_warn(&h->pdev->dev, "cp %p has"
1871 " completed with data overrun "
1875 /* print_bytes(cp, sizeof(*cp), 1, 0);
1877 /* We get CMD_INVALID if you address a non-existent device
1878 * instead of a selection timeout (no response). You will
1879 * see this if you yank out a drive, then try to access it.
1880 * This is kind of a shame because it means that any other
1881 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1882 * missing target. */
1883 cmd->result = DID_NO_CONNECT << 16;
1886 case CMD_PROTOCOL_ERR:
1887 cmd->result = DID_ERROR << 16;
1888 dev_warn(&h->pdev->dev, "cp %p has "
1889 "protocol error\n", cp);
1891 case CMD_HARDWARE_ERR:
1892 cmd->result = DID_ERROR << 16;
1893 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1895 case CMD_CONNECTION_LOST:
1896 cmd->result = DID_ERROR << 16;
1897 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1900 cmd->result = DID_ABORT << 16;
1901 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1902 cp, ei->ScsiStatus);
1904 case CMD_ABORT_FAILED:
1905 cmd->result = DID_ERROR << 16;
1906 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1908 case CMD_UNSOLICITED_ABORT:
1909 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1910 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1914 cmd->result = DID_TIME_OUT << 16;
1915 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1917 case CMD_UNABORTABLE:
1918 cmd->result = DID_ERROR << 16;
1919 dev_warn(&h->pdev->dev, "Command unabortable\n");
1921 case CMD_IOACCEL_DISABLED:
1922 /* This only handles the direct pass-through case since RAID
1923 * offload is handled above. Just attempt a retry.
1925 cmd->result = DID_SOFT_ERROR << 16;
1926 dev_warn(&h->pdev->dev,
1927 "cp %p had HP SSD Smart Path error\n", cp);
1930 cmd->result = DID_ERROR << 16;
1931 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1932 cp, ei->CommandStatus);
1935 cmd->scsi_done(cmd);
1938 static void hpsa_pci_unmap(struct pci_dev *pdev,
1939 struct CommandList *c, int sg_used, int data_direction)
1942 union u64bit addr64;
1944 for (i = 0; i < sg_used; i++) {
1945 addr64.val32.lower = c->SG[i].Addr.lower;
1946 addr64.val32.upper = c->SG[i].Addr.upper;
1947 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1952 static int hpsa_map_one(struct pci_dev *pdev,
1953 struct CommandList *cp,
1960 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1961 cp->Header.SGList = 0;
1962 cp->Header.SGTotal = 0;
1966 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1967 if (dma_mapping_error(&pdev->dev, addr64)) {
1968 /* Prevent subsequent unmap of something never mapped */
1969 cp->Header.SGList = 0;
1970 cp->Header.SGTotal = 0;
1973 cp->SG[0].Addr.lower =
1974 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1975 cp->SG[0].Addr.upper =
1976 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1977 cp->SG[0].Len = buflen;
1978 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
1979 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1980 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1984 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1985 struct CommandList *c)
1987 DECLARE_COMPLETION_ONSTACK(wait);
1990 enqueue_cmd_and_start_io(h, c);
1991 wait_for_completion(&wait);
1994 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1995 struct CommandList *c)
1997 unsigned long flags;
1999 /* If controller lockup detected, fake a hardware error. */
2000 spin_lock_irqsave(&h->lock, flags);
2001 if (unlikely(h->lockup_detected)) {
2002 spin_unlock_irqrestore(&h->lock, flags);
2003 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2005 spin_unlock_irqrestore(&h->lock, flags);
2006 hpsa_scsi_do_simple_cmd_core(h, c);
2010 #define MAX_DRIVER_CMD_RETRIES 25
2011 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2012 struct CommandList *c, int data_direction)
2014 int backoff_time = 10, retry_count = 0;
2017 memset(c->err_info, 0, sizeof(*c->err_info));
2018 hpsa_scsi_do_simple_cmd_core(h, c);
2020 if (retry_count > 3) {
2021 msleep(backoff_time);
2022 if (backoff_time < 1000)
2025 } while ((check_for_unit_attention(h, c) ||
2026 check_for_busy(h, c)) &&
2027 retry_count <= MAX_DRIVER_CMD_RETRIES);
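/* The loop above re-issues the command while the device keeps
 * reporting unit attention or busy; after the first three tries it
 * sleeps backoff_time ms between attempts, growing the delay while
 * it stays under the 1000 ms cap, and gives up entirely after
 * MAX_DRIVER_CMD_RETRIES attempts.
 */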
2028 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2031 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2032 struct CommandList *c)
2034 const u8 *cdb = c->Request.CDB;
2035 const u8 *lun = c->Header.LUN.LunAddrBytes;
2037 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2038 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2039 txt, lun[0], lun[1], lun[2], lun[3],
2040 lun[4], lun[5], lun[6], lun[7],
2041 cdb[0], cdb[1], cdb[2], cdb[3],
2042 cdb[4], cdb[5], cdb[6], cdb[7],
2043 cdb[8], cdb[9], cdb[10], cdb[11],
2044 cdb[12], cdb[13], cdb[14], cdb[15]);
2047 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2048 struct CommandList *cp)
2050 const struct ErrorInfo *ei = cp->err_info;
2051 struct device *d = &cp->h->pdev->dev;
2052 const u8 *sd = ei->SenseInfo;
2054 switch (ei->CommandStatus) {
2055 case CMD_TARGET_STATUS:
2056 hpsa_print_cmd(h, "SCSI status", cp);
2057 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2058 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2059 sd[2] & 0x0f, sd[12], sd[13]);
2061 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
2062 if (ei->ScsiStatus == 0)
2063 dev_warn(d, "SCSI status is abnormally zero. "
2064 "(probably indicates selection timeout "
2065 "reported incorrectly due to a known "
2066 "firmware bug, circa July, 2001.)\n");
2068 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2070 case CMD_DATA_OVERRUN:
2071 hpsa_print_cmd(h, "overrun condition", cp);
2074 /* controller unfortunately reports SCSI passthru's
2075 * to non-existent targets as invalid commands.
2077 hpsa_print_cmd(h, "invalid command", cp);
2078 dev_warn(d, "probably means device no longer present\n");
2081 case CMD_PROTOCOL_ERR:
2082 hpsa_print_cmd(h, "protocol error", cp);
2084 case CMD_HARDWARE_ERR:
2085 hpsa_print_cmd(h, "hardware error", cp);
2087 case CMD_CONNECTION_LOST:
2088 hpsa_print_cmd(h, "connection lost", cp);
2091 hpsa_print_cmd(h, "aborted", cp);
2093 case CMD_ABORT_FAILED:
2094 hpsa_print_cmd(h, "abort failed", cp);
2096 case CMD_UNSOLICITED_ABORT:
2097 hpsa_print_cmd(h, "unsolicited abort", cp);
2100 hpsa_print_cmd(h, "timed out", cp);
2102 case CMD_UNABORTABLE:
2103 hpsa_print_cmd(h, "unabortable", cp);
2106 hpsa_print_cmd(h, "unknown status", cp);
2107 dev_warn(d, "Unknown command status %x\n",
2112 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2113 u16 page, unsigned char *buf,
2114 unsigned char bufsize)
2117 struct CommandList *c;
2118 struct ErrorInfo *ei;
2120 c = cmd_special_alloc(h);
2122 if (c == NULL) { /* trouble... */
2123 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2127 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2128 page, scsi3addr, TYPE_CMD)) {
2132 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2134 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2135 hpsa_scsi_interpret_error(h, c);
2139 cmd_special_free(h, c);
2143 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2144 unsigned char *scsi3addr, unsigned char page,
2145 struct bmic_controller_parameters *buf, size_t bufsize)
2148 struct CommandList *c;
2149 struct ErrorInfo *ei;
2151 c = cmd_special_alloc(h);
2153 if (c == NULL) { /* trouble... */
2154 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2158 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2159 page, scsi3addr, TYPE_CMD)) {
2163 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2165 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2166 hpsa_scsi_interpret_error(h, c);
2170 cmd_special_free(h, c);
2174 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2178 struct CommandList *c;
2179 struct ErrorInfo *ei;
2181 c = cmd_special_alloc(h);
2183 if (c == NULL) { /* trouble... */
2184 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2188 /* fill_cmd can't fail here, no data buffer to map. */
2189 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2190 scsi3addr, TYPE_MSG);
2191 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2192 hpsa_scsi_do_simple_cmd_core(h, c);
2193 /* no unmap needed here because no data xfer. */
2196 if (ei->CommandStatus != 0) {
2197 hpsa_scsi_interpret_error(h, c);
2200 cmd_special_free(h, c);
2204 static void hpsa_get_raid_level(struct ctlr_info *h,
2205 unsigned char *scsi3addr, unsigned char *raid_level)
2210 *raid_level = RAID_UNKNOWN;
2211 buf = kzalloc(64, GFP_KERNEL);
2214 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2216 *raid_level = buf[8];
2217 if (*raid_level > RAID_UNKNOWN)
2218 *raid_level = RAID_UNKNOWN;
2223 #define HPSA_MAP_DEBUG
2224 #ifdef HPSA_MAP_DEBUG
2225 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2226 struct raid_map_data *map_buff)
2228 struct raid_map_disk_data *dd = &map_buff->data[0];
2230 u16 map_cnt, row_cnt, disks_per_row;
2235 /* Show details only if debugging has been activated. */
2236 if (h->raid_offload_debug < 2)
2239 dev_info(&h->pdev->dev, "structure_size = %u\n",
2240 le32_to_cpu(map_buff->structure_size));
2241 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2242 le32_to_cpu(map_buff->volume_blk_size));
2243 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2244 le64_to_cpu(map_buff->volume_blk_cnt));
2245 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2246 map_buff->phys_blk_shift);
2247 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2248 map_buff->parity_rotation_shift);
2249 dev_info(&h->pdev->dev, "strip_size = %u\n",
2250 le16_to_cpu(map_buff->strip_size));
2251 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2252 le64_to_cpu(map_buff->disk_starting_blk));
2253 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2254 le64_to_cpu(map_buff->disk_blk_cnt));
2255 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2256 le16_to_cpu(map_buff->data_disks_per_row));
2257 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2258 le16_to_cpu(map_buff->metadata_disks_per_row));
2259 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2260 le16_to_cpu(map_buff->row_cnt));
2261 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2262 le16_to_cpu(map_buff->layout_map_count));
2263 dev_info(&h->pdev->dev, "flags = %u\n",
2264 le16_to_cpu(map_buff->flags));
2265 if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
2266 dev_info(&h->pdev->dev, "encryption = ON\n");
2268 dev_info(&h->pdev->dev, "encryption = OFF\n");
2269 dev_info(&h->pdev->dev, "dekindex = %u\n",
2270 le16_to_cpu(map_buff->dekindex));
2272 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2273 for (map = 0; map < map_cnt; map++) {
2274 dev_info(&h->pdev->dev, "Map%u:\n", map);
2275 row_cnt = le16_to_cpu(map_buff->row_cnt);
2276 for (row = 0; row < row_cnt; row++) {
2277 dev_info(&h->pdev->dev, " Row%u:\n", row);
2279 le16_to_cpu(map_buff->data_disks_per_row);
2280 for (col = 0; col < disks_per_row; col++, dd++)
2281 dev_info(&h->pdev->dev,
2282 " D%02u: h=0x%04x xor=%u,%u\n",
2283 col, dd->ioaccel_handle,
2284 dd->xor_mult[0], dd->xor_mult[1]);
2286 le16_to_cpu(map_buff->metadata_disks_per_row);
2287 for (col = 0; col < disks_per_row; col++, dd++)
2288 dev_info(&h->pdev->dev,
2289 " M%02u: h=0x%04x xor=%u,%u\n",
2290 col, dd->ioaccel_handle,
2291 dd->xor_mult[0], dd->xor_mult[1]);
2296 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2297 __attribute__((unused)) int rc,
2298 __attribute__((unused)) struct raid_map_data *map_buff)
2303 static int hpsa_get_raid_map(struct ctlr_info *h,
2304 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2307 struct CommandList *c;
2308 struct ErrorInfo *ei;
2310 c = cmd_special_alloc(h);
2312 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2315 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2316 sizeof(this_device->raid_map), 0,
2317 scsi3addr, TYPE_CMD)) {
2318 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2319 cmd_special_free(h, c);
2322 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2324 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2325 hpsa_scsi_interpret_error(h, c);
2326 cmd_special_free(h, c);
2329 cmd_special_free(h, c);
2331 /* @todo in the future, dynamically allocate RAID map memory */
2332 if (le32_to_cpu(this_device->raid_map.structure_size) >
2333 sizeof(this_device->raid_map)) {
2334 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2337 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2341 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2342 unsigned char scsi3addr[], u8 page)
2347 unsigned char *buf, bufsize;
2349 buf = kzalloc(256, GFP_KERNEL);
2353 /* Get the size of the page list first */
2354 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2355 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2356 buf, HPSA_VPD_HEADER_SZ);
2358 goto exit_unsupported;
2360 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2361 bufsize = pages + HPSA_VPD_HEADER_SZ;
2365 /* Get the whole VPD page list */
2366 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2367 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2370 goto exit_unsupported;
2373 for (i = 1; i <= pages; i++)
2374 if (buf[3 + i] == page)
2375 goto exit_supported;
2384 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2385 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2391 this_device->offload_config = 0;
2392 this_device->offload_enabled = 0;
2394 buf = kzalloc(64, GFP_KERNEL);
2397 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2399 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2400 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2404 #define IOACCEL_STATUS_BYTE 4
2405 #define OFFLOAD_CONFIGURED_BIT 0x01
2406 #define OFFLOAD_ENABLED_BIT 0x02
2407 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2408 this_device->offload_config =
2409 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2410 if (this_device->offload_config) {
2411 this_device->offload_enabled =
2412 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2413 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2414 this_device->offload_enabled = 0;
2421 /* Get the device id from inquiry page 0x83 */
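/* (VPD page 0x83 is the standard SCSI Device Identification page;
 * the identifier copied out below starts at byte 8 of the response.)
 */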
2422 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2423 unsigned char *device_id, int buflen)
2430 buf = kzalloc(64, GFP_KERNEL);
2433 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2435 memcpy(device_id, &buf[8], buflen);
2440 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2441 struct ReportLUNdata *buf, int bufsize,
2442 int extended_response)
2445 struct CommandList *c;
2446 unsigned char scsi3addr[8];
2447 struct ErrorInfo *ei;
2449 c = cmd_special_alloc(h);
2450 if (c == NULL) { /* trouble... */
2451 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2454 /* address the controller */
2455 memset(scsi3addr, 0, sizeof(scsi3addr));
2456 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2457 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2461 if (extended_response)
2462 c->Request.CDB[1] = extended_response;
2463 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2465 if (ei->CommandStatus != 0 &&
2466 ei->CommandStatus != CMD_DATA_UNDERRUN) {
2467 hpsa_scsi_interpret_error(h, c);
2470 if (buf->extended_response_flag != extended_response) {
2471 dev_err(&h->pdev->dev,
2472 "report luns requested format %u, got %u\n",
2474 buf->extended_response_flag);
2479 cmd_special_free(h, c);
2483 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2484 struct ReportLUNdata *buf,
2485 int bufsize, int extended_response)
2487 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2490 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2491 struct ReportLUNdata *buf, int bufsize)
2493 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2496 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2497 int bus, int target, int lun)
2500 device->target = target;
2504 /* Use VPD inquiry to get details of volume status */
2505 static int hpsa_get_volume_status(struct ctlr_info *h,
2506 unsigned char scsi3addr[])
2513 buf = kzalloc(64, GFP_KERNEL);
2515 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2517 /* Does controller have VPD for logical volume status? */
2518 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
2519 dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
2523 /* Get the size of the VPD return buffer */
2524 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2525 buf, HPSA_VPD_HEADER_SZ);
2527 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2532 /* Now get the whole VPD buffer */
2533 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2534 buf, size + HPSA_VPD_HEADER_SZ);
2536 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2539 status = buf[4]; /* status byte */
2545 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2548 /* Determine offline status of a volume.
2551 * -1 (offline for unknown reasons)
2552 * # (integer code indicating one of several NOT READY states
2553 * describing why a volume is to be kept offline)
2555 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
2556 unsigned char scsi3addr[])
2558 struct CommandList *c;
2559 unsigned char *sense, sense_key, asc, ascq;
2563 #define ASC_LUN_NOT_READY 0x04
2564 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2565 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2570 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2571 hpsa_scsi_do_simple_cmd_core(h, c);
2572 sense = c->err_info->SenseInfo;
2573 sense_key = sense[2];
2576 cmd_status = c->err_info->CommandStatus;
2577 scsi_status = c->err_info->ScsiStatus;
2579 /* Is the volume 'not ready'? */
2580 if (cmd_status != CMD_TARGET_STATUS ||
2581 scsi_status != SAM_STAT_CHECK_CONDITION ||
2582 sense_key != NOT_READY ||
2583 asc != ASC_LUN_NOT_READY) {
2587 /* Determine the reason for not ready state */
2588 ldstat = hpsa_get_volume_status(h, scsi3addr);
2590 /* Keep volume offline in certain cases: */
2592 case HPSA_LV_UNDERGOING_ERASE:
2593 case HPSA_LV_UNDERGOING_RPI:
2594 case HPSA_LV_PENDING_RPI:
2595 case HPSA_LV_ENCRYPTED_NO_KEY:
2596 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2597 case HPSA_LV_UNDERGOING_ENCRYPTION:
2598 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2599 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2601 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2602 /* If VPD status page isn't available,
2603 * use ASC/ASCQ to determine state
2605 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2606 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2615 static int hpsa_update_device_info(struct ctlr_info *h,
2616 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2617 unsigned char *is_OBDR_device)
2620 #define OBDR_SIG_OFFSET 43
2621 #define OBDR_TAPE_SIG "$DR-10"
2622 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2623 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2625 unsigned char *inq_buff;
2626 unsigned char *obdr_sig;
2628 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2632 /* Do an inquiry to the device to see what it is. */
2633 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2634 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2635 /* Inquiry failed (msg printed already) */
2636 dev_err(&h->pdev->dev,
2637 "hpsa_update_device_info: inquiry failed\n");
2641 this_device->devtype = (inq_buff[0] & 0x1f);
2642 memcpy(this_device->scsi3addr, scsi3addr, 8);
2643 memcpy(this_device->vendor, &inq_buff[8],
2644 sizeof(this_device->vendor));
2645 memcpy(this_device->model, &inq_buff[16],
2646 sizeof(this_device->model));
2647 memset(this_device->device_id, 0,
2648 sizeof(this_device->device_id));
2649 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2650 sizeof(this_device->device_id));
2652 if (this_device->devtype == TYPE_DISK &&
2653 is_logical_dev_addr_mode(scsi3addr)) {
2654 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2655 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2656 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2657 this_device->volume_offline =
2658 hpsa_volume_offline(h, scsi3addr);
2660 this_device->raid_level = RAID_UNKNOWN;
2661 this_device->offload_config = 0;
2662 this_device->offload_enabled = 0;
2663 this_device->volume_offline = 0;
2666 if (is_OBDR_device) {
2667 /* See if this is a One-Button-Disaster-Recovery device
2668 * by looking for "$DR-10" at offset 43 in inquiry data.
2670 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2671 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2672 strncmp(obdr_sig, OBDR_TAPE_SIG,
2673 OBDR_SIG_LEN) == 0);
2684 static unsigned char *ext_target_model[] = {
2694 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2698 for (i = 0; ext_target_model[i]; i++)
2699 if (strncmp(device->model, ext_target_model[i],
2700 strlen(ext_target_model[i])) == 0)
2705 /* Helper function to assign bus, target, lun mapping of devices.
2706 * Puts non-external target logical volumes on bus 0, external target logical
2707 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2708 * Logical drive target and lun are assigned at this time, but
2709 * physical device lun and target assignment are deferred (assigned
2710 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2712 static void figure_bus_target_lun(struct ctlr_info *h,
2713 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2715 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2717 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2718 /* physical device, target and lun filled in later */
2719 if (is_hba_lunid(lunaddrbytes))
2720 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2722 /* defer target, lun assignment for physical devices */
2723 hpsa_set_bus_target_lun(device, 2, -1, -1);
2726 /* It's a logical device */
2727 if (is_ext_target(h, device)) {
2728 /* External target: put logicals on bus 1 and match the
2729 * target/lun numbers the box reports; other smart arrays
2730 * go on bus 0, target 0, matching the lunid.
2732 hpsa_set_bus_target_lun(device,
2733 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2736 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
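/* Illustrative examples (hypothetical lunids): a plain logical volume
 * with lunid 0x00000005 maps to bus 0, target 0, lun 5; an external
 * target logical with lunid 0x00120003 maps to bus 1, target 0x12,
 * lun 3, mirroring the numbers the external box reports.
 */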
2740 * If there is no lun 0 on a target, linux won't find any devices.
2741 * For the external targets (arrays), we have to manually detect the enclosure
2742 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2743 * it for some reason. *tmpdevice is the target we're adding,
2744 * this_device is a pointer into the current element of currentsd[]
2745 * that we're building up in update_scsi_devices(), below.
2746 * lunzerobits is a bitmap that tracks which targets already have a
2748 * Returns 1 if an enclosure was added, 0 if not.
2750 static int add_ext_target_dev(struct ctlr_info *h,
2751 struct hpsa_scsi_dev_t *tmpdevice,
2752 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2753 unsigned long lunzerobits[], int *n_ext_target_devs)
2755 unsigned char scsi3addr[8];
2757 if (test_bit(tmpdevice->target, lunzerobits))
2758 return 0; /* There is already a lun 0 on this target. */
2760 if (!is_logical_dev_addr_mode(lunaddrbytes))
2761 return 0; /* It's the logical targets that may lack lun 0. */
2763 if (!is_ext_target(h, tmpdevice))
2764 return 0; /* Only external target devices have this problem. */
2766 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
2769 memset(scsi3addr, 0, 8);
2770 scsi3addr[3] = tmpdevice->target;
2771 if (is_hba_lunid(scsi3addr))
2772 return 0; /* Don't add the RAID controller here. */
2774 if (is_scsi_rev_5(h))
2775 return 0; /* p1210m doesn't need to do this. */
2777 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2778 dev_warn(&h->pdev->dev, "Maximum number of external "
2779 "target devices exceeded. Check your hardware "
2784 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2786 (*n_ext_target_devs)++;
2787 hpsa_set_bus_target_lun(this_device,
2788 tmpdevice->bus, tmpdevice->target, 0);
2789 set_bit(tmpdevice->target, lunzerobits);
2794 * Get address of physical disk used for an ioaccel2 mode command:
2795 * 1. Extract ioaccel2 handle from the command.
2796 * 2. Find a matching ioaccel2 handle from list of physical disks.
2798 * 1, with scsi3addr set to the address of the matching physical disk;
2799 * 0 if no matching physical disk was found.
2801 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2802 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2804 struct ReportExtendedLUNdata *physicals = NULL;
2805 int responsesize = 24; /* size of physical extended response */
2806 int extended = 2; /* flag forces reporting 'other dev info'. */
2807 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2808 u32 nphysicals = 0; /* number of reported physical devs */
2809 int found = 0; /* found match (1) or not (0) */
2810 u32 find; /* handle we need to match */
2812 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2813 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2814 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2815 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2816 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2818 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2819 return 0; /* no match */
2821 /* point to the ioaccel2 device handle */
2822 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2824 return 0; /* no match */
2826 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2828 return 0; /* no match */
2830 d = scmd->device->hostdata;
2832 return 0; /* no match */
2834 it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
2835 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
2836 find = c2a->scsi_nexus;
2838 if (h->raid_offload_debug > 0)
2839 dev_info(&h->pdev->dev,
2840 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2841 __func__, scsi_nexus,
2842 d->device_id[0], d->device_id[1], d->device_id[2],
2843 d->device_id[3], d->device_id[4], d->device_id[5],
2844 d->device_id[6], d->device_id[7], d->device_id[8],
2845 d->device_id[9], d->device_id[10], d->device_id[11],
2846 d->device_id[12], d->device_id[13], d->device_id[14],
2849 /* Get the list of physical devices */
2850 physicals = kzalloc(reportsize, GFP_KERNEL);
2851 if (physicals == NULL)
2853 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2854 reportsize, extended)) {
2855 dev_err(&h->pdev->dev,
2856 "Can't lookup %s device handle: report physical LUNs failed.\n",
2857 "HP SSD Smart Path");
2861 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2865 /* find ioaccel2 handle in list of physicals: */
2866 for (i = 0; i < nphysicals; i++) {
2867 /* handle is in bytes 28-31 of each lun */
2868 if (memcmp(&((struct ReportExtendedLUNdata *)
2869 physicals)->LUN[i][20], &find, 4) != 0) {
2870 continue; /* didn't match */
2873 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
2874 physicals)->LUN[i][0], 8);
2875 if (h->raid_offload_debug > 0)
2876 dev_info(&h->pdev->dev,
2877 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2879 ((struct ReportExtendedLUNdata *)
2880 physicals)->LUN[i][20],
2881 scsi3addr[0], scsi3addr[1], scsi3addr[2],
2882 scsi3addr[3], scsi3addr[4], scsi3addr[5],
2883 scsi3addr[6], scsi3addr[7]);
2884 break; /* found it */
2895 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2896 * logdev. The number of luns in physdev and logdev are returned in
2897 * *nphysicals and *nlogicals, respectively.
2898 * Returns 0 on success, -1 otherwise.
2900 static int hpsa_gather_lun_info(struct ctlr_info *h,
2902 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
2903 struct ReportLUNdata *logdev, u32 *nlogicals)
2905 int physical_entry_size = 8;
2909 /* For I/O accelerator mode we need to read physical device handles */
2910 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2911 h->transMethod & CFGTBL_Trans_io_accel2) {
2912 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2913 physical_entry_size = 24;
2915 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
2917 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2920 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2921 physical_entry_size;
2922 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2923 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2924 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2925 *nphysicals - HPSA_MAX_PHYS_LUN);
2926 *nphysicals = HPSA_MAX_PHYS_LUN;
2928 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
2929 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2932 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
2933 /* Reject Logicals in excess of our max capability. */
2934 if (*nlogicals > HPSA_MAX_LUN) {
2935 dev_warn(&h->pdev->dev,
2936 "maximum logical LUNs (%d) exceeded. "
2937 "%d LUNs ignored.\n", HPSA_MAX_LUN,
2938 *nlogicals - HPSA_MAX_LUN);
2939 *nlogicals = HPSA_MAX_LUN;
2941 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2942 dev_warn(&h->pdev->dev,
2943 "maximum logical + physical LUNs (%d) exceeded. "
2944 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2945 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2946 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2951 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
2952 int nphysicals, int nlogicals,
2953 struct ReportExtendedLUNdata *physdev_list,
2954 struct ReportLUNdata *logdev_list)
2956 /* Helper function, figure out where the LUN ID info is coming from
2957 * given index i, lists of physical and logical devices, where in
2958 * the list the raid controller is supposed to appear (first or last)
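* Example (hypothetical counts): with 2 physicals, 3 logicals, and the
* controller placed last (raid_ctlr_position == 5), indices 0-1 resolve
* into physdev_list, 2-4 into logdev_list, and 5 to RAID_CTLR_LUNID.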
2961 int logicals_start = nphysicals + (raid_ctlr_position == 0);
2962 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2964 if (i == raid_ctlr_position)
2965 return RAID_CTLR_LUNID;
2967 if (i < logicals_start)
2968 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
2970 if (i < last_device)
2971 return &logdev_list->LUN[i - nphysicals -
2972 (raid_ctlr_position == 0)][0];
2977 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2980 int hba_mode_enabled;
2981 struct bmic_controller_parameters *ctlr_params;
2982 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
2987 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2988 sizeof(struct bmic_controller_parameters));
2995 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
2997 return hba_mode_enabled;
3000 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3002 /* the idea here is we could get notified
3003 * that some devices have changed, so we do a report
3004 * physical luns and report logical luns cmd, and adjust
3005 * our list of devices accordingly.
3007 * The scsi3addr's of devices won't change so long as the
3008 * adapter is not reset. That means we can rescan and
3009 * tell which devices we already know about, vs. new
3010 * devices, vs. disappearing devices.
3012 struct ReportExtendedLUNdata *physdev_list = NULL;
3013 struct ReportLUNdata *logdev_list = NULL;
3016 int physical_mode = 0;
3017 u32 ndev_allocated = 0;
3018 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3020 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
3021 int i, n_ext_target_devs, ndevs_to_allocate;
3022 int raid_ctlr_position;
3023 int rescan_hba_mode;
3024 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3026 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3027 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
3028 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
3029 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3031 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
3032 dev_err(&h->pdev->dev, "out of memory\n");
3035 memset(lunzerobits, 0, sizeof(lunzerobits));
3037 rescan_hba_mode = hpsa_hba_mode_enabled(h);
3038 if (rescan_hba_mode < 0)
3041 if (!h->hba_mode_enabled && rescan_hba_mode)
3042 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3043 else if (h->hba_mode_enabled && !rescan_hba_mode)
3044 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3046 h->hba_mode_enabled = rescan_hba_mode;
3048 if (hpsa_gather_lun_info(h, reportlunsize,
3049 (struct ReportLUNdata *) physdev_list, &nphysicals,
3050 &physical_mode, logdev_list, &nlogicals))
3053 /* We might see up to the maximum number of logical and physical disks
3054 * plus external target devices, and a device for the local RAID
3057 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3059 /* Allocate the per device structures */
3060 for (i = 0; i < ndevs_to_allocate; i++) {
3061 if (i >= HPSA_MAX_DEVICES) {
3062 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3063 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3064 ndevs_to_allocate - HPSA_MAX_DEVICES);
3068 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3069 if (!currentsd[i]) {
3070 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3071 __FILE__, __LINE__);
3077 if (unlikely(is_scsi_rev_5(h)))
3078 raid_ctlr_position = 0;
3080 raid_ctlr_position = nphysicals + nlogicals;
3082 /* adjust our table of devices */
3083 n_ext_target_devs = 0;
3084 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3085 u8 *lunaddrbytes, is_OBDR = 0;
3087 /* Figure out where the LUN ID info is coming from */
3088 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3089 i, nphysicals, nlogicals, physdev_list, logdev_list);
3090 /* skip masked physical devices. */
3091 if (lunaddrbytes[3] & 0xC0 &&
3092 i < nphysicals + (raid_ctlr_position == 0))
3095 /* Get device type, vendor, model, device id */
3096 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3098 continue; /* skip it if we can't talk to it. */
3099 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3100 this_device = currentsd[ncurrent];
3103 * For external target devices, we have to insert a LUN 0 which
3104 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3105 * is nonetheless an enclosure device there. We have to
3106 * present that otherwise linux won't find anything if
3107 * there is no lun 0.
3109 if (add_ext_target_dev(h, tmpdevice, this_device,
3110 lunaddrbytes, lunzerobits,
3111 &n_ext_target_devs)) {
3113 this_device = currentsd[ncurrent];
3116 *this_device = *tmpdevice;
3118 switch (this_device->devtype) {
3120 /* We don't *really* support actual CD-ROM devices,
3121 * just "One Button Disaster Recovery" tape drive
3122 * which temporarily pretends to be a CD-ROM drive.
3123 * So we check that the device is really an OBDR tape
3124 * device by checking for "$DR-10" in bytes 43-48 of
3131 if (h->hba_mode_enabled) {
3132 /* never use raid mapper in HBA mode */
3133 this_device->offload_enabled = 0;
3136 } else if (h->acciopath_status) {
3137 if (i >= nphysicals) {
3147 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3148 memcpy(&this_device->ioaccel_handle,
3150 sizeof(this_device->ioaccel_handle));
3155 case TYPE_MEDIUM_CHANGER:
3159 /* Only present the Smartarray HBA as a RAID controller.
3160 * If it's a RAID controller other than the HBA itself
3161 * (an external RAID controller, MSA500 or similar)
3164 if (!is_hba_lunid(lunaddrbytes))
3171 if (ncurrent >= HPSA_MAX_DEVICES)
3174 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3177 for (i = 0; i < ndev_allocated; i++)
3178 kfree(currentsd[i]);
3180 kfree(physdev_list);
3184 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3185 * dma mapping and fills in the scatter gather entries of the
3188 static int hpsa_scatter_gather(struct ctlr_info *h,
3189 struct CommandList *cp,
3190 struct scsi_cmnd *cmd)
3193 struct scatterlist *sg;
3195 int use_sg, i, sg_index, chained;
3196 struct SGDescriptor *curr_sg;
3198 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3200 use_sg = scsi_dma_map(cmd);
3205 goto sglist_finished;
3210 scsi_for_each_sg(cmd, sg, use_sg, i) {
3211 if (i == h->max_cmd_sg_entries - 1 &&
3212 use_sg > h->max_cmd_sg_entries) {
3214 curr_sg = h->cmd_sg_list[cp->cmdindex];
3217 addr64 = (u64) sg_dma_address(sg);
3218 len = sg_dma_len(sg);
3219 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3220 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3222 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
3226 if (use_sg + chained > h->maxSG)
3227 h->maxSG = use_sg + chained;
3230 cp->Header.SGList = h->max_cmd_sg_entries;
3231 cp->Header.SGTotal = (u16) (use_sg + 1);
3232 if (hpsa_map_sg_chain_block(h, cp)) {
3233 scsi_dma_unmap(cmd);
3241 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3242 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
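/* Note: in the chained path above, descriptors beyond the first
 * h->max_cmd_sg_entries - 1 live in h->cmd_sg_list[cp->cmdindex], and
 * hpsa_map_sg_chain_block() points the last on-command entry at that
 * block; SGTotal = use_sg + 1 then counts the chain entry as well.
 */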
3246 #define IO_ACCEL_INELIGIBLE (1)
3247 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3253 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
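/* For example, a READ(6) CDB (8-bit transfer length, small LBA) is
 * rebuilt as a READ(10) carrying the same LBA and count; any request
 * whose block count can't fit in 16 bits is bounced back as
 * IO_ACCEL_INELIGIBLE and takes the normal RAID path instead.
 */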
3260 if (*cdb_len == 6) {
3261 block = (((u32) cdb[2]) << 8) | cdb[3];
3264 BUG_ON(*cdb_len != 12);
3265 block = (((u32) cdb[2]) << 24) |
3266 (((u32) cdb[3]) << 16) |
3267 (((u32) cdb[4]) << 8) |
3270 (((u32) cdb[6]) << 24) |
3271 (((u32) cdb[7]) << 16) |
3272 (((u32) cdb[8]) << 8) |
3275 if (block_cnt > 0xffff)
3276 return IO_ACCEL_INELIGIBLE;
3278 cdb[0] = is_write ? WRITE_10 : READ_10;
3280 cdb[2] = (u8) (block >> 24);
3281 cdb[3] = (u8) (block >> 16);
3282 cdb[4] = (u8) (block >> 8);
3283 cdb[5] = (u8) (block);
3285 cdb[7] = (u8) (block_cnt >> 8);
3286 cdb[8] = (u8) (block_cnt);
3294 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3295 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3298 struct scsi_cmnd *cmd = c->scsi_cmd;
3299 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3301 unsigned int total_len = 0;
3302 struct scatterlist *sg;
3305 struct SGDescriptor *curr_sg;
3306 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3308 /* TODO: implement chaining support */
3309 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3310 return IO_ACCEL_INELIGIBLE;
3312 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3314 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3315 return IO_ACCEL_INELIGIBLE;
3317 c->cmd_type = CMD_IOACCEL1;
3319 /* Adjust the DMA address to point to the accelerated command buffer */
3320 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3321 (c->cmdindex * sizeof(*cp));
3322 BUG_ON(c->busaddr & 0x0000007F);
3324 use_sg = scsi_dma_map(cmd);
3330 scsi_for_each_sg(cmd, sg, use_sg, i) {
3331 addr64 = (u64) sg_dma_address(sg);
3332 len = sg_dma_len(sg);
3334 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3335 curr_sg->Addr.upper =
3336 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3339 if (i == (scsi_sg_count(cmd) - 1))
3340 curr_sg->Ext = HPSA_SG_LAST;
3342 curr_sg->Ext = 0; /* we are not chaining */
3346 switch (cmd->sc_data_direction) {
3348 control |= IOACCEL1_CONTROL_DATA_OUT;
3350 case DMA_FROM_DEVICE:
3351 control |= IOACCEL1_CONTROL_DATA_IN;
3354 control |= IOACCEL1_CONTROL_NODATAXFER;
3357 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3358 cmd->sc_data_direction);
3363 control |= IOACCEL1_CONTROL_NODATAXFER;
3366 c->Header.SGList = use_sg;
3367 /* Fill out the command structure to submit */
3368 cp->dev_handle = ioaccel_handle & 0xFFFF;
3369 cp->transfer_len = total_len;
3370 cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
3371 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
3372 cp->control = control;
3373 memcpy(cp->CDB, cdb, cdb_len);
3374 memcpy(cp->CISS_LUN, scsi3addr, 8);
3375 /* Tag was already set at init time. */
3376 enqueue_cmd_and_start_io(h, c);
3381 * Queue a command directly to a device behind the controller using the
3382 * I/O accelerator path.
3384 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3385 struct CommandList *c)
3387 struct scsi_cmnd *cmd = c->scsi_cmd;
3388 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3390 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3391 cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3395 * Set encryption parameters for the ioaccel2 request
3397 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3398 struct CommandList *c, struct io_accel2_cmd *cp)
3400 struct scsi_cmnd *cmd = c->scsi_cmd;
3401 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3402 struct raid_map_data *map = &dev->raid_map;
3405 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3407 /* Are we doing encryption on this device */
3408 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
3410 /* Set the data encryption key index. */
3411 cp->dekindex = map->dekindex;
3413 /* Set the encryption enable flag, encoded into direction field. */
3414 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3416 /* Set encryption tweak values based on logical block address
3417 * If block size is 512, tweak value is LBA.
3418 * For other block sizes, tweak is (LBA * block size) / 512.
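* e.g. with a (hypothetical) 4096-byte volume_blk_size, LBA 100
* yields tweak (100 * 4096) / 512 = 800, the equivalent 512-byte
* block address.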
3420 switch (cmd->cmnd[0]) {
3421 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3424 if (map->volume_blk_size == 512) {
3426 (((u32) cmd->cmnd[2]) << 8) |
3428 cp->tweak_upper = 0;
3431 (((u64) cmd->cmnd[2]) << 8) |
3433 first_block = (first_block * map->volume_blk_size)/512;
3434 cp->tweak_lower = (u32)first_block;
3435 cp->tweak_upper = (u32)(first_block >> 32);
3440 if (map->volume_blk_size == 512) {
3442 (((u32) cmd->cmnd[2]) << 24) |
3443 (((u32) cmd->cmnd[3]) << 16) |
3444 (((u32) cmd->cmnd[4]) << 8) |
3446 cp->tweak_upper = 0;
3449 (((u64) cmd->cmnd[2]) << 24) |
3450 (((u64) cmd->cmnd[3]) << 16) |
3451 (((u64) cmd->cmnd[4]) << 8) |
3453 first_block = (first_block * map->volume_blk_size)/512;
3454 cp->tweak_lower = (u32)first_block;
3455 cp->tweak_upper = (u32)(first_block >> 32);
3458 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3461 if (map->volume_blk_size == 512) {
3463 (((u32) cmd->cmnd[2]) << 24) |
3464 (((u32) cmd->cmnd[3]) << 16) |
3465 (((u32) cmd->cmnd[4]) << 8) |
3467 cp->tweak_upper = 0;
3470 (((u64) cmd->cmnd[2]) << 24) |
3471 (((u64) cmd->cmnd[3]) << 16) |
3472 (((u64) cmd->cmnd[4]) << 8) |
3474 first_block = (first_block * map->volume_blk_size)/512;
3475 cp->tweak_lower = (u32)first_block;
3476 cp->tweak_upper = (u32)(first_block >> 32);
3481 if (map->volume_blk_size == 512) {
3483 (((u32) cmd->cmnd[6]) << 24) |
3484 (((u32) cmd->cmnd[7]) << 16) |
3485 (((u32) cmd->cmnd[8]) << 8) |
3488 (((u32) cmd->cmnd[2]) << 24) |
3489 (((u32) cmd->cmnd[3]) << 16) |
3490 (((u32) cmd->cmnd[4]) << 8) |
3494 (((u64) cmd->cmnd[2]) << 56) |
3495 (((u64) cmd->cmnd[3]) << 48) |
3496 (((u64) cmd->cmnd[4]) << 40) |
3497 (((u64) cmd->cmnd[5]) << 32) |
3498 (((u64) cmd->cmnd[6]) << 24) |
3499 (((u64) cmd->cmnd[7]) << 16) |
3500 (((u64) cmd->cmnd[8]) << 8) |
3502 first_block = (first_block * map->volume_blk_size)/512;
3503 cp->tweak_lower = (u32)first_block;
3504 cp->tweak_upper = (u32)(first_block >> 32);
3508 dev_err(&h->pdev->dev,
3509 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
3516 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3517 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3520 struct scsi_cmnd *cmd = c->scsi_cmd;
3521 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3522 struct ioaccel2_sg_element *curr_sg;
3524 struct scatterlist *sg;
3529 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3530 return IO_ACCEL_INELIGIBLE;
3532 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3533 return IO_ACCEL_INELIGIBLE;
3534 c->cmd_type = CMD_IOACCEL2;
3535 /* Adjust the DMA address to point to the accelerated command buffer */
3536 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3537 (c->cmdindex * sizeof(*cp));
3538 BUG_ON(c->busaddr & 0x0000007F);
3540 memset(cp, 0, sizeof(*cp));
3541 cp->IU_type = IOACCEL2_IU_TYPE;
3543 use_sg = scsi_dma_map(cmd);
3548 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3550 scsi_for_each_sg(cmd, sg, use_sg, i) {
3551 addr64 = (u64) sg_dma_address(sg);
3552 len = sg_dma_len(sg);
3554 curr_sg->address = cpu_to_le64(addr64);
3555 curr_sg->length = cpu_to_le32(len);
3556 curr_sg->reserved[0] = 0;
3557 curr_sg->reserved[1] = 0;
3558 curr_sg->reserved[2] = 0;
3559 curr_sg->chain_indicator = 0;
3563 switch (cmd->sc_data_direction) {
3565 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3566 cp->direction |= IOACCEL2_DIR_DATA_OUT;
3568 case DMA_FROM_DEVICE:
3569 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3570 cp->direction |= IOACCEL2_DIR_DATA_IN;
3573 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3574 cp->direction |= IOACCEL2_DIR_NO_DATA;
3577 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3578 cmd->sc_data_direction);
3583 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3584 cp->direction |= IOACCEL2_DIR_NO_DATA;
3587 /* Set encryption parameters, if necessary */
3588 set_encrypt_ioaccel2(h, c, cp);
3590 cp->scsi_nexus = ioaccel_handle;
3591 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
3593 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3595 /* fill in sg elements */
3596 cp->sg_count = (u8) use_sg;
3598 cp->data_len = cpu_to_le32(total_len);
3599 cp->err_ptr = cpu_to_le64(c->busaddr +
3600 offsetof(struct io_accel2_cmd, error_data));
3601 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
3603 enqueue_cmd_and_start_io(h, c);
3608 * Queue a command to the correct I/O accelerator path.
3610 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3611 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3614 if (h->transMethod & CFGTBL_Trans_io_accel1)
3615 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3616 cdb, cdb_len, scsi3addr);
3618 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3619 cdb, cdb_len, scsi3addr);
3622 static void raid_map_helper(struct raid_map_data *map,
3623 int offload_to_mirror, u32 *map_index, u32 *current_group)
3625 if (offload_to_mirror == 0) {
3626 /* use physical disk in the first mirrored group. */
3627 *map_index %= map->data_disks_per_row;
3631 /* determine mirror group that *map_index indicates */
3632 *current_group = *map_index / map->data_disks_per_row;
3633 if (offload_to_mirror == *current_group)
3635 if (*current_group < (map->layout_map_count - 1)) {
3636 /* select map index from next group */
3637 *map_index += map->data_disks_per_row;
3640 /* select map index from first group */
3641 *map_index %= map->data_disks_per_row;
3644 } while (offload_to_mirror != *current_group);
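/* Worked example (hypothetical geometry): with 4 data disks per row
 * and layout_map_count == 3, map_index 5 sits in mirror group 1;
 * requesting offload_to_mirror == 2 advances it to index 9, the same
 * column within the next mirrored copy of the data.
 */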
3648 * Attempt to perform offload RAID mapping for a logical volume I/O.
3650 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3651 struct CommandList *c)
3653 struct scsi_cmnd *cmd = c->scsi_cmd;
3654 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3655 struct raid_map_data *map = &dev->raid_map;
3656 struct raid_map_disk_data *dd = &map->data[0];
3659 u64 first_block, last_block;
3662 u64 first_row, last_row;
3663 u32 first_row_offset, last_row_offset;
3664 u32 first_column, last_column;
3665 u64 r0_first_row, r0_last_row;
3666 u32 r5or6_blocks_per_row;
3667 u64 r5or6_first_row, r5or6_last_row;
3668 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3669 u32 r5or6_first_column, r5or6_last_column;
3670 u32 total_disks_per_row;
3672 u32 first_group, last_group, current_group;
3679 #if BITS_PER_LONG == 32
3682 int offload_to_mirror;
3684 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3686 /* check for valid opcode, get LBA and block count */
3687 switch (cmd->cmnd[0]) {
3692 (((u64) cmd->cmnd[2]) << 8) |
3694 block_cnt = cmd->cmnd[4];
3700 (((u64) cmd->cmnd[2]) << 24) |
3701 (((u64) cmd->cmnd[3]) << 16) |
3702 (((u64) cmd->cmnd[4]) << 8) |
3705 (((u32) cmd->cmnd[7]) << 8) |
3712 (((u64) cmd->cmnd[2]) << 24) |
3713 (((u64) cmd->cmnd[3]) << 16) |
3714 (((u64) cmd->cmnd[4]) << 8) |
3717 (((u32) cmd->cmnd[6]) << 24) |
3718 (((u32) cmd->cmnd[7]) << 16) |
3719 (((u32) cmd->cmnd[8]) << 8) |
3726 (((u64) cmd->cmnd[2]) << 56) |
3727 (((u64) cmd->cmnd[3]) << 48) |
3728 (((u64) cmd->cmnd[4]) << 40) |
3729 (((u64) cmd->cmnd[5]) << 32) |
3730 (((u64) cmd->cmnd[6]) << 24) |
3731 (((u64) cmd->cmnd[7]) << 16) |
3732 (((u64) cmd->cmnd[8]) << 8) |
3735 (((u32) cmd->cmnd[10]) << 24) |
3736 (((u32) cmd->cmnd[11]) << 16) |
3737 (((u32) cmd->cmnd[12]) << 8) |
3741 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3743 BUG_ON(block_cnt == 0);
3744 last_block = first_block + block_cnt - 1;
3746 /* check for write to non-RAID-0 */
3747 if (is_write && dev->raid_level != 0)
3748 return IO_ACCEL_INELIGIBLE;
3750 /* check for invalid block or wraparound */
3751 if (last_block >= map->volume_blk_cnt || last_block < first_block)
3752 return IO_ACCEL_INELIGIBLE;
3754 /* calculate stripe information for the request */
3755 blocks_per_row = map->data_disks_per_row * map->strip_size;
3756 #if BITS_PER_LONG == 32
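/* (On 32-bit builds 64-bit division must go through do_div(), which
 * divides its dividend in place and returns the remainder.)
 */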
3757 tmpdiv = first_block;
3758 (void) do_div(tmpdiv, blocks_per_row);
3760 tmpdiv = last_block;
3761 (void) do_div(tmpdiv, blocks_per_row);
3763 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3764 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3765 tmpdiv = first_row_offset;
3766 (void) do_div(tmpdiv, map->strip_size);
3767 first_column = tmpdiv;
3768 tmpdiv = last_row_offset;
3769 (void) do_div(tmpdiv, map->strip_size);
3770 last_column = tmpdiv;
3772 first_row = first_block / blocks_per_row;
3773 last_row = last_block / blocks_per_row;
3774 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3775 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3776 first_column = first_row_offset / map->strip_size;
3777 last_column = last_row_offset / map->strip_size;
3780 /* if this isn't a single row/column then give to the controller */
3781 if ((first_row != last_row) || (first_column != last_column))
3782 return IO_ACCEL_INELIGIBLE;
3784 /* proceeding with driver mapping */
3785 total_disks_per_row = map->data_disks_per_row +
3786 map->metadata_disks_per_row;
3787 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3789 map_index = (map_row * total_disks_per_row) + first_column;
3791 switch (dev->raid_level) {
3793 break; /* nothing special to do */
3795 /* Handles load balance across RAID 1 members.
3796 * (2-drive R1 and R10 with an even # of drives).
3797 * Appropriate for SSDs, not optimal for HDDs
3799 BUG_ON(map->layout_map_count != 2);
3800 if (dev->offload_to_mirror)
3801 map_index += map->data_disks_per_row;
3802 dev->offload_to_mirror = !dev->offload_to_mirror;
3805 /* Handles N-way mirrors (R1-ADM)
3806 * and R10 with # of drives divisible by 3.
3808 BUG_ON(map->layout_map_count != 3);
3810 offload_to_mirror = dev->offload_to_mirror;
3811 raid_map_helper(map, offload_to_mirror,
3812 &map_index, ¤t_group);
3813 /* set mirror group to use next time */
3815 (offload_to_mirror >= map->layout_map_count - 1)
3816 ? 0 : offload_to_mirror + 1;
3817 /* FIXME: remove after debug/dev */
3818 BUG_ON(offload_to_mirror >= map->layout_map_count);
3819 dev_warn(&h->pdev->dev,
3820 "DEBUG: Using physical disk map index %d from mirror group %d\n",
3821 map_index, offload_to_mirror);
3822 dev->offload_to_mirror = offload_to_mirror;
3823 /* Avoid direct use of dev->offload_to_mirror within this
3824 * function since multiple threads might simultaneously
3825 * increment it beyond the range of map->layout_map_count - 1.
3830 if (map->layout_map_count <= 1)
3833 /* Verify first and last block are in same RAID group */
3834 r5or6_blocks_per_row =
3835 map->strip_size * map->data_disks_per_row;
3836 BUG_ON(r5or6_blocks_per_row == 0);
3837 stripesize = r5or6_blocks_per_row * map->layout_map_count;
3838 #if BITS_PER_LONG == 32
3839 tmpdiv = first_block;
3840 first_group = do_div(tmpdiv, stripesize);
3841 tmpdiv = first_group;
3842 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3843 first_group = tmpdiv;
3844 tmpdiv = last_block;
3845 last_group = do_div(tmpdiv, stripesize);
3846 tmpdiv = last_group;
3847 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3848 last_group = tmpdiv;
3850 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3851 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3853 if (first_group != last_group)
3854 return IO_ACCEL_INELIGIBLE;
3856 /* Verify request is in a single row of RAID 5/6 */
3857 #if BITS_PER_LONG == 32
3858 tmpdiv = first_block;
3859 (void) do_div(tmpdiv, stripesize);
3860 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3861 tmpdiv = last_block;
3862 (void) do_div(tmpdiv, stripesize);
3863 r5or6_last_row = r0_last_row = tmpdiv;
3865 first_row = r5or6_first_row = r0_first_row =
3866 first_block / stripesize;
3867 r5or6_last_row = r0_last_row = last_block / stripesize;
3869 if (r5or6_first_row != r5or6_last_row)
3870 return IO_ACCEL_INELIGIBLE;
3873 /* Verify request is in a single column */
3874 #if BITS_PER_LONG == 32
3875 tmpdiv = first_block;
3876 first_row_offset = do_div(tmpdiv, stripesize);
3877 tmpdiv = first_row_offset;
3878 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3879 r5or6_first_row_offset = first_row_offset;
3880 tmpdiv = last_block;
3881 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3882 tmpdiv = r5or6_last_row_offset;
3883 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3884 tmpdiv = r5or6_first_row_offset;
3885 (void) do_div(tmpdiv, map->strip_size);
3886 first_column = r5or6_first_column = tmpdiv;
3887 tmpdiv = r5or6_last_row_offset;
3888 (void) do_div(tmpdiv, map->strip_size);
3889 r5or6_last_column = tmpdiv;
3891 first_row_offset = r5or6_first_row_offset =
3892 (u32)((first_block % stripesize) %
3893 r5or6_blocks_per_row);
3895 r5or6_last_row_offset =
3896 (u32)((last_block % stripesize) %
3897 r5or6_blocks_per_row);
3899 first_column = r5or6_first_column =
3900 r5or6_first_row_offset / map->strip_size;
3902 r5or6_last_row_offset / map->strip_size;
3904 if (r5or6_first_column != r5or6_last_column)
3905 return IO_ACCEL_INELIGIBLE;
3907 /* Request is eligible */
3908 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3911 map_index = (first_group *
3912 (map->row_cnt * total_disks_per_row)) +
3913 (map_row * total_disks_per_row) + first_column;
3916 return IO_ACCEL_INELIGIBLE;
3919 disk_handle = dd[map_index].ioaccel_handle;
3920 disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
3921 (first_row_offset - (first_column * map->strip_size));
3922 disk_block_cnt = block_cnt;
3924 /* handle differing logical/physical block sizes */
3925 if (map->phys_blk_shift) {
3926 disk_block <<= map->phys_blk_shift;
3927 disk_block_cnt <<= map->phys_blk_shift;
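/* e.g. a (hypothetical) phys_blk_shift of 3 maps 4096-byte logical
 * blocks onto 512-byte physical blocks: logical block 10 becomes
 * physical block 80, and a count of 2 becomes 16.
 */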
3929 BUG_ON(disk_block_cnt > 0xffff);
3931 /* build the new CDB for the physical disk I/O */
3932 if (disk_block > 0xffffffff) {
3933 cdb[0] = is_write ? WRITE_16 : READ_16;
3935 cdb[2] = (u8) (disk_block >> 56);
3936 cdb[3] = (u8) (disk_block >> 48);
3937 cdb[4] = (u8) (disk_block >> 40);
3938 cdb[5] = (u8) (disk_block >> 32);
3939 cdb[6] = (u8) (disk_block >> 24);
3940 cdb[7] = (u8) (disk_block >> 16);
3941 cdb[8] = (u8) (disk_block >> 8);
3942 cdb[9] = (u8) (disk_block);
3943 cdb[10] = (u8) (disk_block_cnt >> 24);
3944 cdb[11] = (u8) (disk_block_cnt >> 16);
3945 cdb[12] = (u8) (disk_block_cnt >> 8);
3946 cdb[13] = (u8) (disk_block_cnt);
3951 cdb[0] = is_write ? WRITE_10 : READ_10;
3953 cdb[2] = (u8) (disk_block >> 24);
3954 cdb[3] = (u8) (disk_block >> 16);
3955 cdb[4] = (u8) (disk_block >> 8);
3956 cdb[5] = (u8) (disk_block);
3958 cdb[7] = (u8) (disk_block_cnt >> 8);
3959 cdb[8] = (u8) (disk_block_cnt);
3963 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3967 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3968 void (*done)(struct scsi_cmnd *))
3970 struct ctlr_info *h;
3971 struct hpsa_scsi_dev_t *dev;
3972 unsigned char scsi3addr[8];
3973 struct CommandList *c;
3974 unsigned long flags;
3977 /* Get the ptr to our adapter structure out of cmd->host. */
3978 h = sdev_to_hba(cmd->device);
3979 dev = cmd->device->hostdata;
3981 cmd->result = DID_NO_CONNECT << 16;
3985 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3987 spin_lock_irqsave(&h->lock, flags);
3988 if (unlikely(h->lockup_detected)) {
3989 spin_unlock_irqrestore(&h->lock, flags);
3990 cmd->result = DID_ERROR << 16;
3994 spin_unlock_irqrestore(&h->lock, flags);
3996 if (c == NULL) { /* trouble... */
3997 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
3998 return SCSI_MLQUEUE_HOST_BUSY;
4001 /* Fill in the command list header */
4003 cmd->scsi_done = done; /* save this for use by completion code */
4005 /* save c in case we have to abort it */
4006 cmd->host_scribble = (unsigned char *) c;
4008 c->cmd_type = CMD_SCSI;
4011 /* Call alternate submit routine for I/O accelerated commands.
4012 * Retries always go down the normal I/O path.
4014 if (likely(cmd->retries == 0 &&
4015 cmd->request->cmd_type == REQ_TYPE_FS &&
4016 h->acciopath_status)) {
4017 if (dev->offload_enabled) {
4018 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4019 if (rc == 0)
4020 return 0; /* Sent on ioaccel path */
4021 if (rc < 0) { /* scsi_dma_map failed. */
4022 cmd_free(h, c);
4023 return SCSI_MLQUEUE_HOST_BUSY;
4024 }
4025 } else if (dev->ioaccel_handle) {
4026 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4027 if (rc == 0)
4028 return 0; /* Sent on direct map path */
4029 if (rc < 0) { /* scsi_dma_map failed. */
4030 cmd_free(h, c);
4031 return SCSI_MLQUEUE_HOST_BUSY;
4032 }
4033 }
4034 }
4036 c->Header.ReplyQueue = 0; /* unused in simple mode */
4037 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4038 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
4039 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
4041 /* Fill in the request block... */
4043 c->Request.Timeout = 0;
4044 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4045 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4046 c->Request.CDBLen = cmd->cmd_len;
4047 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4048 c->Request.Type.Type = TYPE_CMD;
4049 c->Request.Type.Attribute = ATTR_SIMPLE;
4050 switch (cmd->sc_data_direction) {
4051 case DMA_TO_DEVICE:
4052 c->Request.Type.Direction = XFER_WRITE;
4053 break;
4054 case DMA_FROM_DEVICE:
4055 c->Request.Type.Direction = XFER_READ;
4056 break;
4057 case DMA_NONE:
4058 c->Request.Type.Direction = XFER_NONE;
4059 break;
4060 case DMA_BIDIRECTIONAL:
4061 /* This can happen if a buggy application does a scsi passthru
4062 * and sets both inlen and outlen to non-zero. ( see
4063 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4064 */
4066 c->Request.Type.Direction = XFER_RSVD;
4067 /* This is technically wrong, and hpsa controllers should
4068 * reject it with CMD_INVALID, which is the most correct
4069 * response, but non-fibre backends appear to let it
4070 * slide by, and give the same results as if this field
4071 * were set correctly. Either way is acceptable for
4072 * our purposes here.
4073 */
4074 break;
4077 default:
4078 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4079 cmd->sc_data_direction);
4080 BUG();
4081 break;
4082 }
4084 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4085 cmd_free(h, c);
4086 return SCSI_MLQUEUE_HOST_BUSY;
4087 }
4088 enqueue_cmd_and_start_io(h, c);
4089 /* the cmd'll come back via intr handler in complete_scsi_command() */
4090 return 0;
4091 }
4093 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
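/* DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the
 * hpsa_scsi_queue_command() wrapper that takes the Scsi_Host lock and
 * saves IRQ state around hpsa_scsi_queue_command_lck() above.
 */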
4095 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4097 unsigned long flags;
4099 /*
4100 * Don't let rescans be initiated on a controller known
4101 * to be locked up. If the controller locks up *during*
4102 * a rescan, that thread is probably hosed, but at least
4103 * we can prevent new rescan threads from piling up on a
4104 * locked up controller.
4105 */
4106 spin_lock_irqsave(&h->lock, flags);
4107 if (unlikely(h->lockup_detected)) {
4108 spin_unlock_irqrestore(&h->lock, flags);
4109 spin_lock_irqsave(&h->scan_lock, flags);
4110 h->scan_finished = 1;
4111 wake_up_all(&h->scan_wait_queue);
4112 spin_unlock_irqrestore(&h->scan_lock, flags);
4113 return 1;
4114 }
4115 spin_unlock_irqrestore(&h->lock, flags);
4116 return 0;
4117 }
4119 static void hpsa_scan_start(struct Scsi_Host *sh)
4121 struct ctlr_info *h = shost_to_hba(sh);
4122 unsigned long flags;
4124 if (do_not_scan_if_controller_locked_up(h))
4125 return;
4127 /* wait until any scan already in progress is finished. */
4128 while (1) {
4129 spin_lock_irqsave(&h->scan_lock, flags);
4130 if (h->scan_finished)
4131 break;
4132 spin_unlock_irqrestore(&h->scan_lock, flags);
4133 wait_event(h->scan_wait_queue, h->scan_finished);
4134 /* Note: We don't need to worry about a race between this
4135 * thread and driver unload because the midlayer will
4136 * have incremented the reference count, so unload won't
4137 * happen if we're in here.
4138 */
4139 }
4140 h->scan_finished = 0; /* mark scan as in progress */
4141 spin_unlock_irqrestore(&h->scan_lock, flags);
4143 if (do_not_scan_if_controller_locked_up(h))
4144 return;
4146 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4148 spin_lock_irqsave(&h->scan_lock, flags);
4149 h->scan_finished = 1; /* mark scan as finished. */
4150 wake_up_all(&h->scan_wait_queue);
4151 spin_unlock_irqrestore(&h->scan_lock, flags);
4154 static int hpsa_scan_finished(struct Scsi_Host *sh,
4155 unsigned long elapsed_time)
4157 struct ctlr_info *h = shost_to_hba(sh);
4158 unsigned long flags;
4159 int finished;
4161 spin_lock_irqsave(&h->scan_lock, flags);
4162 finished = h->scan_finished;
4163 spin_unlock_irqrestore(&h->scan_lock, flags);
4164 return finished;
4165 }
4167 static int hpsa_change_queue_depth(struct scsi_device *sdev,
4168 int qdepth, int reason)
4170 struct ctlr_info *h = sdev_to_hba(sdev);
4172 if (reason != SCSI_QDEPTH_DEFAULT)
4173 return -ENOTSUPP;
4176 if (qdepth < 1)
4177 qdepth = 1;
4178 else if (qdepth > h->nr_cmds)
4179 qdepth = h->nr_cmds;
4180 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4181 return sdev->queue_depth;
4184 static void hpsa_unregister_scsi(struct ctlr_info *h)
4186 /* we are being forcibly unloaded, and may not refuse. */
4187 scsi_remove_host(h->scsi_host);
4188 scsi_host_put(h->scsi_host);
4189 h->scsi_host = NULL;
4192 static int hpsa_register_scsi(struct ctlr_info *h)
4193 {
4194 struct Scsi_Host *sh;
4195 int error;
4197 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4198 if (sh == NULL)
4199 goto fail;
4201 sh->io_port = 0;
4202 sh->n_io_port = 0;
4203 sh->this_id = -1;
4204 sh->max_channel = 3;
4205 sh->max_cmd_len = MAX_COMMAND_SIZE;
4206 sh->max_lun = HPSA_MAX_LUN;
4207 sh->max_id = HPSA_MAX_LUN;
4208 sh->can_queue = h->nr_cmds;
4209 if (h->hba_mode_enabled)
4210 sh->cmd_per_lun = 7;
4212 sh->cmd_per_lun = h->nr_cmds;
4213 sh->sg_tablesize = h->maxsgentries;
4214 h->scsi_host = sh;
4215 sh->hostdata[0] = (unsigned long) h;
4216 sh->irq = h->intr[h->intr_mode];
4217 sh->unique_id = sh->irq;
4218 error = scsi_add_host(sh, &h->pdev->dev);
4219 if (error)
4220 goto fail_host_put;
4221 scsi_scan_host(sh);
4222 return 0;
4224 fail_host_put:
4225 dev_err(&h->pdev->dev, "%s: scsi_add_host"
4226 " failed for controller %d\n", __func__, h->ctlr);
4227 scsi_host_put(sh);
4228 return error;
4229 fail:
4230 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4231 " failed for controller %d\n", __func__, h->ctlr);
4232 return -ENOMEM;
4233 }
4235 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4236 unsigned char lunaddr[])
4237 {
4238 int rc = 0;
4239 int count = 0;
4240 int waittime = 1; /* seconds */
4241 struct CommandList *c;
4243 c = cmd_special_alloc(h);
4244 if (!c) {
4245 dev_warn(&h->pdev->dev, "out of memory in "
4246 "wait_for_device_to_become_ready.\n");
4247 return IO_ERROR;
4248 }
4250 /* Send test unit ready until device ready, or give up. */
4251 while (count < HPSA_TUR_RETRY_LIMIT) {
4253 /* Wait for a bit. do this first, because if we send
4254 * the TUR right away, the reset will just abort it.
4255 */
4256 msleep(1000 * waittime);
4257 count++;
4258 rc = 0; /* Device ready. */
4260 /* Increase wait time with each try, up to a point. */
4261 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4262 waittime = waittime * 2;
4264 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4265 (void) fill_cmd(c, TEST_UNIT_READY, h,
4266 NULL, 0, 0, lunaddr, TYPE_CMD);
4267 hpsa_scsi_do_simple_cmd_core(h, c);
4268 /* no unmap needed here because no data xfer. */
4270 if (c->err_info->CommandStatus == CMD_SUCCESS)
4271 break;
4273 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4274 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4275 (c->err_info->SenseInfo[2] == NO_SENSE ||
4276 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4277 break;
4279 dev_warn(&h->pdev->dev, "waiting %d secs "
4280 "for device to become ready.\n", waittime);
4281 rc = 1; /* device not ready. */
4282 }
4284 if (rc)
4285 dev_warn(&h->pdev->dev, "giving up on device.\n");
4286 else
4287 dev_warn(&h->pdev->dev, "device is ready.\n");
4289 cmd_special_free(h, c);
4290 return rc;
4291 }
4293 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4294 * complaining. Doing a host- or bus-reset can't do anything good here.
4295 */
4296 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4297 {
4298 int rc;
4299 struct ctlr_info *h;
4300 struct hpsa_scsi_dev_t *dev;
4302 /* find the controller to which the command to be aborted was sent */
4303 h = sdev_to_hba(scsicmd->device);
4304 if (h == NULL) /* paranoia */
4305 return FAILED;
4306 dev = scsicmd->device->hostdata;
4307 if (!dev) {
4308 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4309 "device lookup failed.\n");
4310 return FAILED;
4311 }
4312 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4313 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4314 /* send a reset to the SCSI LUN which the command was sent to */
4315 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
4316 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4317 return SUCCESS;
4319 dev_warn(&h->pdev->dev, "resetting device failed.\n");
4320 return FAILED;
4321 }
4323 static void swizzle_abort_tag(u8 *tag)
4324 {
4325 u8 original_tag[8];
4327 memcpy(original_tag, tag, 8);
4328 tag[0] = original_tag[3];
4329 tag[1] = original_tag[2];
4330 tag[2] = original_tag[1];
4331 tag[3] = original_tag[0];
4332 tag[4] = original_tag[7];
4333 tag[5] = original_tag[6];
4334 tag[6] = original_tag[5];
4335 tag[7] = original_tag[4];
4336 }
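/* Worked example (illustrative): swizzle_abort_tag() byte-reverses each
 * 32-bit half of the 8-byte tag independently, so
 *	{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }
 * becomes
 *	{ 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04 }
 * i.e. the same transform as applying swab32() to each half. Applying
 * it twice restores the original tag.
 */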
4338 static void hpsa_get_tag(struct ctlr_info *h,
4339 struct CommandList *c, u32 *taglower, u32 *tagupper)
4341 if (c->cmd_type == CMD_IOACCEL1) {
4342 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4343 &h->ioaccel_cmd_pool[c->cmdindex];
4344 *tagupper = cm1->Tag.upper;
4345 *taglower = cm1->Tag.lower;
4346 return;
4347 }
4348 if (c->cmd_type == CMD_IOACCEL2) {
4349 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4350 &h->ioaccel2_cmd_pool[c->cmdindex];
4351 /* upper tag not used in ioaccel2 mode */
4352 memset(tagupper, 0, sizeof(*tagupper));
4353 *taglower = cm2->Tag;
4354 return;
4355 }
4356 *tagupper = c->Header.Tag.upper;
4357 *taglower = c->Header.Tag.lower;
4361 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4362 struct CommandList *abort, int swizzle)
4363 {
4364 int rc = IO_OK;
4365 struct CommandList *c;
4366 struct ErrorInfo *ei;
4367 u32 tagupper, taglower;
4369 c = cmd_special_alloc(h);
4370 if (c == NULL) { /* trouble... */
4371 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4375 /* fill_cmd can't fail here, no buffer to map */
4376 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4377 0, 0, scsi3addr, TYPE_MSG);
4378 if (swizzle)
4379 swizzle_abort_tag(&c->Request.CDB[4]);
4380 hpsa_scsi_do_simple_cmd_core(h, c);
4381 hpsa_get_tag(h, abort, &taglower, &tagupper);
4382 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
4383 __func__, tagupper, taglower);
4384 /* no unmap needed here because no data xfer. */
4386 ei = c->err_info;
4387 switch (ei->CommandStatus) {
4388 case CMD_SUCCESS:
4389 break;
4390 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4391 rc = -1;
4392 break;
4393 default:
4394 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4395 __func__, tagupper, taglower);
4396 hpsa_scsi_interpret_error(h, c);
4397 rc = -1;
4398 break;
4399 }
4400 cmd_special_free(h, c);
4401 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4402 __func__, tagupper, taglower);
4403 return rc;
4404 }
4406 /*
4407 * hpsa_find_cmd_in_queue
4409 * Used to determine whether a command (find) is still present
4410 * in queue_head. Optionally excludes the last element of queue_head.
4412 * This is used to avoid unnecessary aborts. Commands in h->reqQ have
4413 * not yet been submitted, and so can be aborted by the driver without
4414 * sending an abort to the hardware.
4416 * Returns pointer to command if found in queue, NULL otherwise.
4417 */
4418 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
4419 struct scsi_cmnd *find, struct list_head *queue_head)
4421 unsigned long flags;
4422 struct CommandList *c = NULL; /* ptr into cmpQ */
4426 spin_lock_irqsave(&h->lock, flags);
4427 list_for_each_entry(c, queue_head, list) {
4428 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
4429 continue;
4430 if (c->scsi_cmd == find) {
4431 spin_unlock_irqrestore(&h->lock, flags);
4432 return c;
4433 }
4434 }
4435 spin_unlock_irqrestore(&h->lock, flags);
4436 return NULL;
4437 }
4439 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
4440 u8 *tag, struct list_head *queue_head)
4442 unsigned long flags;
4443 struct CommandList *c;
4445 spin_lock_irqsave(&h->lock, flags);
4446 list_for_each_entry(c, queue_head, list) {
4447 if (memcmp(&c->Header.Tag, tag, 8) != 0)
4448 continue;
4449 spin_unlock_irqrestore(&h->lock, flags);
4450 return c;
4451 }
4452 spin_unlock_irqrestore(&h->lock, flags);
4453 return NULL;
4454 }
4456 /* ioaccel2 path firmware cannot handle abort task requests.
4457 * Change abort requests to physical target reset, and send to the
4458 * address of the physical disk used for the ioaccel 2 command.
4459 * Return 0 on success (IO_OK)
4460 * -1 on failure
4461 */
4463 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4464 unsigned char *scsi3addr, struct CommandList *abort)
4465 {
4466 int rc = IO_OK;
4467 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4468 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4469 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4470 unsigned char *psa = &phys_scsi3addr[0];
4472 /* Get a pointer to the hpsa logical device. */
4473 scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4474 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4475 if (dev == NULL) {
4476 dev_warn(&h->pdev->dev,
4477 "Cannot abort: no device pointer for command.\n");
4478 return -1; /* not abortable */
4479 }
4481 if (h->raid_offload_debug > 0)
4482 dev_info(&h->pdev->dev,
4483 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4484 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4485 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4486 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4488 if (!dev->offload_enabled) {
4489 dev_warn(&h->pdev->dev,
4490 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4491 return -1; /* not abortable */
4492 }
4494 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4495 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4496 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4497 return -1; /* not abortable */
4498 }
4500 /* send the reset */
4501 if (h->raid_offload_debug > 0)
4502 dev_info(&h->pdev->dev,
4503 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4504 psa[0], psa[1], psa[2], psa[3],
4505 psa[4], psa[5], psa[6], psa[7]);
4506 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4507 if (rc != 0) {
4508 dev_warn(&h->pdev->dev,
4509 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4510 psa[0], psa[1], psa[2], psa[3],
4511 psa[4], psa[5], psa[6], psa[7]);
4512 return rc; /* failed to reset */
4513 }
4515 /* wait for device to recover */
4516 if (wait_for_device_to_become_ready(h, psa) != 0) {
4517 dev_warn(&h->pdev->dev,
4518 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4519 psa[0], psa[1], psa[2], psa[3],
4520 psa[4], psa[5], psa[6], psa[7]);
4521 return -1; /* failed to recover */
4522 }
4524 /* device recovered */
4525 dev_info(&h->pdev->dev,
4526 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4527 psa[0], psa[1], psa[2], psa[3],
4528 psa[4], psa[5], psa[6], psa[7]);
4530 return rc; /* success */
4531 }
4533 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
4534 * tell which kind we're dealing with, so we send the abort both ways. There
4535 * shouldn't be any collisions between swizzled and unswizzled tags due to the
4536 * way we construct our tags but we check anyway in case the assumptions which
4537 * make this true someday become false.
4538 */
4539 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4540 unsigned char *scsi3addr, struct CommandList *abort)
4541 {
4542 u8 swizzled_tag[8];
4543 struct CommandList *c;
4544 int rc = 0, rc2 = 0;
4546 /* ioaccelerator mode 2 commands should be aborted via the
4547 * accelerated path, since RAID path is unaware of these commands,
4548 * but underlying firmware can't handle abort TMF.
4549 * Change abort to physical device reset.
4550 */
4551 if (abort->cmd_type == CMD_IOACCEL2)
4552 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4554 /* we do not expect to find the swizzled tag in our queue, but
4555 * check anyway just to be sure the assumptions which make this
4556 * the case haven't become wrong.
4557 */
4558 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
4559 swizzle_abort_tag(swizzled_tag);
4560 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
4561 if (c != NULL) {
4562 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
4563 return hpsa_send_abort(h, scsi3addr, abort, 0);
4564 }
4565 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
4567 /* if the command is still in our queue, we can't conclude that it was
4568 * aborted (it might have just completed normally) but in any case
4569 * we don't need to try to abort it another way.
4570 */
4571 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
4572 if (c)
4573 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
4574 return rc && rc2;
4575 }
4577 /* Send an abort for the specified command.
4578 * If the device and controller support it,
4579 * send a task abort request.
4580 */
4581 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4582 {
4584 int i, rc;
4585 struct ctlr_info *h;
4586 struct hpsa_scsi_dev_t *dev;
4587 struct CommandList *abort; /* pointer to command to be aborted */
4588 struct CommandList *found;
4589 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
4590 char msg[256]; /* For debug messaging. */
4591 int ml = 0;
4592 u32 tagupper, taglower;
4594 /* Find the controller of the command to be aborted */
4595 h = sdev_to_hba(sc->device);
4596 if (WARN(h == NULL,
4597 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4598 return FAILED;
4600 /* Check that controller supports some kind of task abort */
4601 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4602 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4603 return FAILED;
4605 memset(msg, 0, sizeof(msg));
4606 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
4607 h->scsi_host->host_no, sc->device->channel,
4608 sc->device->id, sc->device->lun);
4610 /* Find the device of the command to be aborted */
4611 dev = sc->device->hostdata;
4612 if (!dev) {
4613 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4614 msg);
4615 return FAILED;
4616 }
4618 /* Get SCSI command to be aborted */
4619 abort = (struct CommandList *) sc->host_scribble;
4620 if (abort == NULL) {
4621 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
4625 hpsa_get_tag(h, abort, &taglower, &tagupper);
4626 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4627 as = (struct scsi_cmnd *) abort->scsi_cmd;
4629 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4630 as->cmnd[0], as->serial_number);
4631 dev_dbg(&h->pdev->dev, "%s\n", msg);
4632 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4633 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4635 /* Search reqQ to See if command is queued but not submitted,
4636 * if so, complete the command with aborted status and remove
4637 * it from the reqQ.
4638 */
4639 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
4640 if (found) {
4641 found->err_info->CommandStatus = CMD_ABORTED;
4642 finish_cmd(found);
4643 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
4644 msg);
4645 return SUCCESS;
4646 }
4648 /* not in reqQ, if also not in cmpQ, must have already completed */
4649 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4650 if (!found) {
4651 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
4652 msg);
4653 return SUCCESS;
4654 }
4656 /*
4657 * Command is in flight, or possibly already completed
4658 * by the firmware (but not to the scsi mid layer) but we can't
4659 * distinguish which. Send the abort down.
4660 */
4661 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
4662 if (rc != 0) {
4663 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4664 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4665 h->scsi_host->host_no,
4666 dev->bus, dev->target, dev->lun);
4667 return FAILED;
4668 }
4669 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4671 /* If the abort(s) above completed and actually aborted the
4672 * command, then the command to be aborted should already be
4673 * completed. If not, wait around a bit more to see if they
4674 * manage to complete normally.
4675 */
4676 #define ABORT_COMPLETE_WAIT_SECS 30
4677 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4678 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4679 if (!found)
4680 return SUCCESS;
4681 msleep(100);
4682 }
4683 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4684 msg, ABORT_COMPLETE_WAIT_SECS);
4685 return FAILED;
4686 }
4689 /*
4690 * For operations that cannot sleep, a command block is allocated at init,
4691 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4692 * which ones are free or in use. Lock must be held when calling this.
4693 * cmd_free() is the complement.
4694 */
4695 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4696 {
4697 struct CommandList *c;
4698 int i;
4699 union u64bit temp64;
4700 dma_addr_t cmd_dma_handle, err_dma_handle;
4701 unsigned long flags;
4703 spin_lock_irqsave(&h->lock, flags);
4704 do {
4705 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
4706 if (i == h->nr_cmds) {
4707 spin_unlock_irqrestore(&h->lock, flags);
4708 return NULL;
4709 }
4710 } while (test_and_set_bit
4711 (i & (BITS_PER_LONG - 1),
4712 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
4713 spin_unlock_irqrestore(&h->lock, flags);
4715 c = h->cmd_pool + i;
4716 memset(c, 0, sizeof(*c));
4717 cmd_dma_handle = h->cmd_pool_dhandle
4718 + i * sizeof(*c);
4719 c->err_info = h->errinfo_pool + i;
4720 memset(c->err_info, 0, sizeof(*c->err_info));
4721 err_dma_handle = h->errinfo_pool_dhandle
4722 + i * sizeof(*c->err_info);
4724 c->cmdindex = i;
4726 INIT_LIST_HEAD(&c->list);
4727 c->busaddr = (u32) cmd_dma_handle;
4728 temp64.val = (u64) err_dma_handle;
4729 c->ErrDesc.Addr.lower = temp64.val32.lower;
4730 c->ErrDesc.Addr.upper = temp64.val32.upper;
4731 c->ErrDesc.Len = sizeof(*c->err_info);
4733 c->h = h;
4734 return c;
4735 }
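/* Minimal sketch (illustrative, outside the driver) of the bitmap
 * reserve/release pattern cmd_alloc() and cmd_free() implement:
 *
 *	do {
 *		i = find_first_zero_bit(bits, nbits);
 *		if (i == nbits)
 *			return NULL;	// pool exhausted
 *	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
 *				  bits + (i / BITS_PER_LONG)) != 0);
 *	...
 *	clear_bit(i & (BITS_PER_LONG - 1), bits + (i / BITS_PER_LONG));
 *
 * The test_and_set_bit() retry closes the window in which two callers
 * could both observe the same bit as zero before either sets it.
 */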
4737 /* For operations that can wait for kmalloc to possibly sleep,
4738 * this routine can be called. Lock need not be held to call
4739 * cmd_special_alloc. cmd_special_free() is the complement.
4740 */
4741 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4743 struct CommandList *c;
4744 union u64bit temp64;
4745 dma_addr_t cmd_dma_handle, err_dma_handle;
4747 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
4748 if (c == NULL)
4749 return NULL;
4750 memset(c, 0, sizeof(*c));
4752 c->cmd_type = CMD_SCSI;
4753 c->cmdindex = -1;
4755 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
4756 &err_dma_handle);
4758 if (c->err_info == NULL) {
4759 pci_free_consistent(h->pdev,
4760 sizeof(*c), c, cmd_dma_handle);
4761 return NULL;
4762 }
4763 memset(c->err_info, 0, sizeof(*c->err_info));
4765 INIT_LIST_HEAD(&c->list);
4766 c->busaddr = (u32) cmd_dma_handle;
4767 temp64.val = (u64) err_dma_handle;
4768 c->ErrDesc.Addr.lower = temp64.val32.lower;
4769 c->ErrDesc.Addr.upper = temp64.val32.upper;
4770 c->ErrDesc.Len = sizeof(*c->err_info);
4772 c->h = h;
4773 return c;
4774 }
4776 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4777 {
4778 int i;
4779 unsigned long flags;
4781 i = c - h->cmd_pool;
4782 spin_lock_irqsave(&h->lock, flags);
4783 clear_bit(i & (BITS_PER_LONG - 1),
4784 h->cmd_pool_bits + (i / BITS_PER_LONG));
4785 spin_unlock_irqrestore(&h->lock, flags);
4788 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
4790 union u64bit temp64;
4792 temp64.val32.lower = c->ErrDesc.Addr.lower;
4793 temp64.val32.upper = c->ErrDesc.Addr.upper;
4794 pci_free_consistent(h->pdev, sizeof(*c->err_info),
4795 c->err_info, (dma_addr_t) temp64.val);
4796 pci_free_consistent(h->pdev, sizeof(*c),
4797 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
4800 #ifdef CONFIG_COMPAT
4802 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
4804 IOCTL32_Command_struct __user *arg32 =
4805 (IOCTL32_Command_struct __user *) arg;
4806 IOCTL_Command_struct arg64;
4807 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4808 int err;
4809 u32 cp;
4811 memset(&arg64, 0, sizeof(arg64));
4812 err = 0;
4813 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4814 sizeof(arg64.LUN_info));
4815 err |= copy_from_user(&arg64.Request, &arg32->Request,
4816 sizeof(arg64.Request));
4817 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4818 sizeof(arg64.error_info));
4819 err |= get_user(arg64.buf_size, &arg32->buf_size);
4820 err |= get_user(cp, &arg32->buf);
4821 arg64.buf = compat_ptr(cp);
4822 err |= copy_to_user(p, &arg64, sizeof(arg64));
4823 if (err)
4824 return -EFAULT;
4827 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
4828 if (err)
4829 return err;
4830 err |= copy_in_user(&arg32->error_info, &p->error_info,
4831 sizeof(arg32->error_info));
4832 if (err)
4833 return -EFAULT;
4834 return err;
4835 }
4837 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4838 int cmd, void *arg)
4839 {
4840 BIG_IOCTL32_Command_struct __user *arg32 =
4841 (BIG_IOCTL32_Command_struct __user *) arg;
4842 BIG_IOCTL_Command_struct arg64;
4843 BIG_IOCTL_Command_struct __user *p =
4844 compat_alloc_user_space(sizeof(arg64));
4845 int err;
4846 u32 cp;
4848 memset(&arg64, 0, sizeof(arg64));
4849 err = 0;
4850 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4851 sizeof(arg64.LUN_info));
4852 err |= copy_from_user(&arg64.Request, &arg32->Request,
4853 sizeof(arg64.Request));
4854 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4855 sizeof(arg64.error_info));
4856 err |= get_user(arg64.buf_size, &arg32->buf_size);
4857 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4858 err |= get_user(cp, &arg32->buf);
4859 arg64.buf = compat_ptr(cp);
4860 err |= copy_to_user(p, &arg64, sizeof(arg64));
4861 if (err)
4862 return -EFAULT;
4865 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
4866 if (err)
4867 return err;
4868 err |= copy_in_user(&arg32->error_info, &p->error_info,
4869 sizeof(arg32->error_info));
4870 if (err)
4871 return -EFAULT;
4872 return err;
4873 }
4875 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
4876 {
4877 switch (cmd) {
4878 case CCISS_GETPCIINFO:
4879 case CCISS_GETINTINFO:
4880 case CCISS_SETINTINFO:
4881 case CCISS_GETNODENAME:
4882 case CCISS_SETNODENAME:
4883 case CCISS_GETHEARTBEAT:
4884 case CCISS_GETBUSTYPES:
4885 case CCISS_GETFIRMVER:
4886 case CCISS_GETDRIVVER:
4887 case CCISS_REVALIDVOLS:
4888 case CCISS_DEREGDISK:
4889 case CCISS_REGNEWDISK:
4890 case CCISS_REGNEWD:
4891 case CCISS_RESCANDISK:
4892 case CCISS_GETLUNINFO:
4893 return hpsa_ioctl(dev, cmd, arg);
4895 case CCISS_PASSTHRU32:
4896 return hpsa_ioctl32_passthru(dev, cmd, arg);
4897 case CCISS_BIG_PASSTHRU32:
4898 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4900 default:
4901 return -ENOIOCTLCMD;
4902 }
4903 }
4906 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4907 {
4908 struct hpsa_pci_info pciinfo;
4910 if (!argp)
4911 return -EINVAL;
4912 pciinfo.domain = pci_domain_nr(h->pdev->bus);
4913 pciinfo.bus = h->pdev->bus->number;
4914 pciinfo.dev_fn = h->pdev->devfn;
4915 pciinfo.board_id = h->board_id;
4916 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4917 return -EFAULT;
4918 return 0;
4919 }
4921 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4922 {
4923 DriverVer_type DriverVer;
4924 unsigned char vmaj, vmin, vsubmin;
4925 int rc;
4927 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4928 &vmaj, &vmin, &vsubmin);
4930 dev_info(&h->pdev->dev, "driver version string '%s' "
4931 "unrecognized.", HPSA_DRIVER_VERSION);
4936 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4939 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4940 return -EFAULT;
4941 return 0;
4942 }
4944 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4945 {
4946 IOCTL_Command_struct iocommand;
4947 struct CommandList *c;
4948 char *buff = NULL;
4949 union u64bit temp64;
4950 int rc = 0;
4954 if (!capable(CAP_SYS_RAWIO))
4955 return -EPERM;
4956 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4957 return -EFAULT;
4958 if ((iocommand.buf_size < 1) &&
4959 (iocommand.Request.Type.Direction != XFER_NONE)) {
4960 return -EINVAL;
4961 }
4962 if (iocommand.buf_size > 0) {
4963 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4964 if (buff == NULL)
4965 return -EFAULT;
4966 if (iocommand.Request.Type.Direction == XFER_WRITE) {
4967 /* Copy the data into the buffer we created */
4968 if (copy_from_user(buff, iocommand.buf,
4969 iocommand.buf_size)) {
4970 rc = -EFAULT;
4971 goto out_kfree;
4972 }
4973 } else {
4974 memset(buff, 0, iocommand.buf_size);
4975 }
4976 }
4977 c = cmd_special_alloc(h);
4978 if (c == NULL) {
4979 rc = -ENOMEM;
4980 goto out_kfree;
4981 }
4982 /* Fill in the command type */
4983 c->cmd_type = CMD_IOCTL_PEND;
4984 /* Fill in Command Header */
4985 c->Header.ReplyQueue = 0; /* unused in simple mode */
4986 if (iocommand.buf_size > 0) { /* buffer to fill */
4987 c->Header.SGList = 1;
4988 c->Header.SGTotal = 1;
4989 } else { /* no buffers to fill */
4990 c->Header.SGList = 0;
4991 c->Header.SGTotal = 0;
4992 }
4993 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4994 /* use the kernel address of the cmd block for the tag */
4995 c->Header.Tag.lower = c->busaddr;
4997 /* Fill in Request block */
4998 memcpy(&c->Request, &iocommand.Request,
4999 sizeof(c->Request));
5001 /* Fill in the scatter gather information */
5002 if (iocommand.buf_size > 0) {
5003 temp64.val = pci_map_single(h->pdev, buff,
5004 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5005 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
5006 c->SG[0].Addr.lower = 0;
5007 c->SG[0].Addr.upper = 0;
5008 c->SG[0].Len = 0;
5009 rc = -ENOMEM;
5010 goto out;
5011 }
5012 c->SG[0].Addr.lower = temp64.val32.lower;
5013 c->SG[0].Addr.upper = temp64.val32.upper;
5014 c->SG[0].Len = iocommand.buf_size;
5015 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
5016 }
5017 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5018 if (iocommand.buf_size > 0)
5019 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
5020 check_ioctl_unit_attention(h, c);
5022 /* Copy the error information out */
5023 memcpy(&iocommand.error_info, c->err_info,
5024 sizeof(iocommand.error_info));
5025 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
5026 rc = -EFAULT;
5027 goto out;
5028 }
5029 if (iocommand.Request.Type.Direction == XFER_READ &&
5030 iocommand.buf_size > 0) {
5031 /* Copy the data out of the buffer we created */
5032 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
5033 rc = -EFAULT;
5034 goto out;
5035 }
5036 }
5037 out:
5038 cmd_special_free(h, c);
5039 out_kfree:
5040 kfree(buff);
5041 return rc;
5042 }
5044 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5045 {
5046 BIG_IOCTL_Command_struct *ioc;
5047 struct CommandList *c;
5048 unsigned char **buff = NULL;
5049 int *buff_size = NULL;
5050 union u64bit temp64;
5051 BYTE sg_used = 0;
5052 int status = 0;
5053 int i;
5054 u32 left;
5055 u32 sz;
5056 BYTE __user *data_ptr;
5060 if (!capable(CAP_SYS_RAWIO))
5061 return -EPERM;
5062 ioc = (BIG_IOCTL_Command_struct *)
5063 kmalloc(sizeof(*ioc), GFP_KERNEL);
5064 if (!ioc) {
5065 status = -ENOMEM;
5066 goto cleanup1;
5067 }
5068 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5069 status = -EFAULT;
5070 goto cleanup1;
5071 }
5072 if ((ioc->buf_size < 1) &&
5073 (ioc->Request.Type.Direction != XFER_NONE)) {
5074 status = -EINVAL;
5075 goto cleanup1;
5076 }
5077 /* Check kmalloc limits using all SGs */
5078 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5079 status = -EINVAL;
5080 goto cleanup1;
5081 }
5082 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5083 status = -EINVAL;
5084 goto cleanup1;
5085 }
5086 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5087 if (!buff) {
5088 status = -ENOMEM;
5089 goto cleanup1;
5090 }
5091 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5092 if (!buff_size) {
5093 status = -ENOMEM;
5094 goto cleanup1;
5095 }
5096 left = ioc->buf_size;
5097 data_ptr = ioc->buf;
5098 while (left) {
5099 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5100 buff_size[sg_used] = sz;
5101 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5102 if (buff[sg_used] == NULL) {
5103 status = -ENOMEM;
5104 goto cleanup1;
5105 }
5106 if (ioc->Request.Type.Direction == XFER_WRITE) {
5107 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5108 status = -EFAULT;
5109 goto cleanup1;
5110 }
5111 } else
5112 memset(buff[sg_used], 0, sz);
5113 left -= sz;
5114 data_ptr += sz;
5115 sg_used++;
5116 }
5117 c = cmd_special_alloc(h);
5118 if (c == NULL) {
5119 status = -ENOMEM;
5120 goto cleanup1;
5121 }
5122 c->cmd_type = CMD_IOCTL_PEND;
5123 c->Header.ReplyQueue = 0;
5124 c->Header.SGList = c->Header.SGTotal = sg_used;
5125 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5126 c->Header.Tag.lower = c->busaddr;
5127 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5128 if (ioc->buf_size > 0) {
5130 for (i = 0; i < sg_used; i++) {
5131 temp64.val = pci_map_single(h->pdev, buff[i],
5132 buff_size[i], PCI_DMA_BIDIRECTIONAL);
5133 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
5134 c->SG[i].Addr.lower = 0;
5135 c->SG[i].Addr.upper = 0;
5136 c->SG[i].Len = 0;
5137 hpsa_pci_unmap(h->pdev, c, i,
5138 PCI_DMA_BIDIRECTIONAL);
5139 status = -ENOMEM;
5140 goto cleanup0;
5141 }
5142 c->SG[i].Addr.lower = temp64.val32.lower;
5143 c->SG[i].Addr.upper = temp64.val32.upper;
5144 c->SG[i].Len = buff_size[i];
5145 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
5146 }
5147 }
5148 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5149 if (sg_used)
5150 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5151 check_ioctl_unit_attention(h, c);
5152 /* Copy the error information out */
5153 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5154 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5155 status = -EFAULT;
5156 goto cleanup0;
5157 }
5158 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
5159 /* Copy the data out of the buffer we created */
5160 BYTE __user *ptr = ioc->buf;
5161 for (i = 0; i < sg_used; i++) {
5162 if (copy_to_user(ptr, buff[i], buff_size[i])) {
5163 status = -EFAULT;
5164 goto cleanup0;
5165 }
5166 ptr += buff_size[i];
5167 }
5168 }
5169 status = 0;
5170 cleanup0:
5171 cmd_special_free(h, c);
5172 cleanup1:
5173 if (buff) {
5174 for (i = 0; i < sg_used; i++)
5175 kfree(buff[i]);
5176 kfree(buff);
5177 }
5178 kfree(buff_size);
5179 kfree(ioc);
5180 return status;
5181 }
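/* Worked example (illustrative): with ioc->malloc_size == 64 KiB, a
 * 192 KiB ioc->buf is staged through three 64 KiB kernel buffers
 * (sg_used == 3), one per SG entry, and only the final entry gets
 * HPSA_SG_LAST set.
 */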
5183 static void check_ioctl_unit_attention(struct ctlr_info *h,
5184 struct CommandList *c)
5186 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5187 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5188 (void) check_for_unit_attention(h, c);
5191 static int increment_passthru_count(struct ctlr_info *h)
5193 unsigned long flags;
5195 spin_lock_irqsave(&h->passthru_count_lock, flags);
5196 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
5197 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5198 return -1;
5199 }
5200 h->passthru_count++;
5201 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5202 return 0;
5203 }
5205 static void decrement_passthru_count(struct ctlr_info *h)
5207 unsigned long flags;
5209 spin_lock_irqsave(&h->passthru_count_lock, flags);
5210 if (h->passthru_count <= 0) {
5211 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5212 /* not expecting to get here. */
5213 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
5216 h->passthru_count--;
5217 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5223 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
5225 struct ctlr_info *h;
5226 void __user *argp = (void __user *)arg;
5227 int rc;
5229 h = sdev_to_hba(dev);
5231 switch (cmd) {
5232 case CCISS_DEREGDISK:
5233 case CCISS_REGNEWDISK:
5234 case CCISS_REGNEWD:
5235 hpsa_scan_start(h->scsi_host);
5236 return 0;
5237 case CCISS_GETPCIINFO:
5238 return hpsa_getpciinfo_ioctl(h, argp);
5239 case CCISS_GETDRIVVER:
5240 return hpsa_getdrivver_ioctl(h, argp);
5241 case CCISS_PASSTHRU:
5242 if (increment_passthru_count(h))
5243 return -EAGAIN;
5244 rc = hpsa_passthru_ioctl(h, argp);
5245 decrement_passthru_count(h);
5246 return rc;
5247 case CCISS_BIG_PASSTHRU:
5248 if (increment_passthru_count(h))
5249 return -EAGAIN;
5250 rc = hpsa_big_passthru_ioctl(h, argp);
5251 decrement_passthru_count(h);
5252 return rc;
5253 default:
5254 return -ENOTTY;
5255 }
5256 }
5258 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5259 u8 reset_type)
5260 {
5261 struct CommandList *c;
5263 c = cmd_alloc(h);
5264 if (!c)
5265 return -ENOMEM;
5266 /* fill_cmd can't fail here, no data buffer to map */
5267 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5268 RAID_CTLR_LUNID, TYPE_MSG);
5269 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5271 enqueue_cmd_and_start_io(h, c);
5272 /* Don't wait for completion, the reset won't complete. Don't free
5273 * the command either. This is the last command we will send before
5274 * re-initializing everything, so it doesn't matter and won't leak.
5275 */
5276 return 0;
5277 }
5279 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5280 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5281 int cmd_type)
5282 {
5283 int pci_dir = XFER_NONE;
5284 struct CommandList *a; /* for commands to be aborted */
5286 c->cmd_type = CMD_IOCTL_PEND;
5287 c->Header.ReplyQueue = 0;
5288 if (buff != NULL && size > 0) {
5289 c->Header.SGList = 1;
5290 c->Header.SGTotal = 1;
5291 } else {
5292 c->Header.SGList = 0;
5293 c->Header.SGTotal = 0;
5294 }
5295 c->Header.Tag.lower = c->busaddr;
5296 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5298 c->Request.Type.Type = cmd_type;
5299 if (cmd_type == TYPE_CMD) {
5300 switch (cmd) {
5301 case HPSA_INQUIRY:
5302 /* are we trying to read a vital product page */
5303 if (page_code & VPD_PAGE) {
5304 c->Request.CDB[1] = 0x01;
5305 c->Request.CDB[2] = (page_code & 0xff);
5306 }
5307 c->Request.CDBLen = 6;
5308 c->Request.Type.Attribute = ATTR_SIMPLE;
5309 c->Request.Type.Direction = XFER_READ;
5310 c->Request.Timeout = 0;
5311 c->Request.CDB[0] = HPSA_INQUIRY;
5312 c->Request.CDB[4] = size & 0xFF;
5313 break;
5314 case HPSA_REPORT_LOG:
5315 case HPSA_REPORT_PHYS:
5316 /* Talking to controller so it's a physical command
5317 mode = 00 target = 0. Nothing to write.
5318 */
5319 c->Request.CDBLen = 12;
5320 c->Request.Type.Attribute = ATTR_SIMPLE;
5321 c->Request.Type.Direction = XFER_READ;
5322 c->Request.Timeout = 0;
5323 c->Request.CDB[0] = cmd;
5324 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5325 c->Request.CDB[7] = (size >> 16) & 0xFF;
5326 c->Request.CDB[8] = (size >> 8) & 0xFF;
5327 c->Request.CDB[9] = size & 0xFF;
5328 break;
5329 case HPSA_CACHE_FLUSH:
5330 c->Request.CDBLen = 12;
5331 c->Request.Type.Attribute = ATTR_SIMPLE;
5332 c->Request.Type.Direction = XFER_WRITE;
5333 c->Request.Timeout = 0;
5334 c->Request.CDB[0] = BMIC_WRITE;
5335 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5336 c->Request.CDB[7] = (size >> 8) & 0xFF;
5337 c->Request.CDB[8] = size & 0xFF;
5338 break;
5339 case TEST_UNIT_READY:
5340 c->Request.CDBLen = 6;
5341 c->Request.Type.Attribute = ATTR_SIMPLE;
5342 c->Request.Type.Direction = XFER_NONE;
5343 c->Request.Timeout = 0;
5344 break;
5345 case HPSA_GET_RAID_MAP:
5346 c->Request.CDBLen = 12;
5347 c->Request.Type.Attribute = ATTR_SIMPLE;
5348 c->Request.Type.Direction = XFER_READ;
5349 c->Request.Timeout = 0;
5350 c->Request.CDB[0] = HPSA_CISS_READ;
5351 c->Request.CDB[1] = cmd;
5352 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5353 c->Request.CDB[7] = (size >> 16) & 0xFF;
5354 c->Request.CDB[8] = (size >> 8) & 0xFF;
5355 c->Request.CDB[9] = size & 0xFF;
5356 break;
5357 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5358 c->Request.CDBLen = 10;
5359 c->Request.Type.Attribute = ATTR_SIMPLE;
5360 c->Request.Type.Direction = XFER_READ;
5361 c->Request.Timeout = 0;
5362 c->Request.CDB[0] = BMIC_READ;
5363 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5364 c->Request.CDB[7] = (size >> 16) & 0xFF;
5365 c->Request.CDB[8] = (size >> 8) & 0xFF;
5366 break;
5367 default:
5368 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5369 BUG();
5370 break;
5371 }
5372 } else if (cmd_type == TYPE_MSG) {
5373 switch (cmd) {
5375 case HPSA_DEVICE_RESET_MSG:
5376 c->Request.CDBLen = 16;
5377 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
5378 c->Request.Type.Attribute = ATTR_SIMPLE;
5379 c->Request.Type.Direction = XFER_NONE;
5380 c->Request.Timeout = 0; /* Don't time out */
5381 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5382 c->Request.CDB[0] = cmd;
5383 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5384 /* If bytes 4-7 are zero, it means reset the */
5385 /* LunID device */
5386 c->Request.CDB[4] = 0x00;
5387 c->Request.CDB[5] = 0x00;
5388 c->Request.CDB[6] = 0x00;
5389 c->Request.CDB[7] = 0x00;
5390 break;
5391 case HPSA_ABORT_MSG:
5392 a = buff; /* point to command to be aborted */
5393 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
5394 a->Header.Tag.upper, a->Header.Tag.lower,
5395 c->Header.Tag.upper, c->Header.Tag.lower);
5396 c->Request.CDBLen = 16;
5397 c->Request.Type.Type = TYPE_MSG;
5398 c->Request.Type.Attribute = ATTR_SIMPLE;
5399 c->Request.Type.Direction = XFER_WRITE;
5400 c->Request.Timeout = 0; /* Don't time out */
5401 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5402 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5403 c->Request.CDB[2] = 0x00; /* reserved */
5404 c->Request.CDB[3] = 0x00; /* reserved */
5405 /* Tag to abort goes in CDB[4]-CDB[11] */
5406 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
5407 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
5408 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
5409 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
5410 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
5411 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
5412 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
5413 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
5414 c->Request.CDB[12] = 0x00; /* reserved */
5415 c->Request.CDB[13] = 0x00; /* reserved */
5416 c->Request.CDB[14] = 0x00; /* reserved */
5417 c->Request.CDB[15] = 0x00; /* reserved */
5418 break;
5419 default:
5420 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5421 cmd);
5422 BUG();
5423 }
5424 } else {
5425 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5426 BUG();
5427 }
5429 switch (c->Request.Type.Direction) {
5430 case XFER_READ:
5431 pci_dir = PCI_DMA_FROMDEVICE;
5432 break;
5433 case XFER_WRITE:
5434 pci_dir = PCI_DMA_TODEVICE;
5435 break;
5436 case XFER_NONE:
5437 pci_dir = PCI_DMA_NONE;
5438 break;
5439 default:
5440 pci_dir = PCI_DMA_BIDIRECTIONAL;
5441 }
5442 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5443 return -1;
5444 return 0;
5445 }
5447 /*
5448 * Map (physical) PCI mem into (virtual) kernel space
5449 */
5450 static void __iomem *remap_pci_mem(ulong base, ulong size)
5451 {
5452 ulong page_base = ((ulong) base) & PAGE_MASK;
5453 ulong page_offs = ((ulong) base) - page_base;
5454 void __iomem *page_remapped = ioremap_nocache(page_base,
5455 page_offs + size);
5457 return page_remapped ? (page_remapped + page_offs) : NULL;
5458 }
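/* Example usage (as in hpsa_kdump_hard_reset_controller() below):
 *
 *	vaddr = remap_pci_mem(paddr, 0x250);
 *
 * which maps the first 0x250 bytes of the controller's memory BAR,
 * rounding the mapping to page boundaries internally.
 */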
5460 /* Takes cmds off the submission queue and sends them to the hardware,
5461 * then puts them on the queue of cmds waiting for completion.
5462 */
5463 static void start_io(struct ctlr_info *h)
5465 struct CommandList *c;
5466 unsigned long flags;
5468 spin_lock_irqsave(&h->lock, flags);
5469 while (!list_empty(&h->reqQ)) {
5470 c = list_entry(h->reqQ.next, struct CommandList, list);
5471 /* can't do anything if fifo is full */
5472 if ((h->access.fifo_full(h))) {
5473 h->fifo_recently_full = 1;
5474 dev_warn(&h->pdev->dev, "fifo full\n");
5477 h->fifo_recently_full = 0;
5479 /* Get the first entry from the Request Q */
5480 removeQ(c);
5481 h->Qdepth--;
5483 /* Put job onto the completed Q */
5484 addQ(&h->cmpQ, c);
5486 /* Must increment commands_outstanding before unlocking
5487 * and submitting to avoid race checking for fifo full
5488 * condition.
5489 */
5490 h->commands_outstanding++;
5491 if (h->commands_outstanding > h->max_outstanding)
5492 h->max_outstanding = h->commands_outstanding;
5494 /* Tell the controller execute command */
5495 spin_unlock_irqrestore(&h->lock, flags);
5496 h->access.submit_command(h, c);
5497 spin_lock_irqsave(&h->lock, flags);
5499 spin_unlock_irqrestore(&h->lock, flags);
5502 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5503 {
5504 return h->access.command_completed(h, q);
5505 }
5507 static inline bool interrupt_pending(struct ctlr_info *h)
5508 {
5509 return h->access.intr_pending(h);
5510 }
5512 static inline long interrupt_not_for_us(struct ctlr_info *h)
5513 {
5514 return (h->access.intr_pending(h) == 0) ||
5515 (h->interrupts_enabled == 0);
5516 }
5518 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5519 u32 raw_tag)
5520 {
5521 if (unlikely(tag_index >= h->nr_cmds)) {
5522 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5523 return 1;
5524 }
5525 return 0;
5526 }
5528 static inline void finish_cmd(struct CommandList *c)
5530 unsigned long flags;
5531 int io_may_be_stalled = 0;
5532 struct ctlr_info *h = c->h;
5534 spin_lock_irqsave(&h->lock, flags);
5535 removeQ(c);
5537 /*
5538 * Check for possibly stalled i/o.
5540 * If a fifo_full condition is encountered, requests will back up
5541 * in h->reqQ. This queue is only emptied out by start_io which is
5542 * only called when a new i/o request comes in. If no i/o's are
5543 * forthcoming, the i/o's in h->reqQ can get stuck. So we call
5544 * start_io from here if we detect such a danger.
5546 * Normally, we shouldn't hit this case, but pounding on the
5547 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if
5548 * commands_outstanding is low. We want to avoid calling
5549 * start_io from in here as much as possible, and esp. don't
5550 * want to get in a cycle where we call start_io every time
5551 * we call finish_cmd.
5552 */
5553 if (unlikely(h->fifo_recently_full) &&
5554 h->commands_outstanding < 5)
5555 io_may_be_stalled = 1;
5557 spin_unlock_irqrestore(&h->lock, flags);
5559 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5560 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5561 || c->cmd_type == CMD_IOACCEL2))
5562 complete_scsi_command(c);
5563 else if (c->cmd_type == CMD_IOCTL_PEND)
5564 complete(c->waiting);
5565 if (unlikely(io_may_be_stalled))
5566 start_io(h);
5567 }
5569 static inline u32 hpsa_tag_contains_index(u32 tag)
5570 {
5571 return tag & DIRECT_LOOKUP_BIT;
5572 }
5574 static inline u32 hpsa_tag_to_index(u32 tag)
5575 {
5576 return tag >> DIRECT_LOOKUP_SHIFT;
5577 }
5580 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5581 {
5582 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5583 #define HPSA_SIMPLE_ERROR_BITS 0x03
5584 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5585 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5586 return tag & ~HPSA_PERF_ERROR_BITS;
5587 }
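/* Illustrative example: if DIRECT_LOOKUP_SHIFT were 5, a direct-lookup
 * tag for command index 7 would be (7 << 5) | DIRECT_LOOKUP_BIT, and
 * hpsa_tag_discard_error_bits() would mask off the low 5 error/status
 * bits before the tag is compared or converted back to an index.
 */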
5589 /* process completion of an indexed ("direct lookup") command */
5590 static inline void process_indexed_cmd(struct ctlr_info *h,
5591 u32 raw_tag)
5592 {
5593 u32 tag_index;
5594 struct CommandList *c;
5596 tag_index = hpsa_tag_to_index(raw_tag);
5597 if (!bad_tag(h, tag_index, raw_tag)) {
5598 c = h->cmd_pool + tag_index;
5599 finish_cmd(c);
5600 }
5601 }
5603 /* process completion of a non-indexed command */
5604 static inline void process_nonindexed_cmd(struct ctlr_info *h,
5605 u32 raw_tag)
5606 {
5607 u32 tag;
5608 struct CommandList *c = NULL;
5609 unsigned long flags;
5611 tag = hpsa_tag_discard_error_bits(h, raw_tag);
5612 spin_lock_irqsave(&h->lock, flags);
5613 list_for_each_entry(c, &h->cmpQ, list) {
5614 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
5615 spin_unlock_irqrestore(&h->lock, flags);
5616 finish_cmd(c);
5617 return;
5618 }
5619 }
5620 spin_unlock_irqrestore(&h->lock, flags);
5621 bad_tag(h, h->nr_cmds + 1, raw_tag);
5622 }
5624 /* Some controllers, like p400, will give us one interrupt
5625 * after a soft reset, even if we turned interrupts off.
5626 * Only need to check for this in the hpsa_xxx_discard_completions
5627 * functions.
5628 */
5629 static int ignore_bogus_interrupt(struct ctlr_info *h)
5630 {
5631 if (likely(!reset_devices))
5632 return 0;
5634 if (likely(h->interrupts_enabled))
5635 return 0;
5637 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5638 "(known firmware bug.) Ignoring.\n");
5643 /*
5644 * Convert &h->q[x] (passed to interrupt handlers) back to h.
5645 * Relies on (h->q[x] == x) being true for x such that
5646 * 0 <= x < MAX_REPLY_QUEUES.
5647 */
5648 static struct ctlr_info *queue_to_hba(u8 *queue)
5649 {
5650 return container_of((queue - *queue), struct ctlr_info, q[0]);
5651 }
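/* Worked example (illustrative): h->q[] is initialized so h->q[x] == x.
 * For queue == &h->q[3], *queue == 3, so (queue - *queue) == &h->q[0],
 * and container_of() then recovers the enclosing ctlr_info.
 */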
5653 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5655 struct ctlr_info *h = queue_to_hba(queue);
5656 u8 q = *(u8 *) queue;
5657 u32 raw_tag;
5659 if (ignore_bogus_interrupt(h))
5660 return IRQ_NONE;
5662 if (interrupt_not_for_us(h))
5663 return IRQ_NONE;
5664 h->last_intr_timestamp = get_jiffies_64();
5665 while (interrupt_pending(h)) {
5666 raw_tag = get_next_completion(h, q);
5667 while (raw_tag != FIFO_EMPTY)
5668 raw_tag = next_command(h, q);
5669 }
5670 return IRQ_HANDLED;
5671 }
5673 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5675 struct ctlr_info *h = queue_to_hba(queue);
5676 u32 raw_tag;
5677 u8 q = *(u8 *) queue;
5679 if (ignore_bogus_interrupt(h))
5680 return IRQ_NONE;
5682 h->last_intr_timestamp = get_jiffies_64();
5683 raw_tag = get_next_completion(h, q);
5684 while (raw_tag != FIFO_EMPTY)
5685 raw_tag = next_command(h, q);
5686 return IRQ_HANDLED;
5687 }
5689 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5691 struct ctlr_info *h = queue_to_hba((u8 *) queue);
5692 u32 raw_tag;
5693 u8 q = *(u8 *) queue;
5695 if (interrupt_not_for_us(h))
5696 return IRQ_NONE;
5697 h->last_intr_timestamp = get_jiffies_64();
5698 while (interrupt_pending(h)) {
5699 raw_tag = get_next_completion(h, q);
5700 while (raw_tag != FIFO_EMPTY) {
5701 if (likely(hpsa_tag_contains_index(raw_tag)))
5702 process_indexed_cmd(h, raw_tag);
5703 else
5704 process_nonindexed_cmd(h, raw_tag);
5705 raw_tag = next_command(h, q);
5706 }
5707 }
5708 return IRQ_HANDLED;
5709 }
5711 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5713 struct ctlr_info *h = queue_to_hba(queue);
5714 u32 raw_tag;
5715 u8 q = *(u8 *) queue;
5717 h->last_intr_timestamp = get_jiffies_64();
5718 raw_tag = get_next_completion(h, q);
5719 while (raw_tag != FIFO_EMPTY) {
5720 if (likely(hpsa_tag_contains_index(raw_tag)))
5721 process_indexed_cmd(h, raw_tag);
5722 else
5723 process_nonindexed_cmd(h, raw_tag);
5724 raw_tag = next_command(h, q);
5725 }
5726 return IRQ_HANDLED;
5727 }
5729 /* Send a message CDB to the firmware. Careful, this only works
5730 * in simple mode, not performant mode due to the tag lookup.
5731 * We only ever use this immediately after a controller reset.
5732 */
5733 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5734 unsigned char type)
5735 {
5736 struct Command {
5737 struct CommandListHeader CommandHeader;
5738 struct RequestBlock Request;
5739 struct ErrDescriptor ErrorDescriptor;
5740 };
5741 struct Command *cmd;
5742 static const size_t cmd_sz = sizeof(*cmd) +
5743 sizeof(cmd->ErrorDescriptor);
5744 dma_addr_t paddr64;
5745 uint32_t paddr32, tag;
5746 void __iomem *vaddr;
5747 int i, err;
5749 vaddr = pci_ioremap_bar(pdev, 0);
5750 if (vaddr == NULL)
5751 return -ENOMEM;
5753 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5754 * CCISS commands, so they must be allocated from the lower 4GiB of
5755 * memory.
5756 */
5757 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5758 if (err) {
5759 iounmap(vaddr);
5760 return -ENOMEM;
5761 }
5763 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5764 if (cmd == NULL) {
5765 iounmap(vaddr);
5766 return -ENOMEM;
5767 }
5769 /* This must fit, because of the 32-bit consistent DMA mask. Also,
5770 * although there's no guarantee, we assume that the address is at
5771 * least 4-byte aligned (most likely, it's page-aligned).
5772 */
5773 paddr32 = paddr64;
5775 cmd->CommandHeader.ReplyQueue = 0;
5776 cmd->CommandHeader.SGList = 0;
5777 cmd->CommandHeader.SGTotal = 0;
5778 cmd->CommandHeader.Tag.lower = paddr32;
5779 cmd->CommandHeader.Tag.upper = 0;
5780 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5782 cmd->Request.CDBLen = 16;
5783 cmd->Request.Type.Type = TYPE_MSG;
5784 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
5785 cmd->Request.Type.Direction = XFER_NONE;
5786 cmd->Request.Timeout = 0; /* Don't time out */
5787 cmd->Request.CDB[0] = opcode;
5788 cmd->Request.CDB[1] = type;
5789 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5790 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
5791 cmd->ErrorDescriptor.Addr.upper = 0;
5792 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
5794 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
5796 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5797 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5798 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
5799 break;
5800 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5801 }
5803 iounmap(vaddr);
5805 /* we leak the DMA buffer here ... no choice since the controller could
5806 * still complete the command.
5807 */
5808 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5809 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5814 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5816 if (tag & HPSA_ERROR_BIT) {
5817 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5822 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5827 #define hpsa_noop(p) hpsa_message(p, 3, 0)
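/* hpsa_noop() sends message opcode 3 (no-op) with type 0. A hypothetical
 * companion for a soft reset message would follow the same calling
 * convention, e.g.:
 *
 *	#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
 */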
5829 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5830 void * __iomem vaddr, u32 use_doorbell)
5831 {
5832 u16 pmcsr;
5833 int pos;
5835 if (use_doorbell) {
5836 /* For everything after the P600, the PCI power state method
5837 * of resetting the controller doesn't work, so we have this
5838 * other way using the doorbell register.
5839 */
5840 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5841 writel(use_doorbell, vaddr + SA5_DOORBELL);
5843 /* PMC hardware guys tell us we need a 5 second delay after
5844 * doorbell reset and before any attempt to talk to the board
5845 * at all to ensure that this actually works and doesn't fall
5846 * over in some weird corner cases.
5847 */
5848 msleep(5000);
5849 } else { /* Try to do it the PCI power state way */
5851 /* Quoting from the Open CISS Specification: "The Power
5852 * Management Control/Status Register (CSR) controls the power
5853 * state of the device. The normal operating state is D0,
5854 * CSR=00h. The software off state is D3, CSR=03h. To reset
5855 * the controller, place the interface device in D3 then to D0,
5856 * this causes a secondary PCI reset which will reset the
5857 * controller." */
5859 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
5860 if (pos == 0) {
5861 dev_err(&pdev->dev,
5862 "hpsa_reset_controller: "
5863 "PCI PM not supported\n");
5864 return -ENODEV;
5865 }
5866 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5867 /* enter the D3hot power management state */
5868 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
5869 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5870 pmcsr |= PCI_D3hot;
5871 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5875 /* enter the D0 power management state */
5876 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5877 pmcsr |= PCI_D0;
5878 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5880 /*
5881 * The P600 requires a small delay when changing states.
5882 * Otherwise we may think the board did not reset and we bail.
5883 * This is for kdump only and is particular to the P600.
5884 */
5885 msleep(500);
5886 }
5887 return 0;
5888 }
5890 static void init_driver_version(char *driver_version, int len)
5891 {
5892 memset(driver_version, 0, len);
5893 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5894 }
5896 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5898 char *driver_version;
5899 int i, size = sizeof(cfgtable->driver_version);
5901 driver_version = kmalloc(size, GFP_KERNEL);
5902 if (!driver_version)
5903 return -ENOMEM;
5905 init_driver_version(driver_version, size);
5906 for (i = 0; i < size; i++)
5907 writeb(driver_version[i], &cfgtable->driver_version[i]);
5908 kfree(driver_version);
5909 return 0;
5910 }
5912 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5913 unsigned char *driver_ver)
5914 {
5915 int i;
5917 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5918 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5921 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5924 char *driver_ver, *old_driver_ver;
5925 int rc, size = sizeof(cfgtable->driver_version);
5927 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5928 if (!old_driver_ver)
5929 return -ENOMEM;
5930 driver_ver = old_driver_ver + size;
5932 /* After a reset, the 32 bytes of "driver version" in the cfgtable
5933 * should have been changed, otherwise we know the reset failed.
5934 */
5935 init_driver_version(old_driver_ver, size);
5936 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5937 rc = !memcmp(driver_ver, old_driver_ver, size);
5938 kfree(old_driver_ver);
5939 return rc;
5940 }
5941 /* This does a hard reset of the controller using PCI power management
5942 * states or the doorbell register.
5943 */
5944 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5945 {
5946 u64 cfg_offset;
5947 u32 cfg_base_addr;
5948 u64 cfg_base_addr_index;
5949 void __iomem *vaddr;
5950 unsigned long paddr;
5951 u32 misc_fw_support;
5952 int rc;
5953 struct CfgTable __iomem *cfgtable;
5954 u32 use_doorbell;
5955 u32 board_id;
5956 u16 command_register;
5958 /* For controllers as old as the P600, this is very nearly
5959 * the same thing as
5961 * pci_save_state(pci_dev);
5962 * pci_set_power_state(pci_dev, PCI_D3hot);
5963 * pci_set_power_state(pci_dev, PCI_D0);
5964 * pci_restore_state(pci_dev);
5966 * For controllers newer than the P600, the pci power state
5967 * method of resetting doesn't work so we have another way
5968 * using the doorbell register.
5969 */
5971 rc = hpsa_lookup_board_id(pdev, &board_id);
5972 if (rc < 0 || !ctlr_is_resettable(board_id)) {
5973 dev_warn(&pdev->dev, "Not resetting device.\n");
5977 /* if controller is soft- but not hard resettable... */
5978 if (!ctlr_is_hard_resettable(board_id))
5979 return -ENOTSUPP; /* try soft reset later. */
5981 /* Save the PCI command register */
5982 pci_read_config_word(pdev, 4, &command_register);
5983 /* Turn the board off. This is so that later pci_restore_state()
5984 * won't turn the board on before the rest of config space is ready.
5986 pci_disable_device(pdev);
5987 pci_save_state(pdev);
5989 /* find the first memory BAR, so we can find the cfg table */
5990 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5993 vaddr = remap_pci_mem(paddr, 0x250);
5997 /* find cfgtable in order to check if reset via doorbell is supported */
5998 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5999 &cfg_base_addr_index, &cfg_offset);
6000 if (rc)
6001 goto unmap_vaddr;
6002 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6003 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6004 if (!cfgtable) {
6005 rc = -ENOMEM;
6006 goto unmap_vaddr;
6007 }
6008 rc = write_driver_ver_to_cfgtable(cfgtable);
6009 if (rc)
6010 goto unmap_cfgtable;
6012 /* If reset via doorbell register is supported, use that.
6013 * There are two such methods. Favor the newest method.
6014 */
6015 misc_fw_support = readl(&cfgtable->misc_fw_support);
6016 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6017 if (use_doorbell) {
6018 use_doorbell = DOORBELL_CTLR_RESET2;
6019 } else {
6020 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6021 if (use_doorbell) {
6022 dev_warn(&pdev->dev, "Soft reset not supported. "
6023 "Firmware update is required.\n");
6024 rc = -ENOTSUPP; /* try soft reset */
6025 goto unmap_cfgtable;
6026 }
6027 }
6029 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6030 if (rc)
6031 goto unmap_cfgtable;
6033 pci_restore_state(pdev);
6034 rc = pci_enable_device(pdev);
6036 dev_warn(&pdev->dev, "failed to enable device.\n");
6037 goto unmap_cfgtable;
6039 pci_write_config_word(pdev, 4, command_register);
6041 /* Some devices (notably the HP Smart Array 5i Controller)
6042 need a little pause here */
6043 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6045 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6046 if (rc) {
6047 dev_warn(&pdev->dev,
6048 "failed waiting for board to become ready "
6049 "after hard reset\n");
6050 goto unmap_cfgtable;
6051 }
6053 rc = controller_reset_failed(vaddr);
6054 if (rc < 0)
6055 goto unmap_cfgtable;
6056 if (rc) {
6057 dev_warn(&pdev->dev, "Unable to successfully reset "
6058 "controller. Will try soft reset.\n");
6059 rc = -ENOTSUPP;
6060 } else {
6061 dev_info(&pdev->dev, "board ready after hard reset.\n");
6062 }
6064 unmap_cfgtable:
6065 iounmap(cfgtable);
6067 unmap_vaddr:
6068 iounmap(vaddr);
6069 return rc;
6070 }
6072 /*
6073 * We cannot read the structure directly, for portability we must use
6074 * the io functions.
6075 * This is for debug only.
6076 */
6077 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
6078 {
6079 #ifdef HPSA_DEBUG
6080 int i;
6081 char temp_name[17];
6083 dev_info(dev, "Controller Configuration information\n");
6084 dev_info(dev, "------------------------------------\n");
6085 for (i = 0; i < 4; i++)
6086 temp_name[i] = readb(&(tb->Signature[i]));
6087 temp_name[4] = '\0';
6088 dev_info(dev, " Signature = %s\n", temp_name);
6089 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6090 dev_info(dev, " Transport methods supported = 0x%x\n",
6091 readl(&(tb->TransportSupport)));
6092 dev_info(dev, " Transport methods active = 0x%x\n",
6093 readl(&(tb->TransportActive)));
6094 dev_info(dev, " Requested transport Method = 0x%x\n",
6095 readl(&(tb->HostWrite.TransportRequest)));
6096 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6097 readl(&(tb->HostWrite.CoalIntDelay)));
6098 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6099 readl(&(tb->HostWrite.CoalIntCount)));
6100 dev_info(dev, " Max outstanding commands = 0x%d\n",
6101 readl(&(tb->CmdsOutMax)));
6102 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6103 for (i = 0; i < 16; i++)
6104 temp_name[i] = readb(&(tb->ServerName[i]));
6105 temp_name[16] = '\0';
6106 dev_info(dev, " Server Name = %s\n", temp_name);
6107 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6108 readl(&(tb->HeartBeat)));
6109 #endif /* HPSA_DEBUG */
6110 }
6112 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6113 {
6114 int i, offset, mem_type, bar_type;
6116 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6117 return 0;
6118 offset = 0;
6119 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6120 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6121 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6122 offset += 4;
6123 else {
6124 mem_type = pci_resource_flags(pdev, i) &
6125 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6126 switch (mem_type) {
6127 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6128 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6129 offset += 4; /* 32 bit */
6130 break;
6131 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6132 offset += 8;
6133 break;
6134 default: /* reserved in PCI 2.2 */
6135 dev_warn(&pdev->dev,
6136 "base address is invalid\n");
6141 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6147 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6148 * controllers that are capable. If not, we use IO-APIC mode.
6149 */
6151 static void hpsa_interrupt_mode(struct ctlr_info *h)
6152 {
6153 #ifdef CONFIG_PCI_MSI
6154 int err, i;
6155 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6157 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6158 hpsa_msix_entries[i].vector = 0;
6159 hpsa_msix_entries[i].entry = i;
6162 /* Some boards advertise MSI but don't really support it */
6163 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6164 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6165 goto default_int_mode;
6166 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6167 dev_info(&h->pdev->dev, "MSIX\n");
6168 h->msix_vector = MAX_REPLY_QUEUES;
6169 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6172 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6173 "available\n", err);
6174 h->msix_vector = err;
6175 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6179 for (i = 0; i < h->msix_vector; i++)
6180 h->intr[i] = hpsa_msix_entries[i].vector;
6183 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
6186 goto default_int_mode;
6189 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6190 dev_info(&h->pdev->dev, "MSI\n");
6191 if (!pci_enable_msi(h->pdev))
6194 dev_warn(&h->pdev->dev, "MSI init failed\n");
6197 #endif /* CONFIG_PCI_MSI */
6198 /* if we get here we're going to use the default interrupt mode */
6199 h->intr[h->intr_mode] = h->pdev->irq;
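/*
 * Fallback sketch (vector counts are assumed for illustration): if the
 * driver asks for MAX_REPLY_QUEUES vectors but the system can only grant
 * four, the first pci_enable_msix() returns 4 (a positive count), we
 * shrink h->msix_vector to 4 and retry; only a second failure drops us
 * down to MSI, and finally to the platform INTx irq in h->pdev->irq.
 */
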
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}

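/*
 * Worked example: a board with PCI subsystem vendor 0x103C and subsystem
 * device 0x3241 yields *board_id == 0x3241103C (device ID in the upper 16
 * bits, vendor ID in the lower 16), which is the key used to match an
 * entry in the products[] table.
 */
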
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	*memory_bar = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16. Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	/*
	 * Limit in-command s/g elements to 32 to save DMA-able memory.
	 * However, the spec says that if the value read is 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}

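/*
 * Worked example of the limits above (numbers are illustrative): if the
 * controller reports MaxScatterGatherElements == 2048, we keep 32 SG
 * entries in the command itself, so h->chainsize = 2048 - 32 + 1 = 2017
 * entries fit in one chain block, and h->maxsgentries drops to 2047
 * because one in-command slot is spent on the chain pointer.
 */
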
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			break;
		/* delay and try again */
		msleep(20);
	}
}

static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}

static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
	return -ENODEV;
}

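/*
 * The sequence above is the generic config-table handshake used throughout
 * this file: write the requested transport method into
 * HostWrite.TransportRequest, ring the CFGTBL_ChangeReq bit in the SA5
 * doorbell, wait for the controller to drop that doorbell bit, then read
 * TransportActive back to confirm the mode actually took effect.
 */
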
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	/* Enable bus mastering (pci_disable_device may disable this) */
	pci_set_master(h->pdev);

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}

static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode". Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		dev_warn(&pdev->dev, "no-op failed%s\n",
			(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
				"; re-trying" : ""));
	}
	return 0;
}

static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct io_accel1_cmd),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++)
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
	} else {
		/* Use single reply pool */
		if (h->msix_vector > 0 || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}

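/*
 * Interrupt-to-queue mapping, by example (vector count illustrative): with
 * four MSI-X vectors in performant mode, h->q[0..3] hold 0..3 and each
 * vector's handler receives &h->q[i] as its dev_id, so it can recover
 * which reply queue to service from nothing but the pointer it was
 * registered with.
 */
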
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}

static void free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < h->msix_vector; i++)
		free_irq(h->intr[i], &h->q[i]);
}

static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif /* CONFIG_PCI_MSI */
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->hba_inquiry_data);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}

/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c);
	}
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}

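/*
 * Summary of the detection scheme above: healthy firmware increments the
 * HeartBeat counter in the config table continuously. If neither an
 * interrupt nor a heartbeat change has been observed across one full
 * heartbeat_sample_interval, the controller is declared locked up and all
 * outstanding commands are failed via controller_lockup_detected().
 */
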
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	/* Clear the driver-requested rescan flag */
	h->drv_req_rescan = 0;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++)
			h->dev[i]->offload_enabled = 0;
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}
}

/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan)
		return 1;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}

/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr))
			return 1;
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}

static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
	detect_controller_lockup(h);
	if (h->lockup_detected)
		return;

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		h->drv_req_rescan = 0;
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	}

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 128-byte boundary because
	 * the low-order bits of the address are used by the hardware and by
	 * the driver. See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->passthru_count_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->hba_mode_enabled = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross. We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid. So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time. Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irqs(h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything. Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;

	h->drv_req_rescan = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irqs(h);
clean2:
clean1:
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	unsigned long flags;

	/* Don't bother trying to flush the cache if locked up */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto out;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupts and send the flush,
	 * which writes all data in the battery-backed cache to disk.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	cancel_delayed_work(&h->monitor_ctlr_work);
	spin_unlock_irqrestore(&h->lock, flags);

	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->ioaccel2_blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers. The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands. This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes. The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, int *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}

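/*
 * Worked example: with bucket[] = {5, 6, 8, 10, 12, 20, 28, ...} and
 * min_blocks = 4, a command carrying i = 5 SG entries needs
 * size = 5 + 4 = 9 sixteen-byte blocks. The first bucket >= 9 is
 * bucket[3] = 10, so bucket_map[5] = 3 and the controller fetches
 * 10 * 16 = 160 bytes for that command.
 */
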
static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be. It's a way of
	 * reducing the DMA done to fetch each command. Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within. The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes. The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks. Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements. bft[] contains the eight values we write to
	 * the registers. They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_pool_dhandle +
			(h->max_commands * sizeof(u64) * i),
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* enable outbound interrupt coalescing in accelerator mode */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	}
	if (trans_support & CFGTBL_Trans_io_accel2) {
		access = SA5_ioaccel_mode2_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_pool_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
					DIRECT_LOOKUP_BIT;
			cp->Tag.upper = 0;
			cp->host_addr.lower =
				(u32) (h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
			cp->host_addr.upper = 0;
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
				ARRAY_SIZE(bft2) *
				sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
}

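/*
 * Tying the pieces together (illustrative): once the block fetch table is
 * programmed, a submitted command advertises its bucket index in the low
 * 3 bits of its tag/command address, as described in the comment at the
 * top of hpsa_enter_performant_mode(), so the controller DMAs only
 * bft[index] 16-byte blocks instead of the full maximum-sized frame.
 */
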
static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}

static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	}
	if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		if (ioaccel2_alloc_cmds_and_bft(h))
			goto clean_up;
	}

	/* TODO: check that this value of h->nreply_queues is correct */
	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

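/*
 * Sizing example for the reply ring above (illustrative numbers): each
 * reply descriptor is one u64, so with h->max_commands == 32 and
 * h->nreply_queues == 4, reply_pool_size = 32 * 8 * 4 = 1024 bytes, laid
 * out as four contiguous 256-byte rings, one per reply queue.
 */
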
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	unsigned long flags;
	int accel_cmds_out;

	do { /* wait for all outstanding commands to drain out */
		accel_cmds_out = 0;
		spin_lock_irqsave(&h->lock, flags);
		list_for_each_entry(c, &h->cmpQ, list)
			accel_cmds_out += is_accelerated_cmd(c);
		list_for_each_entry(c, &h->reqQ, list)
			accel_cmds_out += is_accelerated_cmd(c);
		spin_unlock_irqrestore(&h->lock, flags);
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

/*
 * This is it. Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one of
 * our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(Tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);