Source: drivers/scsi/stex.c from karo-tx-linux.git, at commit "stex: Support to Pegasus series."
1 /*
2  * SuperTrak EX Series Storage Controller driver for Linux
3  *
4  *      Copyright (C) 2005-2015 Promise Technology Inc.
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  *
11  *      Written By:
12  *              Ed Lin <promise_linux@promise.com>
13  *
14  */
15
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/delay.h>
20 #include <linux/slab.h>
21 #include <linux/time.h>
22 #include <linux/pci.h>
23 #include <linux/blkdev.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/module.h>
27 #include <linux/spinlock.h>
28 #include <linux/ktime.h>
29 #include <asm/io.h>
30 #include <asm/irq.h>
31 #include <asm/byteorder.h>
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_cmnd.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_dbg.h>
38 #include <scsi/scsi_eh.h>
39
40 #define DRV_NAME "stex"
41 #define ST_DRIVER_VERSION       "5.00.0000.01"
42 #define ST_VER_MAJOR            5
43 #define ST_VER_MINOR            00
44 #define ST_OEM                          0000
45 #define ST_BUILD_VER            01
46
enum {
	/* MU register offset (shasta/vsc/yosemite message unit) */
	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */

	/* register offsets used by the SS (st_yel) request path */
	YIOA_STATUS				= 0x00,
	YH2I_INT				= 0x20,
	YINT_EN					= 0x34,
	YI2H_INT				= 0x9c,
	YI2H_INT_C				= 0xa0,
	YH2I_REQ				= 0xc0,
	YH2I_REQ_HI				= 0xc4,

	/* MU register value */
	MU_INBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_INBOUND_DOORBELL_REQHEADCHANGED	= (1 << 1),
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= (1 << 2),
	MU_INBOUND_DOORBELL_HMUSTOPPED		= (1 << 3),
	MU_INBOUND_DOORBELL_RESET		= (1 << 4),

	MU_OUTBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= (1 << 1),
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= (1 << 2),
	MU_OUTBOUND_DOORBELL_BUSCHANGE		= (1 << 3),
	MU_OUTBOUND_DOORBELL_HASEVENT		= (1 << 4),
	MU_OUTBOUND_DOORBELL_REQUEST_RESET	= (1 << 27),

	/* MU status code (driver-side message unit state machine) */
	MU_STATE_STARTING			= 1,
	MU_STATE_STARTED			= 2,
	MU_STATE_RESETTING			= 3,
	MU_STATE_FAILED				= 4,

	MU_MAX_DELAY				= 120,
	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF		= 0x5a5a0000,
	MU_HARD_RESET_WAIT			= 30000,
	HMU_PARTNER_TYPE			= 2,

	/* firmware returned values (SRB completion codes) */
	SRB_STATUS_SUCCESS			= 0x01,
	SRB_STATUS_ERROR			= 0x04,
	SRB_STATUS_BUSY				= 0x05,
	SRB_STATUS_INVALID_REQUEST		= 0x06,
	SRB_STATUS_SELECTION_TIMEOUT		= 0x0A,
	SRB_SEE_SENSE				= 0x80,

	/* task attribute */
	TASK_ATTRIBUTE_SIMPLE			= 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE		= 0x1,
	TASK_ATTRIBUTE_ORDERED			= 0x2,
	TASK_ATTRIBUTE_ACA			= 0x4,

	/* SS scratch-word status flags */
	SS_STS_NORMAL				= 0x80000000,
	SS_STS_DONE				= 0x40000000,
	SS_STS_HANDSHAKE			= 0x20000000,

	SS_HEAD_HANDSHAKE			= 0x80,

	SS_H2I_INT_RESET			= 0x100,

	SS_I2H_REQUEST_RESET			= 0x2000,

	SS_MU_OPERATIONAL			= 0x80000000,

	STEX_CDB_LENGTH				= 16,
	STATUS_VAR_LEN				= 128,

	/* sg flags */
	SG_CF_EOT				= 0x80,	/* end of table */
	SG_CF_64B				= 0x40,	/* 64 bit item */
	SG_CF_HOST				= 0x20,	/* sg in host memory */
	MSG_DATA_DIR_ND				= 0,
	MSG_DATA_DIR_IN				= 1,
	MSG_DATA_DIR_OUT			= 2,

	/* controller families handled by this driver */
	st_shasta				= 0,
	st_vsc					= 1,
	st_yosemite				= 2,
	st_seq					= 3,
	st_yel					= 4,

	PASSTHRU_REQ_TYPE			= 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP			= 0x00000100,
	ST_INTERNAL_TIMEOUT			= 180,

	ST_TO_CMD				= 0,
	ST_FROM_CMD				= 1,

	/* vendor specific commands of Promise */
	MGT_CMD					= 0xd8,
	SINBAND_MGT_CMD				= 0xd9,
	ARRAY_CMD				= 0xe0,
	CONTROLLER_CMD				= 0xe1,
	DEBUGGING_CMD				= 0xe2,
	PASSTHRU_CMD				= 0xe3,

	PASSTHRU_GET_ADAPTER			= 0x05,
	PASSTHRU_GET_DRVVER			= 0x10,

	CTLR_CONFIG_CMD				= 0x03,
	CTLR_SHUTDOWN				= 0x0d,

	CTLR_POWER_STATE_CHANGE			= 0x0e,
	CTLR_POWER_SAVING			= 0x01,

	PASSTHRU_SIGNATURE			= 0x4e415041,
	MGT_CMD_SIGNATURE			= 0xba,

	INQUIRY_EVPD				= 0x01,

	ST_ADDITIONAL_MEM			= 0x200000,
	ST_ADDITIONAL_MEM_MIN			= 0x80000,
};
169
/* One 64-bit scatter/gather entry as consumed by non-SS firmware. */
struct st_sgitem {
	u8 ctrl;	/* SG_CF_xxx */
	u8 reserved[3];
	__le32 count;
	__le64 addr;
};
176
/* One scatter/gather entry for SS (st_yel) firmware: 64-bit bus address split into two 32-bit halves. */
struct st_ss_sgitem {
	__le32 addr;
	__le32 addr_hi;
	__le32 count;
};
182
/* Header preceding the sg item array inside a request's variable area. */
struct st_sgtable {
	__le16 sg_count;	/* entries actually used */
	__le16 max_sg_count;	/* capacity (host sg_tablesize) */
	__le32 sz_in_byte;	/* total transfer length */
};
188
/* Per-request header placed immediately before each SS request slot (see stex_ss_alloc_req/stex_ss_send_cmd). */
struct st_msg_header {
	__le64 handle;		/* bus address handed to the controller */
	u8 flag;
	u8 channel;
	__le16 timeout;
	u32 reserved;
};
196
/* Frame exchanged with firmware during the startup handshake; layout is fixed by the firmware protocol. */
struct handshake_frame {
	__le64 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	u8 reserved0[7];
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */
	__le32 scratch_size;
	u32 reserved1;
};
215
216 struct req_msg {
217         __le16 tag;
218         u8 lun;
219         u8 target;
220         u8 task_attr;
221         u8 task_manage;
222         u8 data_dir;
223         u8 payload_sz;          /* payload size in 4-byte, not used */
224         u8 cdb[STEX_CDB_LENGTH];
225         u32 variable[0];
226 };
227
/* Firmware-to-host completion payload; variable[] carries sense or returned data (see stex_copy_data). */
struct status_msg {
	__le16 tag;		/* matches the tag of the completed req_msg */
	u8 lun;
	u8 target;
	u8 srb_status;		/* SRB_STATUS_xxx / SRB_SEE_SENSE */
	u8 scsi_status;		/* SAM_STAT_xxx */
	u8 reserved;
	u8 payload_sz;		/* payload size in 4-byte */
	u8 variable[STATUS_VAR_LEN];
};
238
/* Version quadruple (major/minor/oem/build) used inside st_frame. */
struct ver_info {
	u32 major;
	u32 minor;
	u32 oem;
	u32 build;
	u32 reserved[2];
};
246
/* Adapter/PCI identification frame returned for PASSTHRU_GET_ADAPTER (filled by stex_controller_info). */
struct st_frame {
	u32 base[6];		/* BAR start; written via an unsigned long cast, so keep first */
	u32 rom_addr;

	struct ver_info drv_ver;
	struct ver_info bios_ver;

	u32 bus;
	u32 slot;
	u32 irq_level;
	u32 irq_vec;
	u32 id;			/* PCI vendor:device */
	u32 subid;		/* PCI subsystem vendor:device */

	u32 dimm_size;
	u8 dimm_type;
	u8 reserved[3];

	u32 channel;
	u32 reserved1;
};
268
/* Driver-version record copied out to the requester for PASSTHRU_GET_DRVVER. */
struct st_drvver {
	u32 major;
	u32 minor;
	u32 oem;
	u32 build;
	u32 signature[2];	/* signature[0] = PASSTHRU_SIGNATURE */
	u8 console_id;
	u8 host_no;
	u8 reserved0[2];
	u32 reserved[3];
};
280
/* Per-tag command control block tracking an outstanding request. */
struct st_ccb {
	struct req_msg *req;		/* in-flight request, NULL once completed */
	struct scsi_cmnd *cmd;		/* originating midlayer command, if any */

	void *sense_buffer;
	unsigned int sense_bufflen;
	int sg_count;

	u32 req_type;
	u8 srb_status;			/* completion status copied from status_msg */
	u8 scsi_status;
	u8 reserved[2];
};
294
/* Per-adapter state; lives in Scsi_Host hostdata. */
struct st_hba {
	void __iomem *mmio_base;	/* iomapped PCI memory space */
	void *dma_mem;			/* coherent buffer holding req/status rings */
	dma_addr_t dma_handle;
	size_t dma_size;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	/* card-type specific operations, copied from st_card_info */
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	/* ring indices; rings have rq_count+1 / sts_count+1 slots */
	u32 req_head;
	u32 req_tail;
	u32 status_head;
	u32 status_tail;

	struct status_msg *status_buffer;
	void *copy_buffer;		/* temp buffer for driver-handled commands */
	struct st_ccb *ccb;		/* one entry per tag */
	struct st_ccb *wait_ccb;	/* command being waited on by error handling */
	__le32 *scratch;		/* SS completion scratch words */

	char work_q_name[20];
	struct workqueue_struct *work_q;
	struct work_struct reset_work;
	wait_queue_head_t reset_waitq;
	unsigned int mu_status;		/* MU_STATE_xxx */
	unsigned int cardtype;		/* st_shasta .. st_yel */
	int msi_enabled;
	int out_req_cnt;		/* requests posted but not yet completed */
	u32 extra_offset;
	u16 rq_count;
	u16 rq_size;
	u16 sts_count;
	u8  supports_pm;
};
333
/* Static per-card-type parameters and operations (selected at probe time). */
struct st_card_info {
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);
	unsigned int max_id;
	unsigned int max_lun;
	unsigned int max_channel;
	u16 rq_count;
	u16 rq_size;
	u16 sts_count;
};
345
/* Module parameter: opt-in to MSI interrupts (default off). */
static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)");
349
/* Canned standard INQUIRY response for the virtual RAID console device. */
static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};
361
/* Module metadata. */
MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);
366
367 static struct status_msg *stex_get_status(struct st_hba *hba)
368 {
369         struct status_msg *status = hba->status_buffer + hba->status_tail;
370
371         ++hba->status_tail;
372         hba->status_tail %= hba->sts_count+1;
373
374         return status;
375 }
376
377 static void stex_invalid_field(struct scsi_cmnd *cmd,
378                                void (*done)(struct scsi_cmnd *))
379 {
380         cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
381
382         /* "Invalid field in cdb" */
383         scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
384                                 0x0);
385         done(cmd);
386 }
387
388 static struct req_msg *stex_alloc_req(struct st_hba *hba)
389 {
390         struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
391
392         ++hba->req_head;
393         hba->req_head %= hba->rq_count+1;
394
395         return req;
396 }
397
398 static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
399 {
400         return (struct req_msg *)(hba->dma_mem +
401                 hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
402 }
403
404 static int stex_map_sg(struct st_hba *hba,
405         struct req_msg *req, struct st_ccb *ccb)
406 {
407         struct scsi_cmnd *cmd;
408         struct scatterlist *sg;
409         struct st_sgtable *dst;
410         struct st_sgitem *table;
411         int i, nseg;
412
413         cmd = ccb->cmd;
414         nseg = scsi_dma_map(cmd);
415         BUG_ON(nseg < 0);
416         if (nseg) {
417                 dst = (struct st_sgtable *)req->variable;
418
419                 ccb->sg_count = nseg;
420                 dst->sg_count = cpu_to_le16((u16)nseg);
421                 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
422                 dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
423
424                 table = (struct st_sgitem *)(dst + 1);
425                 scsi_for_each_sg(cmd, sg, nseg, i) {
426                         table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
427                         table[i].addr = cpu_to_le64(sg_dma_address(sg));
428                         table[i].ctrl = SG_CF_64B | SG_CF_HOST;
429                 }
430                 table[--i].ctrl |= SG_CF_EOT;
431         }
432
433         return nseg;
434 }
435
436 static int stex_ss_map_sg(struct st_hba *hba,
437         struct req_msg *req, struct st_ccb *ccb)
438 {
439         struct scsi_cmnd *cmd;
440         struct scatterlist *sg;
441         struct st_sgtable *dst;
442         struct st_ss_sgitem *table;
443         int i, nseg;
444
445         cmd = ccb->cmd;
446         nseg = scsi_dma_map(cmd);
447         BUG_ON(nseg < 0);
448         if (nseg) {
449                 dst = (struct st_sgtable *)req->variable;
450
451                 ccb->sg_count = nseg;
452                 dst->sg_count = cpu_to_le16((u16)nseg);
453                 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
454                 dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
455
456                 table = (struct st_ss_sgitem *)(dst + 1);
457                 scsi_for_each_sg(cmd, sg, nseg, i) {
458                         table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
459                         table[i].addr =
460                                 cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
461                         table[i].addr_hi =
462                                 cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
463                 }
464         }
465
466         return nseg;
467 }
468
/*
 * Service PASSTHRU_GET_ADAPTER: copy the caller's st_frame into the
 * bounce buffer, fill in adapter/PCI identification and the driver
 * version, and copy it back into the command's sg list.
 */
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
	struct st_frame *p;
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
	memset(p->base, 0, sizeof(u32)*6);
	/*
	 * Store the BAR0 start address; on 64-bit it spans base[0..1],
	 * which is why base[] is zeroed first.
	 */
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
	p->rom_addr = 0;

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_level = 0;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
}
495
/*
 * Post a prepared request on non-SS controllers: record it in the ccb
 * table, publish the new request head and ring the inbound doorbell.
 * Caller holds the host lock and has already advanced req_head via
 * stex_alloc_req().
 */
static void
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
}
508
/*
 * Post a prepared request on SS (st_yel) controllers: fill in the
 * message header preceding the request slot, advance the ring head and
 * hand the slot's bus address to the hardware via the H2I registers.
 */
static void
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	struct scsi_cmnd *cmd;
	struct st_msg_header *msg_h;
	dma_addr_t addr;

	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	cmd = hba->ccb[tag].cmd;
	/* the st_msg_header sits immediately before the request slot */
	msg_h = (struct st_msg_header *)req - 1;
	if (likely(cmd)) {
		msg_h->channel = (u8)cmd->device->channel;
		msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
	}
	addr = hba->dma_handle + hba->req_head * hba->rq_size;
	/*
	 * NOTE(review): folds a function of sg_count into the low bits of
	 * the handle -- presumably a firmware protocol detail; confirm
	 * against Promise documentation before touching.
	 */
	addr += (hba->ccb[tag].sg_count+4)/11;
	msg_h->handle = cpu_to_le64(addr);

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	/* high half first; double shift is safe on 32-bit dma_addr_t */
	writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
	readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
	writel(addr, hba->mmio_base + YH2I_REQ);
	readl(hba->mmio_base + YH2I_REQ); /* flush */
}
539
540 static int
541 stex_slave_config(struct scsi_device *sdev)
542 {
543         sdev->use_10_for_rw = 1;
544         sdev->use_10_for_ms = 1;
545         blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
546
547         return 0;
548 }
549
550 static int
551 stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
552 {
553         struct st_hba *hba;
554         struct Scsi_Host *host;
555         unsigned int id, lun;
556         struct req_msg *req;
557         u16 tag;
558
559         host = cmd->device->host;
560         id = cmd->device->id;
561         lun = cmd->device->lun;
562         hba = (struct st_hba *) &host->hostdata[0];
563
564         if (unlikely(hba->mu_status == MU_STATE_RESETTING))
565                 return SCSI_MLQUEUE_HOST_BUSY;
566
567         switch (cmd->cmnd[0]) {
568         case MODE_SENSE_10:
569         {
570                 static char ms10_caching_page[12] =
571                         { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
572                 unsigned char page;
573
574                 page = cmd->cmnd[2] & 0x3f;
575                 if (page == 0x8 || page == 0x3f) {
576                         scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
577                                                  sizeof(ms10_caching_page));
578                         cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
579                         done(cmd);
580                 } else
581                         stex_invalid_field(cmd, done);
582                 return 0;
583         }
584         case REPORT_LUNS:
585                 /*
586                  * The shasta firmware does not report actual luns in the
587                  * target, so fail the command to force sequential lun scan.
588                  * Also, the console device does not support this command.
589                  */
590                 if (hba->cardtype == st_shasta || id == host->max_id - 1) {
591                         stex_invalid_field(cmd, done);
592                         return 0;
593                 }
594                 break;
595         case TEST_UNIT_READY:
596                 if (id == host->max_id - 1) {
597                         cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
598                         done(cmd);
599                         return 0;
600                 }
601                 break;
602         case INQUIRY:
603                 if (lun >= host->max_lun) {
604                         cmd->result = DID_NO_CONNECT << 16;
605                         done(cmd);
606                         return 0;
607                 }
608                 if (id != host->max_id - 1)
609                         break;
610                 if (!lun && !cmd->device->channel &&
611                         (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
612                         scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
613                                                  sizeof(console_inq_page));
614                         cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
615                         done(cmd);
616                 } else
617                         stex_invalid_field(cmd, done);
618                 return 0;
619         case PASSTHRU_CMD:
620                 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
621                         struct st_drvver ver;
622                         size_t cp_len = sizeof(ver);
623
624                         ver.major = ST_VER_MAJOR;
625                         ver.minor = ST_VER_MINOR;
626                         ver.oem = ST_OEM;
627                         ver.build = ST_BUILD_VER;
628                         ver.signature[0] = PASSTHRU_SIGNATURE;
629                         ver.console_id = host->max_id - 1;
630                         ver.host_no = hba->host->host_no;
631                         cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
632                         cmd->result = sizeof(ver) == cp_len ?
633                                 DID_OK << 16 | COMMAND_COMPLETE << 8 :
634                                 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
635                         done(cmd);
636                         return 0;
637                 }
638         default:
639                 break;
640         }
641
642         cmd->scsi_done = done;
643
644         tag = cmd->request->tag;
645
646         if (unlikely(tag >= host->can_queue))
647                 return SCSI_MLQUEUE_HOST_BUSY;
648
649         req = hba->alloc_rq(hba);
650
651         req->lun = lun;
652         req->target = id;
653
654         /* cdb */
655         memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
656
657         if (cmd->sc_data_direction == DMA_FROM_DEVICE)
658                 req->data_dir = MSG_DATA_DIR_IN;
659         else if (cmd->sc_data_direction == DMA_TO_DEVICE)
660                 req->data_dir = MSG_DATA_DIR_OUT;
661         else
662                 req->data_dir = MSG_DATA_DIR_ND;
663
664         hba->ccb[tag].cmd = cmd;
665         hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
666         hba->ccb[tag].sense_buffer = cmd->sense_buffer;
667
668         if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
669                 hba->ccb[tag].sg_count = 0;
670                 memset(&req->variable[0], 0, 8);
671         }
672
673         hba->send(hba, req, tag);
674         return 0;
675 }
676
/* Generate stex_queuecommand(), the host-lock-taking wrapper around stex_queuecommand_lck(). */
static DEF_SCSI_QCMD(stex_queuecommand)
679 static void stex_scsi_done(struct st_ccb *ccb)
680 {
681         struct scsi_cmnd *cmd = ccb->cmd;
682         int result;
683
684         if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
685                 result = ccb->scsi_status;
686                 switch (ccb->scsi_status) {
687                 case SAM_STAT_GOOD:
688                         result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
689                         break;
690                 case SAM_STAT_CHECK_CONDITION:
691                         result |= DRIVER_SENSE << 24;
692                         break;
693                 case SAM_STAT_BUSY:
694                         result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
695                         break;
696                 default:
697                         result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
698                         break;
699                 }
700         }
701         else if (ccb->srb_status & SRB_SEE_SENSE)
702                 result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
703         else switch (ccb->srb_status) {
704                 case SRB_STATUS_SELECTION_TIMEOUT:
705                         result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
706                         break;
707                 case SRB_STATUS_BUSY:
708                         result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
709                         break;
710                 case SRB_STATUS_INVALID_REQUEST:
711                 case SRB_STATUS_ERROR:
712                 default:
713                         result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
714                         break;
715         }
716
717         cmd->result = result;
718         cmd->scsi_done(cmd);
719 }
720
721 static void stex_copy_data(struct st_ccb *ccb,
722         struct status_msg *resp, unsigned int variable)
723 {
724         if (resp->scsi_status != SAM_STAT_GOOD) {
725                 if (ccb->sense_buffer != NULL)
726                         memcpy(ccb->sense_buffer, resp->variable,
727                                 min(variable, ccb->sense_bufflen));
728                 return;
729         }
730
731         if (ccb->cmd == NULL)
732                 return;
733         scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
734 }
735
736 static void stex_check_cmd(struct st_hba *hba,
737         struct st_ccb *ccb, struct status_msg *resp)
738 {
739         if (ccb->cmd->cmnd[0] == MGT_CMD &&
740                 resp->scsi_status != SAM_STAT_CHECK_CONDITION)
741                 scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
742                         le32_to_cpu(*(__le32 *)&resp->variable[0]));
743 }
744
/*
 * Message-unit interrupt body for non-SS controllers: walk the status
 * ring from tail to the hardware-published head, complete each tagged
 * request, then acknowledge the new head back to the hardware.
 * Called with the host lock held (from stex_intr).
 */
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;
	struct st_ccb *ccb;
	unsigned int size;
	u16 tag;

	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
		return;

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head > hba->sts_count)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));
		return;
	}

	/*
	 * it's not a valid status payload if:
	 * 1. there are no pending requests(e.g. during init stage)
	 * 2. there are some pending requests, but the controller is in
	 *     reset status, and its type is not st_yosemite
	 * firmware of st_yosemite in reset status will return pending requests
	 * to driver, so we allow it to pass
	 */
	if (unlikely(hba->out_req_cnt <= 0 ||
			(hba->mu_status == MU_STATE_RESETTING &&
			 hba->cardtype != st_yosemite))) {
		/* discard everything up to the published head */
		hba->status_tail = hba->status_head;
		goto update_status;
	}

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		/* wake anyone (error handler) waiting on this ccb */
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		size = resp->payload_sz * sizeof(u32); /* payload size */
		/* sanity-check: payload must cover the fixed header, at most the full msg */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));
		} else {
			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
			if (size)
				stex_copy_data(ccb, resp, size);
		}

		ccb->req = NULL;
		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			if (hba->cardtype == st_yosemite)
				stex_check_cmd(hba, ccb, resp);

			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);

			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			/* internal (driver-issued) request: mark it finished */
			ccb->req_type = 0;
	}

update_status:
	/* acknowledge consumption up to status_head */
	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */
}
831
/*
 * Interrupt handler for non-SS controllers.  Reads and clears the
 * outbound doorbell, processes completions under the host lock, and
 * defers a controller reset to the workqueue if the firmware requested
 * one (shasta only).  Returns IRQ_NONE when the doorbell shows no work
 * (shared-IRQ case) or reads as all-ones (device gone).
 */
static irqreturn_t stex_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* reset is queued outside the lock */
		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
			hba->cardtype == st_shasta))
			queue_work(hba->work_q, &hba->reset_work);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_NONE;
}
859
/*
 * Drain the SuperSpeed (st_yel) status ring: walk scratch/status slots
 * starting at status_tail, complete each tagged ccb, and stop at the
 * first slot whose scratch word lacks SS_STS_NORMAL.
 * Called with the host lock held (from stex_ss_intr / stex_abort).
 */
static void stex_ss_mu_intr(struct st_hba *hba)
{
	struct status_msg *resp;
	struct st_ccb *ccb;
	__le32 *scratch;
	unsigned int size;
	int count = 0;
	u32 value;
	u16 tag;

	/* nothing outstanding, or a reset in flight: leave the ring alone */
	if (unlikely(hba->out_req_cnt <= 0 ||
			hba->mu_status == MU_STATE_RESETTING))
		return;

	while (count < hba->sts_count) {
		scratch = hba->scratch + hba->status_tail;
		value = le32_to_cpu(*scratch);
		/* slot not yet filled in by firmware: ring is drained */
		if (unlikely(!(value & SS_STS_NORMAL)))
			return;

		resp = hba->status_buffer + hba->status_tail;
		*scratch = 0;	/* hand the slot back to the firmware */
		++count;
		++hba->status_tail;
		hba->status_tail %= hba->sts_count+1;	/* ring has sts_count+1 slots */

		/* low 16 bits of the scratch word carry the request tag */
		tag = (u16)value;
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		/* stex_abort() may be waiting for exactly this completion */
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		ccb->req = NULL;
		if (likely(value & SS_STS_DONE)) { /* normal case */
			ccb->srb_status = SRB_STATUS_SUCCESS;
			ccb->scsi_status = SAM_STAT_GOOD;
		} else {
			/* error path: pull detailed status from the message */
			ccb->srb_status = resp->srb_status;
			ccb->scsi_status = resp->scsi_status;
			size = resp->payload_sz * sizeof(u32);
			/* payload must cover the fixed part and fit the frame */
			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
				size > sizeof(*resp))) {
				printk(KERN_WARNING DRV_NAME
					"(%s): bad status size\n",
					pci_name(hba->pdev));
			} else {
				size -= sizeof(*resp) - STATUS_VAR_LEN;
				if (size)
					stex_copy_data(ccb, resp, size);
			}
			if (likely(ccb->cmd != NULL))
				stex_check_cmd(hba, ccb, resp);
		}

		if (likely(ccb->cmd != NULL)) {
			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			ccb->req_type = 0;	/* internal request: just retire it */
	}
}
932
933 static irqreturn_t stex_ss_intr(int irq, void *__hba)
934 {
935         struct st_hba *hba = __hba;
936         void __iomem *base = hba->mmio_base;
937         u32 data;
938         unsigned long flags;
939
940         spin_lock_irqsave(hba->host->host_lock, flags);
941
942         data = readl(base + YI2H_INT);
943         if (data && data != 0xffffffff) {
944                 /* clear the interrupt */
945                 writel(data, base + YI2H_INT_C);
946                 stex_ss_mu_intr(hba);
947                 spin_unlock_irqrestore(hba->host->host_lock, flags);
948                 if (unlikely(data & SS_I2H_REQUEST_RESET))
949                         queue_work(hba->work_q, &hba->reset_work);
950                 return IRQ_HANDLED;
951         }
952
953         spin_unlock_irqrestore(hba->host->host_lock, flags);
954
955         return IRQ_NONE;
956 }
957
/*
 * Handshake with pre-SuperSpeed firmware: wait for the MU signature,
 * negotiate queue depth, post a handshake frame describing the DMA
 * ring layout, and wait for the firmware to acknowledge it.
 * Returns 0 on success, -1 on timeout.
 */
static int stex_common_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	u32 data;
	unsigned long before;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		/* nudge the firmware and poll for the signature */
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
		readl(base + IDBL);
		before = jiffies;
		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));
				return -1;
			}
			rmb();
			msleep(1);
		}
	}

	udelay(10);

	/*
	 * OMR1 may carry a queue depth in its low half (signature in the
	 * high half); shrink our queue if the firmware supports fewer.
	 */
	data = readl(base + OMR1);
	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
		data &= 0x0000ffff;
		if (hba->host->can_queue > data) {
			hba->host->can_queue = data;
			hba->host->cmd_per_lun = data;
		}
	}

	/* build the handshake frame in the (not yet used) status buffer */
	h = (struct handshake_frame *)hba->status_buffer;
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	if (hba->extra_offset) {
		h->extra_offset = cpu_to_le32(hba->extra_offset);
		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
	} else
		h->extra_offset = h->extra_size = 0;

	/* hand the frame's 64-bit DMA address to the firmware via IMR0/IMR1 */
	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
	writel(status_phys, base + IMR0);
	readl(base + IMR0);
	writel((status_phys >> 16) >> 16, base + IMR1);
	readl(base + IMR1);

	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
	readl(base + OMR0);
	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	/* wait for the firmware to consume the handshake frame */
	udelay(10);
	before = jiffies;
	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			return -1;
		}
		rmb();
		msleep(1);
	}

	/* clear all message registers now that the handshake is complete */
	writel(0, base + IMR0);
	readl(base + IMR0);
	writel(0, base + OMR0);
	readl(base + OMR0);
	writel(0, base + IMR1);
	readl(base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */
	return 0;
}
1041
/*
 * Handshake with SuperSpeed (st_yel) firmware: wait until the MU is
 * operational, build a handshake frame (preceded by a message header)
 * at the start of the DMA area, post its address via the YH2I request
 * registers, and poll the first scratch word for the acknowledgement.
 * Returns 0 on success, -1 on timeout.
 */
static int stex_ss_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct st_msg_header *msg_h;
	struct handshake_frame *h;
	__le32 *scratch;
	u32 data, scratch_size;
	unsigned long before;
	int ret = 0;

	before = jiffies;
	while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): firmware not operational\n",
				pci_name(hba->pdev));
			return -1;
		}
		msleep(1);
	}

	/* message header sits at the very start of the DMA block */
	msg_h = (struct st_msg_header *)hba->dma_mem;
	msg_h->handle = cpu_to_le64(hba->dma_handle);
	msg_h->flag = SS_HEAD_HANDSHAKE;

	/* the handshake frame follows immediately after the header */
	h = (struct handshake_frame *)(msg_h + 1);
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	h->extra_offset = h->extra_size = 0;
	scratch_size = (hba->sts_count+1)*sizeof(u32);
	h->scratch_size = cpu_to_le32(scratch_size);

	/* NOTE(review): bit 2 of YINT_EN is masked here — presumably one
	   interrupt source the driver does not want during handshake */
	data = readl(base + YINT_EN);
	data &= ~4;
	writel(data, base + YINT_EN);
	/* hand the 64-bit DMA address to the firmware, high half first */
	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
	readl(base + YH2I_REQ_HI);
	writel(hba->dma_handle, base + YH2I_REQ);
	readl(base + YH2I_REQ); /* flush */

	/* firmware signals completion in the first scratch word */
	scratch = hba->scratch;
	before = jiffies;
	while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			ret = -1;
			break;
		}
		rmb();
		msleep(1);
	}

	/* reset the scratch area and header flag for normal operation */
	memset(scratch, 0, scratch_size);
	msg_h->flag = 0;
	return ret;
}
1105
1106 static int stex_handshake(struct st_hba *hba)
1107 {
1108         int err;
1109         unsigned long flags;
1110         unsigned int mu_status;
1111
1112         err = (hba->cardtype == st_yel) ?
1113                 stex_ss_handshake(hba) : stex_common_handshake(hba);
1114         spin_lock_irqsave(hba->host->host_lock, flags);
1115         mu_status = hba->mu_status;
1116         if (err == 0) {
1117                 hba->req_head = 0;
1118                 hba->req_tail = 0;
1119                 hba->status_head = 0;
1120                 hba->status_tail = 0;
1121                 hba->out_req_cnt = 0;
1122                 hba->mu_status = MU_STATE_STARTED;
1123         } else
1124                 hba->mu_status = MU_STATE_FAILED;
1125         if (mu_status == MU_STATE_RESETTING)
1126                 wake_up_all(&hba->reset_waitq);
1127         spin_unlock_irqrestore(hba->host->host_lock, flags);
1128         return err;
1129 }
1130
/*
 * SCSI error-handler abort callback.  The hardware exposes no abort
 * primitive, so the strategy is: poll the interrupt status once — if
 * the command completes during that poll, report SUCCESS; otherwise
 * nullify its request so a late completion is ignored and report
 * FAILED.
 */
static int stex_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = cmd->request->tag;
	void __iomem *base;
	u32 data;
	int result = SUCCESS;
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "aborting command\n");

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	/* only proceed if the tag still maps to this outstanding command */
	if (tag < host->can_queue &&
		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];
	else
		goto out;

	if (hba->cardtype == st_yel) {
		data = readl(base + YI2H_INT);
		/* no interrupt pending (or device gone): command is stuck */
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
	} else {
		data = readl(base + ODBL);
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */

		stex_mu_intr(hba, data);
	}
	/* the intr routine clears wait_ccb when it completes our command */
	if (hba->wait_ccb == NULL) {
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));
		goto out;
	}

fail_out:
	scsi_dma_unmap(cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;
	result = FAILED;
out:
	spin_unlock_irqrestore(host->host_lock, flags);
	return result;
}
1183
/*
 * Reset an st_shasta controller by toggling the secondary-bus reset
 * bit of its upstream PCI bridge, then restore the config space that
 * the reset wiped out.
 */
static void stex_hard_reset(struct st_hba *hba)
{
	struct pci_bus *bus;
	int i;
	u16 pci_cmd;
	u8 pci_bctl;

	/* save the first 64 bytes of config space before the reset */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/* Reset secondary bus. Our controller(MU/ATU) is the only device on
	   secondary bus. Consult Intel 80331/3 developer's manual for detail */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers. But 16-port controllers
	 * require more time to finish bus reset. Use 100 ms here for safety
	 */
	msleep(100);
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/* wait until the device answers config reads with bus mastering set */
	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
			break;
		msleep(1);
	}

	/* let the controller settle, then restore the saved config space */
	ssleep(5);
	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
}
1222
1223 static int stex_yos_reset(struct st_hba *hba)
1224 {
1225         void __iomem *base;
1226         unsigned long flags, before;
1227         int ret = 0;
1228
1229         base = hba->mmio_base;
1230         writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
1231         readl(base + IDBL); /* flush */
1232         before = jiffies;
1233         while (hba->out_req_cnt > 0) {
1234                 if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1235                         printk(KERN_WARNING DRV_NAME
1236                                 "(%s): reset timeout\n", pci_name(hba->pdev));
1237                         ret = -1;
1238                         break;
1239                 }
1240                 msleep(1);
1241         }
1242
1243         spin_lock_irqsave(hba->host->host_lock, flags);
1244         if (ret == -1)
1245                 hba->mu_status = MU_STATE_FAILED;
1246         else
1247                 hba->mu_status = MU_STATE_STARTED;
1248         wake_up_all(&hba->reset_waitq);
1249         spin_unlock_irqrestore(hba->host->host_lock, flags);
1250
1251         return ret;
1252 }
1253
1254 static void stex_ss_reset(struct st_hba *hba)
1255 {
1256         writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1257         readl(hba->mmio_base + YH2I_INT);
1258         ssleep(5);
1259 }
1260
/*
 * Core reset path shared by the error handler and the reset work item.
 * Serializes concurrent resets via mu_status/reset_waitq, performs the
 * cardtype-specific reset, fails all outstanding commands with
 * DID_RESET, and re-runs the firmware handshake.
 * Returns 0 on success, -1 on failure.
 */
static int stex_do_reset(struct st_hba *hba)
{
	struct st_ccb *ccb;
	unsigned long flags;
	unsigned int mu_status = MU_STATE_RESETTING;
	u16 tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/* still initializing: nothing to reset yet */
	if (hba->mu_status == MU_STATE_STARTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
			pci_name(hba->pdev));
		return 0;
	}
	/* another reset in progress: wait for it and adopt its outcome */
	while (hba->mu_status == MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		wait_event_timeout(hba->reset_waitq,
				   hba->mu_status != MU_STATE_RESETTING,
				   MU_MAX_DELAY * HZ);
		spin_lock_irqsave(hba->host->host_lock, flags);
		mu_status = hba->mu_status;
	}

	/* the loop above ran: report the other reset's result instead */
	if (mu_status != MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
	}

	/* we own the reset from here on */
	hba->mu_status = MU_STATE_RESETTING;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* st_yosemite drains in-flight requests and updates state itself */
	if (hba->cardtype == st_yosemite)
		return stex_yos_reset(hba);

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);
	else if (hba->cardtype == st_yel)
		stex_ss_reset(hba);

	/* everything still outstanding was lost: complete with DID_RESET */
	spin_lock_irqsave(hba->host->host_lock, flags);
	for (tag = 0; tag < hba->host->can_queue; tag++) {
		ccb = &hba->ccb[tag];
		if (ccb->req == NULL)
			continue;
		ccb->req = NULL;
		if (ccb->cmd) {
			scsi_dma_unmap(ccb->cmd);
			ccb->cmd->result = DID_RESET << 16;
			ccb->cmd->scsi_done(ccb->cmd);
			ccb->cmd = NULL;
		}
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* stex_handshake() updates mu_status and wakes reset waiters */
	if (stex_handshake(hba) == 0)
		return 0;

	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
		pci_name(hba->pdev));
	return -1;
}
1322
1323 static int stex_reset(struct scsi_cmnd *cmd)
1324 {
1325         struct st_hba *hba;
1326
1327         hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1328
1329         shost_printk(KERN_INFO, cmd->device->host,
1330                      "resetting host\n");
1331
1332         return stex_do_reset(hba) ? FAILED : SUCCESS;
1333 }
1334
/*
 * Deferred reset, queued from the interrupt handlers when the firmware
 * requests one; the return value of stex_do_reset() is intentionally
 * ignored here (it logs its own failures).
 */
static void stex_reset_work(struct work_struct *work)
{
	struct st_hba *hba = container_of(work, struct st_hba, reset_work);

	stex_do_reset(hba);
}
1341
1342 static int stex_biosparam(struct scsi_device *sdev,
1343         struct block_device *bdev, sector_t capacity, int geom[])
1344 {
1345         int heads = 255, sectors = 63;
1346
1347         if (capacity < 0x200000) {
1348                 heads = 64;
1349                 sectors = 32;
1350         }
1351
1352         sector_div(capacity, heads * sectors);
1353
1354         geom[0] = heads;
1355         geom[1] = sectors;
1356         geom[2] = capacity;
1357
1358         return 0;
1359 }
1360
/* SCSI mid-layer host template; unset fields take the SCSI core defaults. */
static struct scsi_host_template driver_template = {
	.module                         = THIS_MODULE,
	.name                           = DRV_NAME,
	.proc_name                      = DRV_NAME,
	.bios_param                     = stex_biosparam,
	.queuecommand                   = stex_queuecommand,
	.slave_configure                = stex_slave_config,
	.eh_abort_handler               = stex_abort,
	.eh_host_reset_handler          = stex_reset,
	.this_id                        = -1,
};
1372
/*
 * Supported PCI IDs; driver_data holds the cardtype enum used to index
 * stex_card_info[] in stex_probe().
 */
static struct pci_device_id stex_pci_tbl[] = {
	/* st_shasta */
	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX12350 */
	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX4350 */
	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX24350 */

	/* st_vsc */
	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },

	/* st_yosemite */
	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },

	/* st_seq */
	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },

	/* st_yel: 0x8650 with subvendor 0x1033 is st_yel, not st_yosemite */
	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
	{ }	/* terminate list */
};
1398
/*
 * Per-cardtype parameters and operations, indexed by the cardtype enum
 * carried in stex_pci_tbl's driver_data.  rq_count/sts_count are ring
 * sizes; rq_size is the per-request message size in bytes.
 */
static struct st_card_info stex_card_info[] = {
	/* st_shasta */
	{
		.max_id         = 17,
		.max_lun        = 8,
		.max_channel    = 0,
		.rq_count       = 32,
		.rq_size        = 1048,
		.sts_count      = 32,
		.alloc_rq       = stex_alloc_req,
		.map_sg         = stex_map_sg,
		.send           = stex_send_cmd,
	},

	/* st_vsc */
	{
		.max_id         = 129,
		.max_lun        = 1,
		.max_channel    = 0,
		.rq_count       = 32,
		.rq_size        = 1048,
		.sts_count      = 32,
		.alloc_rq       = stex_alloc_req,
		.map_sg         = stex_map_sg,
		.send           = stex_send_cmd,
	},

	/* st_yosemite */
	{
		.max_id         = 2,
		.max_lun        = 256,
		.max_channel    = 0,
		.rq_count       = 256,
		.rq_size        = 1048,
		.sts_count      = 256,
		.alloc_rq       = stex_alloc_req,
		.map_sg         = stex_map_sg,
		.send           = stex_send_cmd,
	},

	/* st_seq */
	{
		.max_id         = 129,
		.max_lun        = 1,
		.max_channel    = 0,
		.rq_count       = 32,
		.rq_size        = 1048,
		.sts_count      = 32,
		.alloc_rq       = stex_alloc_req,
		.map_sg         = stex_map_sg,
		.send           = stex_send_cmd,
	},

	/* st_yel: uses the SuperSpeed (ss) request/SG/send paths */
	{
		.max_id         = 129,
		.max_lun        = 256,
		.max_channel    = 3,
		.rq_count       = 801,
		.rq_size        = 512,
		.sts_count      = 801,
		.alloc_rq       = stex_ss_alloc_req,
		.map_sg         = stex_ss_map_sg,
		.send           = stex_ss_send_cmd,
	},
};
1465
/*
 * Configure DMA addressing: prefer 64-bit streaming + coherent masks,
 * falling back to 32-bit for both if either 64-bit mask is rejected.
 * Returns 0 on success, or the last pci_set_* error code.
 */
static int stex_set_dma_mask(struct pci_dev *pdev)
{
	int ret;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!ret)
		return 0;

	/* 64-bit not usable: fall back to 32-bit addressing */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	return ret;
}
1478
1479 static int stex_request_irq(struct st_hba *hba)
1480 {
1481         struct pci_dev *pdev = hba->pdev;
1482         int status;
1483
1484         if (msi) {
1485                 status = pci_enable_msi(pdev);
1486                 if (status != 0)
1487                         printk(KERN_ERR DRV_NAME
1488                                 "(%s): error %d setting up MSI\n",
1489                                 pci_name(pdev), status);
1490                 else
1491                         hba->msi_enabled = 1;
1492         } else
1493                 hba->msi_enabled = 0;
1494
1495         status = request_irq(pdev->irq, hba->cardtype == st_yel ?
1496                 stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
1497
1498         if (status != 0) {
1499                 if (hba->msi_enabled)
1500                         pci_disable_msi(pdev);
1501         }
1502         return status;
1503 }
1504
1505 static void stex_free_irq(struct st_hba *hba)
1506 {
1507         struct pci_dev *pdev = hba->pdev;
1508
1509         free_irq(pdev->irq, hba);
1510         if (hba->msi_enabled)
1511                 pci_disable_msi(pdev);
1512 }
1513
/*
 * PCI probe: enable the device, map BAR0, carve up a single coherent
 * DMA block into request ring / scratch / status ring / copy frame,
 * allocate per-tag ccbs, set up the reset workqueue and IRQ, handshake
 * with the firmware, and finally register the SCSI host.
 * Errors unwind through the goto ladder in reverse acquisition order.
 */
static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	const struct st_card_info *ci = NULL;
	u32 sts_offset, cp_offset, scratch_offset;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	/* hba lives in the host's hostdata area */
	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_disable;
	}

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);
	if (err < 0) {
		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
			pci_name(pdev));
		goto out_scsi_host_put;
	}

	hba->mmio_base = pci_ioremap_bar(pdev, 0);
	if ( !hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_release_regions;
	}

	err = stex_set_dma_mask(pdev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
			pci_name(pdev));
		goto out_iounmap;
	}

	/* cardtype (from the PCI table) selects the parameter set */
	hba->cardtype = (unsigned int) id->driver_data;
	ci = &stex_card_info[hba->cardtype];
	/*
	 * st_yel boards whose subdevice ID is NOT in this list support
	 * power management (Pegasus series).
	 */
	switch (id->subdevice) {
	case 0x4221:
	case 0x4222:
	case 0x4223:
	case 0x4224:
	case 0x4225:
	case 0x4226:
	case 0x4227:
	case 0x4261:
	case 0x4262:
	case 0x4263:
	case 0x4264:
	case 0x4265:
		break;
	default:
		if (hba->cardtype == st_yel)
			hba->supports_pm = 1;
	}

	/*
	 * DMA block layout: request ring first; st_yel inserts scratch
	 * words between the request and status rings; a copy frame ends
	 * the block.  st_seq and odd-subdevice st_vsc boards append an
	 * extra firmware memory region.
	 */
	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
	if (hba->cardtype == st_yel)
		sts_offset += (ci->sts_count+1) * sizeof(u32);
	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
	hba->dma_size = cp_offset + sizeof(struct st_frame);
	if (hba->cardtype == st_seq ||
		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
		hba->extra_offset = hba->dma_size;
		hba->dma_size += ST_ADDITIONAL_MEM;
	}
	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		/* Retry minimum coherent mapping for st_seq and st_vsc */
		if (hba->cardtype == st_seq ||
		    (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
			printk(KERN_WARNING DRV_NAME
				"(%s): allocating min buffer for controller\n",
				pci_name(pdev));
			hba->dma_size = hba->extra_offset
				+ ST_ADDITIONAL_MEM_MIN;
			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
		}

		if (!hba->dma_mem) {
			err = -ENOMEM;
			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
				pci_name(pdev));
			goto out_iounmap;
		}
	}

	/* one ccb per possible tag */
	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
	if (!hba->ccb) {
		err = -ENOMEM;
		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
			pci_name(pdev));
		goto out_pci_free;
	}

	/* resolve the sub-region pointers inside the DMA block */
	if (hba->cardtype == st_yel)
		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
	hba->copy_buffer = hba->dma_mem + cp_offset;
	hba->rq_count = ci->rq_count;
	hba->rq_size = ci->rq_size;
	hba->sts_count = ci->sts_count;
	hba->alloc_rq = ci->alloc_rq;
	hba->map_sg = ci->map_sg;
	hba->send = ci->send;
	hba->mu_status = MU_STATE_STARTING;

	if (hba->cardtype == st_yel)
		host->sg_tablesize = 38;
	else
		host->sg_tablesize = 32;
	host->can_queue = ci->rq_count;
	host->cmd_per_lun = ci->rq_count;
	host->max_id = ci->max_id;
	host->max_lun = ci->max_lun;
	host->max_channel = ci->max_channel;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	hba->host = host;
	hba->pdev = pdev;
	init_waitqueue_head(&hba->reset_waitq);

	/* dedicated workqueue for firmware-requested resets */
	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
		 "stex_wq_%d", host->host_no);
	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
	if (!hba->work_q) {
		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_ccb_free;
	}
	INIT_WORK(&hba->reset_work, stex_reset_work);

	err = stex_request_irq(hba);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
			pci_name(pdev));
		goto out_free_wq;
	}

	/* bring up the firmware before exposing the host to the mid-layer */
	err = stex_handshake(hba);
	if (err)
		goto out_free_irq;

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
			pci_name(pdev));
		goto out_free_irq;
	}

	scsi_scan_host(host);

	return 0;

out_free_irq:
	stex_free_irq(hba);
out_free_wq:
	destroy_workqueue(hba->work_q);
out_ccb_free:
	kfree(hba->ccb);
out_pci_free:
	dma_free_coherent(&pdev->dev, hba->dma_size,
			  hba->dma_mem, hba->dma_handle);
out_iounmap:
	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);

	return err;
}
1707
1708 static void stex_hba_stop(struct st_hba *hba)
1709 {
1710         struct req_msg *req;
1711         struct st_msg_header *msg_h;
1712         unsigned long flags;
1713         unsigned long before;
1714         u16 tag = 0;
1715
1716         spin_lock_irqsave(hba->host->host_lock, flags);
1717         req = hba->alloc_rq(hba);
1718         if (hba->cardtype == st_yel) {
1719                 msg_h = (struct st_msg_header *)req - 1;
1720                 memset(msg_h, 0, hba->rq_size);
1721         } else
1722                 memset(req, 0, hba->rq_size);
1723
1724         if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
1725                 req->cdb[0] = MGT_CMD;
1726                 req->cdb[1] = MGT_CMD_SIGNATURE;
1727                 req->cdb[2] = CTLR_CONFIG_CMD;
1728                 req->cdb[3] = CTLR_SHUTDOWN;
1729         } else {
1730                 req->cdb[0] = CONTROLLER_CMD;
1731                 req->cdb[1] = CTLR_POWER_STATE_CHANGE;
1732                 req->cdb[2] = CTLR_POWER_SAVING;
1733         }
1734
1735         hba->ccb[tag].cmd = NULL;
1736         hba->ccb[tag].sg_count = 0;
1737         hba->ccb[tag].sense_bufflen = 0;
1738         hba->ccb[tag].sense_buffer = NULL;
1739         hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
1740
1741         hba->send(hba, req, tag);
1742         spin_unlock_irqrestore(hba->host->host_lock, flags);
1743
1744         before = jiffies;
1745         while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1746                 if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1747                         hba->ccb[tag].req_type = 0;
1748                         return;
1749                 }
1750                 msleep(1);
1751         }
1752 }
1753
1754 static void stex_hba_free(struct st_hba *hba)
1755 {
1756         stex_free_irq(hba);
1757
1758         destroy_workqueue(hba->work_q);
1759
1760         iounmap(hba->mmio_base);
1761
1762         pci_release_regions(hba->pdev);
1763
1764         kfree(hba->ccb);
1765
1766         dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1767                           hba->dma_mem, hba->dma_handle);
1768 }
1769
1770 static void stex_remove(struct pci_dev *pdev)
1771 {
1772         struct st_hba *hba = pci_get_drvdata(pdev);
1773
1774         scsi_remove_host(hba->host);
1775
1776         stex_hba_stop(hba);
1777
1778         stex_hba_free(hba);
1779
1780         scsi_host_put(hba->host);
1781
1782         pci_disable_device(pdev);
1783 }
1784
/*
 * PCI shutdown callback: quiesce the controller on system halt or
 * reboot.  Resources are not freed here — the kernel is going down.
 */
static void stex_shutdown(struct pci_dev *pdev)
{
	stex_hba_stop(pci_get_drvdata(pdev));
}
1791
1792 MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
1793
/* PCI driver glue: entry points registered with the PCI core. */
static struct pci_driver stex_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= stex_pci_tbl,
	.probe		= stex_probe,
	.remove		= stex_remove,
	.shutdown	= stex_shutdown,
};
1801
1802 static int __init stex_init(void)
1803 {
1804         printk(KERN_INFO DRV_NAME
1805                 ": Promise SuperTrak EX Driver version: %s\n",
1806                  ST_DRIVER_VERSION);
1807
1808         return pci_register_driver(&stex_pci_driver);
1809 }
1810
1811 static void __exit stex_exit(void)
1812 {
1813         pci_unregister_driver(&stex_pci_driver);
1814 }
1815
/* Hook init/exit into the module load/unload machinery. */
module_init(stex_init);
module_exit(stex_exit);