1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19
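/*
 * A minimal sketch, for context only (not part of the original file): the
 * driver installs one of the handlers below at probe time and passes the
 * response queue as the dev_id cookie, which is why each handler starts by
 * casting dev_id back to a struct rsp_que.  The names ha and rsp are
 * assumed from the surrounding probe code.
 *
 *	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
 *	    IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
 */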
20 /**
21  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
22  * @irq: Interrupt number
23  * @dev_id: response queue pointer
24  *
25  * Called by the system whenever the host adapter generates an interrupt.
26  *
27  * Returns handled flag.
28  */
29 irqreturn_t
30 qla2100_intr_handler(int irq, void *dev_id)
31 {
32         scsi_qla_host_t *vha;
33         struct qla_hw_data *ha;
34         struct device_reg_2xxx __iomem *reg;
35         int             status;
36         unsigned long   iter;
37         uint16_t        hccr;
38         uint16_t        mb[4];
39         struct rsp_que *rsp;
40         unsigned long   flags;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock_irqsave(&ha->hardware_lock, flags);
54         vha = pci_get_drvdata(ha->pdev);
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
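                        /*
                         * Per the range checks below: mb[0] in 0x4000-0x7fff
                         * is a mailbox command completion status, while
                         * 0x8000-0xbfff is an asynchronous event.
                         */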
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock_irqrestore(&ha->hardware_lock, flags);
106
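        /*
         * A mailbox command completion was seen above; wake the waiter
         * sleeping on mbx_intr_comp (presumably qla2x00_mailbox_command()).
         */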
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: Interrupt number
119  * @dev_id: response queue pointer
120  *
121  * Called by the system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137         unsigned long   flags;
138
139         rsp = (struct rsp_que *) dev_id;
140         if (!rsp) {
141                 printk(KERN_INFO
142                     "%s(): NULL response queue pointer\n", __func__);
143                 return (IRQ_NONE);
144         }
145
146         ha = rsp->hw;
147         reg = &ha->iobase->isp;
148         status = 0;
149
150         spin_lock_irqsave(&ha->hardware_lock, flags);
151         vha = pci_get_drvdata(ha->pdev);
152         for (iter = 50; iter--; ) {
153                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
154                 if (stat & HSR_RISC_PAUSED) {
155                         if (pci_channel_offline(ha->pdev))
156                                 break;
157
158                         hccr = RD_REG_WORD(&reg->hccr);
159                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
160                                 qla_printk(KERN_INFO, ha, "Parity error -- "
161                                     "HCCR=%x, Dumping firmware!\n", hccr);
162                         else
163                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
164                                     "HCCR=%x, Dumping firmware!\n", hccr);
165
166                         /*
167                          * Issue a "HARD" reset in order for the RISC
168                          * interrupt bit to be cleared.  Schedule a big
169                          * hammer to get out of the RISC PAUSED state.
170                          */
171                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
172                         RD_REG_WORD(&reg->hccr);
173
174                         ha->isp_ops->fw_dump(vha, 1);
175                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
176                         break;
177                 } else if ((stat & HSR_RISC_INT) == 0)
178                         break;
179
180                 switch (stat & 0xff) {
181                 case 0x1:
182                 case 0x2:
183                 case 0x10:
184                 case 0x11:
185                         qla2x00_mbx_completion(vha, MSW(stat));
186                         status |= MBX_INTERRUPT;
187
188                         /* Release mailbox registers. */
189                         WRT_REG_WORD(&reg->semaphore, 0);
190                         break;
191                 case 0x12:
192                         mb[0] = MSW(stat);
193                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
194                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
195                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
196                         qla2x00_async_event(vha, rsp, mb);
197                         break;
198                 case 0x13:
199                         qla2x00_process_response_queue(rsp);
200                         break;
201                 case 0x15:
202                         mb[0] = MBA_CMPLT_1_16BIT;
203                         mb[1] = MSW(stat);
204                         qla2x00_async_event(vha, rsp, mb);
205                         break;
206                 case 0x16:
207                         mb[0] = MBA_SCSI_COMPLETION;
208                         mb[1] = MSW(stat);
209                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
210                         qla2x00_async_event(vha, rsp, mb);
211                         break;
212                 default:
213                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
214                             "(%d).\n",
215                             vha->host_no, stat & 0xff));
216                         break;
217                 }
218                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
219                 RD_REG_WORD_RELAXED(&reg->hccr);
220         }
221         spin_unlock_irqrestore(&ha->hardware_lock, flags);
222
223         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
224             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
225                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
226                 complete(&ha->mbx_intr_comp);
227         }
228
229         return (IRQ_HANDLED);
230 }
231
232 /**
233  * qla2x00_mbx_completion() - Process mailbox command completions.
234  * @vha: SCSI driver HA context
235  * @mb0: Mailbox0 register
236  */
237 static void
238 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
239 {
240         uint16_t        cnt;
241         uint16_t __iomem *wptr;
242         struct qla_hw_data *ha = vha->hw;
243         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
244
245         /* Load return mailbox registers. */
246         ha->flags.mbox_int = 1;
247         ha->mailbox_out[0] = mb0;
248         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
249
250         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
251                 if (IS_QLA2200(ha) && cnt == 8)
252                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
253                 if (cnt == 4 || cnt == 5)
254                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
255                 else
256                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
257
258                 wptr++;
259         }
260
261         if (ha->mcp) {
262                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
263                     __func__, vha->host_no, ha->mcp->mb[0]));
264         } else {
265                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
266                     __func__, vha->host_no));
267         }
268 }
269
270 static void
271 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
272 {
273         static char *event[] =
274                 { "Complete", "Request Notification", "Time Extension" };
275         int rval;
276         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
277         uint16_t __iomem *wptr;
278         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
279
280         /* Seed data -- mailbox1 -> mailbox7. */
281         wptr = (uint16_t __iomem *)&reg24->mailbox1;
282         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
283                 mb[cnt] = RD_REG_WORD(wptr);
284
285         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
286             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
287             event[aen & 0xff],
288             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
289
290         /* Acknowledgement needed? [Notify && non-zero timeout]. */
291         timeout = (descr >> 8) & 0xf;
292         if (aen != MBA_IDC_NOTIFY || !timeout)
293                 return;
294
295         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
296             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
297
298         rval = qla2x00_post_idc_ack_work(vha, mb);
299         if (rval != QLA_SUCCESS)
300                 qla_printk(KERN_WARNING, vha->hw,
301                     "IDC failed to post ACK.\n");
302 }
303
304 /**
305  * qla2x00_async_event() - Process asynchronous events.
306  * @vha: SCSI driver HA context
307  * @mb: Mailbox registers (0 - 3)
308  */
309 void
310 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
311 {
312 #define LS_UNKNOWN      2
313         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
314         char            *link_speed;
315         uint16_t        handle_cnt;
316         uint16_t        cnt;
317         uint32_t        handles[5];
318         struct qla_hw_data *ha = vha->hw;
319         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
320         uint32_t        rscn_entry, host_pid;
321         uint8_t         rscn_queue_index;
322         unsigned long   flags;
323
324         /* Setup to process RIO completion. */
325         handle_cnt = 0;
326         if (IS_QLA81XX(ha))
327                 goto skip_rio;
328         switch (mb[0]) {
329         case MBA_SCSI_COMPLETION:
330                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
331                 handle_cnt = 1;
332                 break;
333         case MBA_CMPLT_1_16BIT:
334                 handles[0] = mb[1];
335                 handle_cnt = 1;
336                 mb[0] = MBA_SCSI_COMPLETION;
337                 break;
338         case MBA_CMPLT_2_16BIT:
339                 handles[0] = mb[1];
340                 handles[1] = mb[2];
341                 handle_cnt = 2;
342                 mb[0] = MBA_SCSI_COMPLETION;
343                 break;
344         case MBA_CMPLT_3_16BIT:
345                 handles[0] = mb[1];
346                 handles[1] = mb[2];
347                 handles[2] = mb[3];
348                 handle_cnt = 3;
349                 mb[0] = MBA_SCSI_COMPLETION;
350                 break;
351         case MBA_CMPLT_4_16BIT:
352                 handles[0] = mb[1];
353                 handles[1] = mb[2];
354                 handles[2] = mb[3];
355                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
356                 handle_cnt = 4;
357                 mb[0] = MBA_SCSI_COMPLETION;
358                 break;
359         case MBA_CMPLT_5_16BIT:
360                 handles[0] = mb[1];
361                 handles[1] = mb[2];
362                 handles[2] = mb[3];
363                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
364                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
365                 handle_cnt = 5;
366                 mb[0] = MBA_SCSI_COMPLETION;
367                 break;
368         case MBA_CMPLT_2_32BIT:
369                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
370                 handles[1] = le32_to_cpu(
371                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
372                     RD_MAILBOX_REG(ha, reg, 6));
373                 handle_cnt = 2;
374                 mb[0] = MBA_SCSI_COMPLETION;
375                 break;
376         default:
377                 break;
378         }
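        /*
         * Reduced Interrupt Operation: the switch above gathered up to five
         * completion handles and rewrote mb[0] to MBA_SCSI_COMPLETION so the
         * fast-post case below completes them in a single pass.
         */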
379 skip_rio:
380         switch (mb[0]) {
381         case MBA_SCSI_COMPLETION:       /* Fast Post */
382                 if (!vha->flags.online)
383                         break;
384
385                 for (cnt = 0; cnt < handle_cnt; cnt++)
386                         qla2x00_process_completed_request(vha, rsp->req,
387                                 handles[cnt]);
388                 break;
389
390         case MBA_RESET:                 /* Reset */
391                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
392                         vha->host_no));
393
394                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
395                 break;
396
397         case MBA_SYSTEM_ERR:            /* System Error */
398                 qla_printk(KERN_INFO, ha,
399                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
400                     mb[1], mb[2], mb[3]);
401
402                 ha->isp_ops->fw_dump(vha, 1);
403
404                 if (IS_FWI2_CAPABLE(ha)) {
405                         if (mb[1] == 0 && mb[2] == 0) {
406                                 qla_printk(KERN_ERR, ha,
407                                     "Unrecoverable Hardware Error: adapter "
408                                     "marked OFFLINE!\n");
409                                 vha->flags.online = 0;
410                         } else
411                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
412                 } else if (mb[1] == 0) {
413                         qla_printk(KERN_INFO, ha,
414                             "Unrecoverable Hardware Error: adapter marked "
415                             "OFFLINE!\n");
416                         vha->flags.online = 0;
417                 } else
418                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
419                 break;
420
421         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
422                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
423                     vha->host_no));
424                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
425
426                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
427                 break;
428
429         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
430                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
431                     vha->host_no));
432                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
433
434                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
435                 break;
436
437         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
438                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
439                     vha->host_no));
440                 break;
441
442         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
443                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
444                     mb[1]));
445                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
446
447                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
448                         atomic_set(&vha->loop_state, LOOP_DOWN);
449                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
450                         qla2x00_mark_all_devices_lost(vha, 1);
451                 }
452
453                 if (vha->vp_idx) {
454                         atomic_set(&vha->vp_state, VP_FAILED);
455                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
456                 }
457
458                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
459                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
460
461                 vha->flags.management_server_logged_in = 0;
462                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
463                 break;
464
465         case MBA_LOOP_UP:               /* Loop Up Event */
466                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
467                         link_speed = link_speeds[0];
468                         ha->link_data_rate = PORT_SPEED_1GB;
469                 } else {
470                         link_speed = link_speeds[LS_UNKNOWN];
471                         if (mb[1] < 5)
472                                 link_speed = link_speeds[mb[1]];
473                         else if (mb[1] == 0x13)
474                                 link_speed = link_speeds[5];
475                         ha->link_data_rate = mb[1];
476                 }
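                /*
                 * Decode example from the table above: mb[1] == 3 selects
                 * "4" (4 Gbps), mb[1] == 0x13 selects "10" (10 Gbps), and
                 * any other value of 5 or more falls back to "?".
                 */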
477
478                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
479                     vha->host_no, link_speed));
480                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
481                     link_speed);
482
483                 vha->flags.management_server_logged_in = 0;
484                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
485                 break;
486
487         case MBA_LOOP_DOWN:             /* Loop Down Event */
488                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
489                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
490                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
491                     mb[1], mb[2], mb[3]);
492
493                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
494                         atomic_set(&vha->loop_state, LOOP_DOWN);
495                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
496                         vha->device_flags |= DFLG_NO_CABLE;
497                         qla2x00_mark_all_devices_lost(vha, 1);
498                 }
499
500                 if (vha->vp_idx) {
501                         atomic_set(&vha->vp_state, VP_FAILED);
502                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
503                 }
504
505                 vha->flags.management_server_logged_in = 0;
506                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
507                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
508                 break;
509
510         case MBA_LIP_RESET:             /* LIP reset occurred */
511                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
512                     vha->host_no, mb[1]));
513                 qla_printk(KERN_INFO, ha,
514                     "LIP reset occurred (%x).\n", mb[1]);
515
516                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
517                         atomic_set(&vha->loop_state, LOOP_DOWN);
518                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
519                         qla2x00_mark_all_devices_lost(vha, 1);
520                 }
521
522                 if (vha->vp_idx) {
523                         atomic_set(&vha->vp_state, VP_FAILED);
524                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
525                 }
526
527                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
528
529                 ha->operating_mode = LOOP;
530                 vha->flags.management_server_logged_in = 0;
531                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
532                 break;
533
534         /* case MBA_DCBX_COMPLETE: */
535         case MBA_POINT_TO_POINT:        /* Point-to-Point */
536                 if (IS_QLA2100(ha))
537                         break;
538
539                 if (IS_QLA81XX(ha))
540                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
541                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
542                 else
543                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
544                             "received.\n", vha->host_no));
545
546                 /*
547                  * Until there's a transition from loop down to loop up, treat
548                  * this as loop down only.
549                  */
550                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
551                         atomic_set(&vha->loop_state, LOOP_DOWN);
552                         if (!atomic_read(&vha->loop_down_timer))
553                                 atomic_set(&vha->loop_down_timer,
554                                     LOOP_DOWN_TIME);
555                         qla2x00_mark_all_devices_lost(vha, 1);
556                 }
557
558                 if (vha->vp_idx) {
559                         atomic_set(&vha->vp_state, VP_FAILED);
560                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
561                 }
562
563                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
564                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
565
566                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
567                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
568
569                 ha->flags.gpsc_supported = 1;
570                 vha->flags.management_server_logged_in = 0;
571                 break;
572
573         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
574                 if (IS_QLA2100(ha))
575                         break;
576
577                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
578                     "received.\n",
579                     vha->host_no));
580                 qla_printk(KERN_INFO, ha,
581                     "Configuration change detected: value=%x.\n", mb[1]);
582
583                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
584                         atomic_set(&vha->loop_state, LOOP_DOWN);
585                         if (!atomic_read(&vha->loop_down_timer))
586                                 atomic_set(&vha->loop_down_timer,
587                                     LOOP_DOWN_TIME);
588                         qla2x00_mark_all_devices_lost(vha, 1);
589                 }
590
591                 if (vha->vp_idx) {
592                         atomic_set(&vha->vp_state, VP_FAILED);
593                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
594                 }
595
596                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
597                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
598                 break;
599
600         case MBA_PORT_UPDATE:           /* Port database update */
601                 /*
602                  * Handle only global and vn-port update events
603                  *
604                  * Relevant inputs:
605                  * mb[1] = N_Port handle of changed port
606                  * OR 0xffff for global event
607                  * mb[2] = New login state
608                  * 7 = Port logged out
609                  * mb[3] = LSB is vp_idx, 0xff = all vps
610                  *
611                  * Skip processing if:
612                  *       Event is global, vp_idx is NOT all vps,
613                  *           vp_idx does not match
614                  *       Event is not global, vp_idx does not match
615                  */
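                /*
                 * Worked example: mb[1] == 0xffff, mb[2] == 0x7,
                 * mb[3] == 0x00ff is a global "port logged out" aimed at all
                 * vports; it is not filtered out below and is handled by the
                 * global branch that follows.
                 */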
616                 if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
617                         || (mb[1] != 0xffff)) {
618                         if (vha->vp_idx != (mb[3] & 0xff))
619                                 break;
620                 }
621
622                 /* Global event -- port logout or port unavailable. */
623                 if (mb[1] == 0xffff && mb[2] == 0x7) {
624                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
625                             vha->host_no));
626                         DEBUG(printk(KERN_INFO
627                             "scsi(%ld): Port unavailable %04x %04x %04x.\n",
628                             vha->host_no, mb[1], mb[2], mb[3]));
629
630                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
631                                 atomic_set(&vha->loop_state, LOOP_DOWN);
632                                 atomic_set(&vha->loop_down_timer,
633                                     LOOP_DOWN_TIME);
634                                 vha->device_flags |= DFLG_NO_CABLE;
635                                 qla2x00_mark_all_devices_lost(vha, 1);
636                         }
637
638                         if (vha->vp_idx) {
639                                 atomic_set(&vha->vp_state, VP_FAILED);
640                                 fc_vport_set_state(vha->fc_vport,
641                                     FC_VPORT_FAILED);
642                                 qla2x00_mark_all_devices_lost(vha, 1);
643                         }
644
645                         vha->flags.management_server_logged_in = 0;
646                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
647                         break;
648                 }
649
650                 /*
651                  * If the PORT UPDATE is global (a LIP_OCCURRED/LIP_RESET
652                  * event was received earlier, indicating the loop is down),
653                  * process it.  Otherwise ignore it and wait for an RSCN.
654                  */
655                 atomic_set(&vha->loop_down_timer, 0);
656                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
657                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
658                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
659                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
660                             mb[2], mb[3]));
661                         break;
662                 }
663
664                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
665                     vha->host_no));
666                 DEBUG(printk(KERN_INFO
667                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
668                     vha->host_no, mb[1], mb[2], mb[3]));
669
670                 /*
671                  * Mark all devices as missing so we will log in again.
672                  */
673                 atomic_set(&vha->loop_state, LOOP_UP);
674
675                 qla2x00_mark_all_devices_lost(vha, 1);
676
677                 vha->flags.rscn_queue_overflow = 1;
678
679                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
680                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
681                 break;
682
683         case MBA_RSCN_UPDATE:           /* State Change Registration */
684                 /* Check if the Vport has issued a SCR */
685                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
686                         break;
687                 /* Only handle SCNs for our Vport index. */
688                 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
689                         break;
690
691                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
692                     vha->host_no));
693                 DEBUG(printk(KERN_INFO
694                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
695                     vha->host_no, mb[1], mb[2], mb[3]));
696
697                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
698                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
699                                 | vha->d_id.b.al_pa;
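                /*
                 * host_pid is the local 24-bit FC port ID
                 * (domain << 16 | area << 8 | AL_PA); an RSCN naming our own
                 * port ID is ignored just below.
                 */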
700                 if (rscn_entry == host_pid) {
701                         DEBUG(printk(KERN_INFO
702                             "scsi(%ld): Ignoring RSCN update to local host "
703                             "port ID (%06x)\n",
704                             vha->host_no, host_pid));
705                         break;
706                 }
707
708                 /* Ignore reserved bits from RSCN-payload. */
709                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
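                /*
                 * rscn_queue is a small ring buffer indexed by rscn_in_ptr /
                 * rscn_out_ptr; if advancing the in-pointer would collide
                 * with the out-pointer, the entry is dropped and the overflow
                 * flag is set instead.
                 */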
710                 rscn_queue_index = vha->rscn_in_ptr + 1;
711                 if (rscn_queue_index == MAX_RSCN_COUNT)
712                         rscn_queue_index = 0;
713                 if (rscn_queue_index != vha->rscn_out_ptr) {
714                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
715                         vha->rscn_in_ptr = rscn_queue_index;
716                 } else {
717                         vha->flags.rscn_queue_overflow = 1;
718                 }
719
720                 atomic_set(&vha->loop_state, LOOP_UPDATE);
721                 atomic_set(&vha->loop_down_timer, 0);
722                 vha->flags.management_server_logged_in = 0;
723
724                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
725                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
726                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
727                 break;
728
729         /* case MBA_RIO_RESPONSE: */
730         case MBA_ZIO_RESPONSE:
731                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
732                     vha->host_no));
733
734                 if (IS_FWI2_CAPABLE(ha))
735                         qla24xx_process_response_queue(vha, rsp);
736                 else
737                         qla2x00_process_response_queue(rsp);
738                 break;
739
740         case MBA_DISCARD_RND_FRAME:
741                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
742                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
743                 break;
744
745         case MBA_TRACE_NOTIFICATION:
746                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
747                     vha->host_no, mb[1], mb[2]));
748                 break;
749
750         case MBA_ISP84XX_ALERT:
751                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
752                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
753
754                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
755                 switch (mb[1]) {
756                 case A84_PANIC_RECOVERY:
757                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
758                             "%04x %04x\n", mb[2], mb[3]);
759                         break;
760                 case A84_OP_LOGIN_COMPLETE:
761                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
762                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
763                             "firmware version %x\n", ha->cs84xx->op_fw_version));
764                         break;
765                 case A84_DIAG_LOGIN_COMPLETE:
766                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
767                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
768                             "diagnostic firmware version %x\n",
769                             ha->cs84xx->diag_fw_version));
770                         break;
771                 case A84_GOLD_LOGIN_COMPLETE:
772                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
773                         ha->cs84xx->fw_update = 1;
774                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
775                             "firmware version %x\n",
776                             ha->cs84xx->gold_fw_version));
777                         break;
778                 default:
779                         qla_printk(KERN_ERR, ha,
780                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
781                             mb[1], mb[2], mb[3]);
782                 }
783                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
784                 break;
785         case MBA_DCBX_START:
786                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
787                     vha->host_no, mb[1], mb[2], mb[3]));
788                 break;
789         case MBA_DCBX_PARAM_UPDATE:
790                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
791                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
792                 break;
793         case MBA_FCF_CONF_ERR:
794                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
795                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
796                 break;
797         case MBA_IDC_COMPLETE:
798         case MBA_IDC_NOTIFY:
799         case MBA_IDC_TIME_EXT:
800                 qla81xx_idc_event(vha, mb[0], mb[1]);
801                 break;
802         }
803
804         if (!vha->vp_idx && ha->num_vhosts)
805                 qla2x00_alert_all_vps(rsp, mb);
806 }
807
808 static void
809 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
810 {
811         fc_port_t *fcport = data;
812         struct scsi_qla_host *vha = fcport->vha;
813         struct qla_hw_data *ha = vha->hw;
814         struct req_que *req = NULL;
815
816         if (!ql2xqfulltracking)
817                 return;
818
819         req = vha->req;
820         if (!req)
821                 return;
822         if (req->max_q_depth <= sdev->queue_depth)
823                 return;
824
825         if (sdev->ordered_tags)
826                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
827                     sdev->queue_depth + 1);
828         else
829                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
830                     sdev->queue_depth + 1);
831
832         fcport->last_ramp_up = jiffies;
833
834         DEBUG2(qla_printk(KERN_INFO, ha,
835             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
836             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
837             sdev->queue_depth));
838 }
839
840 static void
841 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
842 {
843         fc_port_t *fcport = data;
844
845         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
846                 return;
847
848         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
849             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
850             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
851             sdev->queue_depth));
852 }
853
854 static inline void
855 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
856                                                                 srb_t *sp)
857 {
858         fc_port_t *fcport;
859         struct scsi_device *sdev;
860
861         if (!ql2xqfulltracking)
862                 return;
863
864         sdev = sp->cmd->device;
865         if (sdev->queue_depth >= req->max_q_depth)
866                 return;
867
868         fcport = sp->fcport;
869         if (time_before(jiffies,
870             fcport->last_ramp_up + ql2xqfullrampup * HZ))
871                 return;
872         if (time_before(jiffies,
873             fcport->last_queue_full + ql2xqfullrampup * HZ))
874                 return;
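        /*
         * Both hold-off windows have expired (ql2xqfullrampup seconds since
         * the last ramp-up and since the last queue full), so nudge every
         * device on this target up by one queue-depth slot.
         */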
875
876         starget_for_each_device(sdev->sdev_target, fcport,
877             qla2x00_adjust_sdev_qdepth_up);
878 }
879
880 /**
881  * qla2x00_process_completed_request() - Process a Fast Post response.
882  * @vha: SCSI driver HA context
883  * @index: SRB index
884  */
885 static void
886 qla2x00_process_completed_request(struct scsi_qla_host *vha,
887                                 struct req_que *req, uint32_t index)
888 {
889         srb_t *sp;
890         struct qla_hw_data *ha = vha->hw;
891
892         /* Validate handle. */
893         if (index >= MAX_OUTSTANDING_COMMANDS) {
894                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
895                     vha->host_no, index));
896                 qla_printk(KERN_WARNING, ha,
897                     "Invalid SCSI completion handle %d.\n", index);
898
899                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
900                 return;
901         }
902
903         sp = req->outstanding_cmds[index];
904         if (sp) {
905                 /* Free outstanding command slot. */
906                 req->outstanding_cmds[index] = NULL;
907
908                 /* Save ISP completion status */
909                 sp->cmd->result = DID_OK << 16;
910
911                 qla2x00_ramp_up_queue_depth(vha, req, sp);
912                 qla2x00_sp_compl(ha, sp);
913         } else {
914                 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
915                         " handle(%d)\n", vha->host_no, req->id, index));
916                 qla_printk(KERN_WARNING, ha,
917                     "Invalid ISP SCSI completion handle\n");
918
919                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
920         }
921 }
922
923 static srb_t *
924 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
925     struct req_que *req, void *iocb)
926 {
927         struct qla_hw_data *ha = vha->hw;
928         sts_entry_t *pkt = iocb;
929         srb_t *sp = NULL;
930         uint16_t index;
931
932         index = LSW(pkt->handle);
933         if (index >= MAX_OUTSTANDING_COMMANDS) {
934                 qla_printk(KERN_WARNING, ha,
935                     "%s: Invalid completion handle (%x).\n", func, index);
936                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
937                 goto done;
938         }
939         sp = req->outstanding_cmds[index];
940         if (!sp) {
941                 qla_printk(KERN_WARNING, ha,
942                     "%s: Invalid completion handle (%x) -- timed-out.\n", func,
943                     index);
944                 return sp;
945         }
946         if (sp->handle != index) {
947                 qla_printk(KERN_WARNING, ha,
948                     "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
949                     index);
950                 return NULL;
951         }
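        /* Claim the slot so this handle cannot be completed twice. */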
952         req->outstanding_cmds[index] = NULL;
953 done:
954         return sp;
955 }
956
957 static void
958 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
959     struct mbx_entry *mbx)
960 {
961         const char func[] = "MBX-IOCB";
962         const char *type;
963         struct qla_hw_data *ha = vha->hw;
964         fc_port_t *fcport;
965         srb_t *sp;
966         struct srb_logio *lio;
967         uint16_t data[2];
968
969         sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
970         if (!sp)
971                 return;
972
973         type = NULL;
974         lio = sp->ctx;
975         switch (lio->ctx.type) {
976         case SRB_LOGIN_CMD:
977                 type = "login";
978                 break;
979         case SRB_LOGOUT_CMD:
980                 type = "logout";
981                 break;
982         default:
983                 qla_printk(KERN_WARNING, ha,
984                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
985                     lio->ctx.type);
986                 return;
987         }
988
989         del_timer(&lio->ctx.timer);
990         fcport = sp->fcport;
991
992         data[0] = data[1] = 0;
993         if (mbx->entry_status) {
994                 DEBUG2(printk(KERN_WARNING
995                     "scsi(%ld:%x): Async-%s error entry - entry-status=%x "
996                     "status=%x state-flag=%x status-flags=%x.\n",
997                     fcport->vha->host_no, sp->handle, type,
998                     mbx->entry_status, le16_to_cpu(mbx->status),
999                     le16_to_cpu(mbx->state_flags),
1000                     le16_to_cpu(mbx->status_flags)));
1001                 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
1002
1003                 data[0] = MBS_COMMAND_ERROR;
1004                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1005                     QLA_LOGIO_LOGIN_RETRIED: 0;
1006                 goto done_post_logio_done_work;
1007         }
1008
1009         if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1010                 DEBUG2(printk(KERN_DEBUG
1011                     "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
1012                     fcport->vha->host_no, sp->handle, type,
1013                     le16_to_cpu(mbx->mb1)));
1014
1015                 data[0] = MBS_COMMAND_COMPLETE;
1016                 if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
1017                         fcport->flags |= FCF_FCP2_DEVICE;
1018
1019                 goto done_post_logio_done_work;
1020         }
1021
1022         data[0] = le16_to_cpu(mbx->mb0);
1023         switch (data[0]) {
1024         case MBS_PORT_ID_USED:
1025                 data[1] = le16_to_cpu(mbx->mb1);
1026                 break;
1027         case MBS_LOOP_ID_USED:
1028                 break;
1029         default:
1030                 data[0] = MBS_COMMAND_ERROR;
1031                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1032                     QLA_LOGIO_LOGIN_RETRIED: 0;
1033                 break;
1034         }
1035
1036         DEBUG2(printk(KERN_WARNING
1037             "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
1038             "mb6=%x mb7=%x.\n",
1039             fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
1040             le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1041             le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1042             le16_to_cpu(mbx->mb7)));
1043
1044 done_post_logio_done_work:
1045         lio->ctx.type == SRB_LOGIN_CMD ?
1046             qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
1047             qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
1048
1049         lio->ctx.free(sp);
1050 }
1051
1052 static void
1053 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1054     struct logio_entry_24xx *logio)
1055 {
1056         const char func[] = "LOGIO-IOCB";
1057         const char *type;
1058         struct qla_hw_data *ha = vha->hw;
1059         fc_port_t *fcport;
1060         srb_t *sp;
1061         struct srb_logio *lio;
1062         uint16_t data[2];
1063         uint32_t iop[2];
1064
1065         sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1066         if (!sp)
1067                 return;
1068
1069         type = NULL;
1070         lio = sp->ctx;
1071         switch (lio->ctx.type) {
1072         case SRB_LOGIN_CMD:
1073                 type = "login";
1074                 break;
1075         case SRB_LOGOUT_CMD:
1076                 type = "logout";
1077                 break;
1078         default:
1079                 qla_printk(KERN_WARNING, ha,
1080                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1081                     lio->ctx.type);
1082                 return;
1083         }
1084
1085         del_timer(&lio->ctx.timer);
1086         fcport = sp->fcport;
1087
1088         data[0] = data[1] = 0;
1089         if (logio->entry_status) {
1090                 DEBUG2(printk(KERN_WARNING
1091                     "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
1092                     fcport->vha->host_no, sp->handle, type,
1093                     logio->entry_status));
1094                 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1095
1096                 data[0] = MBS_COMMAND_ERROR;
1097                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1098                     QLA_LOGIO_LOGIN_RETRIED: 0;
1099                 goto done_post_logio_done_work;
1100         }
1101
1102         if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1103                 DEBUG2(printk(KERN_DEBUG
1104                     "scsi(%ld:%x): Async-%s complete - iop0=%x.\n",
1105                     fcport->vha->host_no, sp->handle, type,
1106                     le32_to_cpu(logio->io_parameter[0])));
1107
1108                 data[0] = MBS_COMMAND_COMPLETE;
1109                 if (lio->ctx.type == SRB_LOGOUT_CMD)
1110                         goto done_post_logio_done_work;
1111
1112                 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1113                 if (iop[0] & BIT_4) {
1114                         fcport->port_type = FCT_TARGET;
1115                         if (iop[0] & BIT_8)
1116                                 fcport->flags |= FCF_FCP2_DEVICE;
1117                 }
1118                 if (iop[0] & BIT_5)
1119                         fcport->port_type = FCT_INITIATOR;
1120                 if (logio->io_parameter[7] || logio->io_parameter[8])
1121                         fcport->supported_classes |= FC_COS_CLASS2;
1122                 if (logio->io_parameter[9] || logio->io_parameter[10])
1123                         fcport->supported_classes |= FC_COS_CLASS3;
1124
1125                 goto done_post_logio_done_work;
1126         }
1127
1128         iop[0] = le32_to_cpu(logio->io_parameter[0]);
1129         iop[1] = le32_to_cpu(logio->io_parameter[1]);
1130         switch (iop[0]) {
1131         case LSC_SCODE_PORTID_USED:
1132                 data[0] = MBS_PORT_ID_USED;
1133                 data[1] = LSW(iop[1]);
1134                 break;
1135         case LSC_SCODE_NPORT_USED:
1136                 data[0] = MBS_LOOP_ID_USED;
1137                 break;
1138         case LSC_SCODE_CMD_FAILED:
1139                 if ((iop[1] & 0xff) == 0x05) {
1140                         data[0] = MBS_NOT_LOGGED_IN;
1141                         break;
1142                 }
1143                 /* Fall through. */
1144         default:
1145                 data[0] = MBS_COMMAND_ERROR;
1146                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1147                     QLA_LOGIO_LOGIN_RETRIED: 0;
1148                 break;
1149         }
1150
1151         DEBUG2(printk(KERN_WARNING
1152             "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
1153             fcport->vha->host_no, sp->handle, type,
1154             le16_to_cpu(logio->comp_status),
1155             le32_to_cpu(logio->io_parameter[0]),
1156             le32_to_cpu(logio->io_parameter[1])));
1157
1158 done_post_logio_done_work:
1159         lio->ctx.type == SRB_LOGIN_CMD ?
1160             qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
1161             qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
1162
1163         lio->ctx.free(sp);
1164 }
1165
1166 /**
1167  * qla2x00_process_response_queue() - Process response queue entries.
1168  * @rsp: response queue to process
1169  */
1170 void
1171 qla2x00_process_response_queue(struct rsp_que *rsp)
1172 {
1173         struct scsi_qla_host *vha;
1174         struct qla_hw_data *ha = rsp->hw;
1175         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1176         sts_entry_t     *pkt;
1177         uint16_t        handle_cnt;
1178         uint16_t        cnt;
1179
1180         vha = pci_get_drvdata(ha->pdev);
1181
1182         if (!vha->flags.online)
1183                 return;
1184
1185         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1186                 pkt = (sts_entry_t *)rsp->ring_ptr;
1187
1188                 rsp->ring_index++;
1189                 if (rsp->ring_index == rsp->length) {
1190                         rsp->ring_index = 0;
1191                         rsp->ring_ptr = rsp->ring;
1192                 } else {
1193                         rsp->ring_ptr++;
1194                 }
1195
1196                 if (pkt->entry_status != 0) {
1197                         DEBUG3(printk(KERN_INFO
1198                             "scsi(%ld): Process error entry.\n", vha->host_no));
1199
1200                         qla2x00_error_entry(vha, rsp, pkt);
1201                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1202                         wmb();
1203                         continue;
1204                 }
1205
1206                 switch (pkt->entry_type) {
1207                 case STATUS_TYPE:
1208                         qla2x00_status_entry(vha, rsp, pkt);
1209                         break;
1210                 case STATUS_TYPE_21:
1211                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1212                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1213                                 qla2x00_process_completed_request(vha, rsp->req,
1214                                     ((sts21_entry_t *)pkt)->handle[cnt]);
1215                         }
1216                         break;
1217                 case STATUS_TYPE_22:
1218                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1219                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1220                                 qla2x00_process_completed_request(vha, rsp->req,
1221                                     ((sts22_entry_t *)pkt)->handle[cnt]);
1222                         }
1223                         break;
1224                 case STATUS_CONT_TYPE:
1225                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1226                         break;
1227                 case MBX_IOCB_TYPE:
1228                         qla2x00_mbx_iocb_entry(vha, rsp->req,
1229                             (struct mbx_entry *)pkt);
                        break;
1230                 default:
1231                         /* Type Not Supported. */
1232                         DEBUG4(printk(KERN_WARNING
1233                             "scsi(%ld): Received unknown response pkt type %x "
1234                             "entry status=%x.\n",
1235                             vha->host_no, pkt->entry_type, pkt->entry_status));
1236                         break;
1237                 }
1238                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1239                 wmb();
1240         }
1241
1242         /* Adjust ring index */
1243         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1244 }
1245
1246 static inline void
1247 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
1248         struct rsp_que *rsp)
1249 {
1250         struct scsi_cmnd *cp = sp->cmd;
1251
1252         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1253                 sense_len = SCSI_SENSE_BUFFERSIZE;
1254
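        /*
         * Only the first chunk of sense data (at most 32 bytes here) is
         * copied from this IOCB; request_sense_ptr/request_sense_length
         * track the remainder, and a non-zero remainder parks the SRB on
         * rsp->status_srb so the Status Continuation entries that follow
         * can finish the copy.
         */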
1255         sp->request_sense_length = sense_len;
1256         sp->request_sense_ptr = cp->sense_buffer;
1257         if (sp->request_sense_length > 32)
1258                 sense_len = 32;
1259
1260         memcpy(cp->sense_buffer, sense_data, sense_len);
1261
1262         sp->request_sense_ptr += sense_len;
1263         sp->request_sense_length -= sense_len;
1264         if (sp->request_sense_length != 0)
1265                 rsp->status_srb = sp;
1266
1267         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1268             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
1269             cp->device->channel, cp->device->id, cp->device->lun, cp,
1270             cp->serial_number));
1271         if (sense_len)
1272                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1273 }
1274
1275 /**
1276  * qla2x00_status_entry() - Process a Status IOCB entry.
1277  * @vha: SCSI driver HA context
1278  * @pkt: Entry pointer
1279  */
1280 static void
1281 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1282 {
1283         srb_t           *sp;
1284         fc_port_t       *fcport;
1285         struct scsi_cmnd *cp;
1286         sts_entry_t *sts;
1287         struct sts_entry_24xx *sts24;
1288         uint16_t        comp_status;
1289         uint16_t        scsi_status;
1290         uint8_t         lscsi_status;
1291         int32_t         resid;
1292         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
1293         uint8_t         *rsp_info, *sense_data;
1294         struct qla_hw_data *ha = vha->hw;
1295         uint32_t handle;
1296         uint16_t que;
1297         struct req_que *req;
1298
1299         sts = (sts_entry_t *) pkt;
1300         sts24 = (struct sts_entry_24xx *) pkt;
1301         if (IS_FWI2_CAPABLE(ha)) {
1302                 comp_status = le16_to_cpu(sts24->comp_status);
1303                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1304         } else {
1305                 comp_status = le16_to_cpu(sts->comp_status);
1306                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1307         }
1308         handle = (uint32_t) LSW(sts->handle);
1309         que = MSW(sts->handle);
1310         req = ha->req_q_map[que];
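        /*
         * The 32-bit IOCB handle packs the request queue id in the upper
         * 16 bits and the outstanding-command index in the lower 16 bits,
         * as unpacked with MSW()/LSW() above.
         */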
1311         /* Fast path completion. */
1312         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1313                 qla2x00_process_completed_request(vha, req, handle);
1314
1315                 return;
1316         }
1317
1318         /* Validate handle. */
1319         if (handle < MAX_OUTSTANDING_COMMANDS) {
1320                 sp = req->outstanding_cmds[handle];
1321                 req->outstanding_cmds[handle] = NULL;
1322         } else
1323                 sp = NULL;
1324
1325         if (sp == NULL) {
1326                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
1327                     vha->host_no));
1328                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
1329
1330                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1331                 qla2xxx_wake_dpc(vha);
1332                 return;
1333         }
1334         cp = sp->cmd;
1335         if (cp == NULL) {
1336                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1337                     "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1338                 qla_printk(KERN_WARNING, ha,
1339                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
1340
1341                 return;
1342         }
1343
1344         lscsi_status = scsi_status & STATUS_MASK;
1345
1346         fcport = sp->fcport;
1347
1348         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1349         if (IS_FWI2_CAPABLE(ha)) {
1350                 sense_len = le32_to_cpu(sts24->sense_len);
1351                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1352                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1353                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1354                 rsp_info = sts24->data;
1355                 sense_data = sts24->data;
1356                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1357         } else {
1358                 sense_len = le16_to_cpu(sts->req_sense_length);
1359                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1360                 resid_len = le32_to_cpu(sts->residual_length);
1361                 rsp_info = sts->rsp_info;
1362                 sense_data = sts->req_sense_data;
1363         }
1364
1365         /* Check for any FCP transport errors. */
1366         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1367                 /* Sense data lies beyond any FCP RESPONSE data. */
1368                 if (IS_FWI2_CAPABLE(ha))
1369                         sense_data += rsp_info_len;
1370                 if (rsp_info_len > 3 && rsp_info[3]) {
1371                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1372                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1373                             "retrying command\n", vha->host_no,
1374                             cp->device->channel, cp->device->id,
1375                             cp->device->lun, rsp_info_len, rsp_info[0],
1376                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1377                             rsp_info[5], rsp_info[6], rsp_info[7]));
1378
1379                         cp->result = DID_BUS_BUSY << 16;
1380                         qla2x00_sp_compl(ha, sp);
1381                         return;
1382                 }
1383         }
1384
1385         /* Check for overrun. */
1386         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1387             scsi_status & SS_RESIDUAL_OVER)
1388                 comp_status = CS_DATA_OVERRUN;
1389
1390         /*
1391          * Based on the host and SCSI status, generate the Linux status code.
1392          */
1393         switch (comp_status) {
1394         case CS_COMPLETE:
1395         case CS_QUEUE_FULL:
1396                 if (scsi_status == 0) {
1397                         cp->result = DID_OK << 16;
1398                         break;
1399                 }
1400                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1401                         resid = resid_len;
1402                         scsi_set_resid(cp, resid);
1403
1404                         if (!lscsi_status &&
1405                             ((unsigned)(scsi_bufflen(cp) - resid) <
1406                              cp->underflow)) {
1407                                 qla_printk(KERN_INFO, ha,
1408                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1409                                            "detected (%x of %x bytes)...returning "
1410                                            "error status.\n", vha->host_no,
1411                                            cp->device->channel, cp->device->id,
1412                                            cp->device->lun, resid,
1413                                            scsi_bufflen(cp));
1414
1415                                 cp->result = DID_ERROR << 16;
1416                                 break;
1417                         }
1418                 }
1419                 cp->result = DID_OK << 16 | lscsi_status;
1420
1421                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1422                         DEBUG2(printk(KERN_INFO
1423                             "scsi(%ld): QUEUE FULL status detected "
1424                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1425                             scsi_status));
1426
1427                         /* Adjust queue depth for all luns on the port. */
1428                         if (!ql2xqfulltracking)
1429                                 break;
1430                         fcport->last_queue_full = jiffies;
1431                         starget_for_each_device(cp->device->sdev_target,
1432                             fcport, qla2x00_adjust_sdev_qdepth_down);
1433                         break;
1434                 }
1435                 if (lscsi_status != SS_CHECK_CONDITION)
1436                         break;
1437
1438                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1439                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1440                         break;
1441
1442                 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1443                 break;
1444
1445         case CS_DATA_UNDERRUN:
1446                 resid = resid_len;
1447                 /* Use F/W calculated residual length. */
1448                 if (IS_FWI2_CAPABLE(ha)) {
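                             /*
                              * If the target did not flag an underrun, or its
                              * residual disagrees with the firmware's, ignore
                              * the target-reported SCSI status and rely on the
                              * firmware residual instead.
                              */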
1449                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1450                                 lscsi_status = 0;
1451                         } else if (resid != fw_resid_len) {
1452                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1453                                 lscsi_status = 0;
1454                         }
1455                         resid = fw_resid_len;
1456                 }
1457
1458                 if (scsi_status & SS_RESIDUAL_UNDER) {
1459                         scsi_set_resid(cp, resid);
1460                 } else {
1461                         DEBUG2(printk(KERN_INFO
1462                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1463                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1464                             "os_underflow=0x%x\n", vha->host_no,
1465                             cp->device->id, cp->device->lun, comp_status,
1466                             scsi_status, resid_len, resid, cp->cmnd[0],
1467                             cp->underflow));
1468
1469                 }
1470
1471                 /*
1472                  * If the SCSI status is non-zero, report it to the
1473                  * mid-layer along with DID_OK.
1474                  */
1475                 if (lscsi_status != 0) {
1476                         cp->result = DID_OK << 16 | lscsi_status;
1477
1478                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1479                                 DEBUG2(printk(KERN_INFO
1480                                     "scsi(%ld): QUEUE FULL status detected "
1481                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1482                                     scsi_status));
1483
1484                                 /*
1485                                  * Adjust queue depth for all luns on the
1486                                  * port.
1487                                  */
1488                                 if (!ql2xqfulltracking)
1489                                         break;
1490                                 fcport->last_queue_full = jiffies;
1491                                 starget_for_each_device(
1492                                     cp->device->sdev_target, fcport,
1493                                     qla2x00_adjust_sdev_qdepth_down);
1494                                 break;
1495                         }
1496                         if (lscsi_status != SS_CHECK_CONDITION)
1497                                 break;
1498
1499                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1500                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1501                                 break;
1502
1503                         qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1504                 } else {
1505                         /*
1506                          * If RISC reports underrun and target does not report
1507                          * it then we must have a lost frame, so tell upper
1508                          * layer to retry it by reporting an error.
1509                          */
1510                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1511                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1512                                               "frame(s) detected (%x of %x bytes)..."
1513                                               "retrying command.\n",
1514                                         vha->host_no, cp->device->channel,
1515                                         cp->device->id, cp->device->lun, resid,
1516                                         scsi_bufflen(cp)));
1517
1518                                 scsi_set_resid(cp, resid);
1519                                 cp->result = DID_ERROR << 16;
1520                                 break;
1521                         }
1522
1523                         /* Handle mid-layer underflow */
1524                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1525                             cp->underflow) {
1526                                 qla_printk(KERN_INFO, ha,
1527                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1528                                            "detected (%x of %x bytes)...returning "
1529                                            "error status.\n", vha->host_no,
1530                                            cp->device->channel, cp->device->id,
1531                                            cp->device->lun, resid,
1532                                            scsi_bufflen(cp));
1533
1534                                 cp->result = DID_ERROR << 16;
1535                                 break;
1536                         }
1537
1538                         /* Everybody online, looking good... */
1539                         cp->result = DID_OK << 16;
1540                 }
1541                 break;
1542
1543         case CS_DATA_OVERRUN:
1544                 DEBUG2(printk(KERN_INFO
1545                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1546                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1547                     scsi_status));
1548                 DEBUG2(printk(KERN_INFO
1549                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1550                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1551                     cp->cmnd[4], cp->cmnd[5]));
1552                 DEBUG2(printk(KERN_INFO
1553                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1554                     "status!\n",
1555                     cp->serial_number, scsi_bufflen(cp), resid_len));
1556
1557                 cp->result = DID_ERROR << 16;
1558                 break;
1559
1560         case CS_PORT_LOGGED_OUT:
1561         case CS_PORT_CONFIG_CHG:
1562         case CS_PORT_BUSY:
1563         case CS_INCOMPLETE:
1564         case CS_PORT_UNAVAILABLE:
1565                 /*
1566                  * If the port is in the Target Down state, return all I/Os
1567                  * for this target with DID_NO_CONNECT; otherwise queue the
1568                  * I/Os in the retry_queue.
1569                  */
1570                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1571                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1572                     vha->host_no, cp->device->id, cp->device->lun,
1573                     cp->serial_number, comp_status,
1574                     atomic_read(&fcport->state)));
1575
1576                 /*
1577                  * We are going to have the fc class block the rport
1578                  * while we try to recover so instruct the mid layer
1579                  * to requeue until the class decides how to handle this.
1580                  */
1581                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1582                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1583                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1584                 break;
1585
1586         case CS_RESET:
1587                 DEBUG2(printk(KERN_INFO
1588                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1589                     vha->host_no, comp_status, scsi_status));
1590
1591                 cp->result = DID_RESET << 16;
1592                 break;
1593
1594         case CS_ABORTED:
1595                 /*
1596                  * hv2.19.12 - DID_ABORT does not retry the request. If we
1597                  * aborted this request then abort; otherwise it must be a
1598                  * reset.
1599                  */
1600                 DEBUG2(printk(KERN_INFO
1601                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1602                     vha->host_no, comp_status, scsi_status));
1603
1604                 cp->result = DID_RESET << 16;
1605                 break;
1606
1607         case CS_TIMEOUT:
1608                 /*
1609                  * We are going to have the fc class block the rport
1610                  * while we try to recover so instruct the mid layer
1611                  * to requeue until the class decides how to handle this.
1612                  */
1613                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1614
1615                 if (IS_FWI2_CAPABLE(ha)) {
1616                         DEBUG2(printk(KERN_INFO
1617                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1618                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1619                             cp->device->id, cp->device->lun, comp_status,
1620                             scsi_status));
1621                         break;
1622                 }
1623                 DEBUG2(printk(KERN_INFO
1624                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1625                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1626                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1627                     le16_to_cpu(sts->status_flags)));
1628
1629                 /* Check to see if logout occurred. */
1630                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1631                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1632                 break;
1633
1634         default:
1635                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1636                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1637                 qla_printk(KERN_INFO, ha,
1638                     "Unknown status detected 0x%x-0x%x.\n",
1639                     comp_status, scsi_status);
1640
1641                 cp->result = DID_ERROR << 16;
1642                 break;
1643         }
1644
1645         /* Place command on done queue. */
1646         if (rsp->status_srb == NULL)
1647                 qla2x00_sp_compl(ha, sp);
1648 }
1649
1650 /**
1651  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1652  * @rsp: response queue
1653  * @pkt: Entry pointer
1654  *
1655  * Copies extended sense data into the command's sense buffer.
1656  */
1657 static void
1658 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1659 {
1660         uint8_t         sense_sz = 0;
1661         struct qla_hw_data *ha = rsp->hw;
1662         srb_t           *sp = rsp->status_srb;
1663         struct scsi_cmnd *cp;
1664
1665         if (sp != NULL && sp->request_sense_length != 0) {
1666                 cp = sp->cmd;
1667                 if (cp == NULL) {
1668                         DEBUG2(printk("%s(): Cmd already returned to the OS "
1669                             "sp=%p.\n", __func__, sp));
1670                         qla_printk(KERN_INFO, ha,
1671                             "cmd is NULL: already returned to OS (sp=%p)\n",
1672                             sp);
1673
1674                         rsp->status_srb = NULL;
1675                         return;
1676                 }
1677
1678                 if (sp->request_sense_length > sizeof(pkt->data)) {
1679                         sense_sz = sizeof(pkt->data);
1680                 } else {
1681                         sense_sz = sp->request_sense_length;
1682                 }
1683
1684                 /* Move sense data. */
1685                 if (IS_FWI2_CAPABLE(ha))
1686                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1687                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1688                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1689
1690                 sp->request_sense_ptr += sense_sz;
1691                 sp->request_sense_length -= sense_sz;
1692
1693                 /* Place command on done queue. */
1694                 if (sp->request_sense_length == 0) {
1695                         rsp->status_srb = NULL;
1696                         qla2x00_sp_compl(ha, sp);
1697                 }
1698         }
1699 }
1700
1701 /**
1702  * qla2x00_error_entry() - Process an error entry.
1703  * @vha: SCSI driver HA context
      * @rsp: response queue that produced the entry
1704  * @pkt: Entry pointer
1705  */
1706 static void
1707 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1708 {
1709         srb_t *sp;
1710         struct qla_hw_data *ha = vha->hw;
1711         uint32_t handle = LSW(pkt->handle);
1712         uint16_t que = MSW(pkt->handle);
1713         struct req_que *req = ha->req_q_map[que];
1714 #if defined(QL_DEBUG_LEVEL_2)
1715         if (pkt->entry_status & RF_INV_E_ORDER)
1716                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1717         else if (pkt->entry_status & RF_INV_E_COUNT)
1718                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1719         else if (pkt->entry_status & RF_INV_E_PARAM)
1720                 qla_printk(KERN_ERR, ha,
1721                     "%s: Invalid Entry Parameter\n", __func__);
1722         else if (pkt->entry_status & RF_INV_E_TYPE)
1723                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1724         else if (pkt->entry_status & RF_BUSY)
1725                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1726         else
1727                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1728 #endif
1729
1730         /* Validate handle. */
1731         if (handle < MAX_OUTSTANDING_COMMANDS)
1732                 sp = req->outstanding_cmds[handle];
1733         else
1734                 sp = NULL;
1735
1736         if (sp) {
1737                 /* Free outstanding command slot. */
1738                 req->outstanding_cmds[handle] = NULL;
1739
1740                 /* Bad payload or header */
1741                 if (pkt->entry_status &
1742                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1743                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1744                         sp->cmd->result = DID_ERROR << 16;
1745                 } else if (pkt->entry_status & RF_BUSY) {
1746                         sp->cmd->result = DID_BUS_BUSY << 16;
1747                 } else {
1748                         sp->cmd->result = DID_ERROR << 16;
1749                 }
1750                 qla2x00_sp_compl(ha, sp);
1751
1752         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1753             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
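                     /*
                      * A command-type entry with no matching outstanding
                      * command means the driver and firmware are out of sync;
                      * schedule an ISP abort to resynchronize.
                      */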
1754                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1755                     vha->host_no));
1756                 qla_printk(KERN_WARNING, ha,
1757                     "Error entry - invalid handle\n");
1758
1759                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1760                 qla2xxx_wake_dpc(vha);
1761         }
1762 }
1763
1764 /**
1765  * qla24xx_mbx_completion() - Process mailbox command completions.
1766  * @vha: SCSI driver HA context
1767  * @mb0: Mailbox0 register
1768  */
1769 static void
1770 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1771 {
1772         uint16_t        cnt;
1773         uint16_t __iomem *wptr;
1774         struct qla_hw_data *ha = vha->hw;
1775         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1776
1777         /* Load return mailbox registers. */
1778         ha->flags.mbox_int = 1;
1779         ha->mailbox_out[0] = mb0;
1780         wptr = (uint16_t __iomem *)&reg->mailbox1;
1781
1782         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1783                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1784                 wptr++;
1785         }
1786
1787         if (ha->mcp) {
1788                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1789                     __func__, vha->host_no, ha->mcp->mb[0]));
1790         } else {
1791                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1792                     __func__, vha->host_no));
1793         }
1794 }
1795
1796 /**
1797  * qla24xx_process_response_queue() - Process response queue entries.
1798  * @vha: SCSI driver HA context
      * @rsp: response queue to process
1799  */
1800 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1801         struct rsp_que *rsp)
1802 {
1803         struct sts_entry_24xx *pkt;
1804
1805         if (!vha->flags.online)
1806                 return;
1807
1808         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1809                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1810
1811                 rsp->ring_index++;
1812                 if (rsp->ring_index == rsp->length) {
1813                         rsp->ring_index = 0;
1814                         rsp->ring_ptr = rsp->ring;
1815                 } else {
1816                         rsp->ring_ptr++;
1817                 }
1818
1819                 if (pkt->entry_status != 0) {
1820                         DEBUG3(printk(KERN_INFO
1821                             "scsi(%ld): Process error entry.\n", vha->host_no));
1822
1823                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1824                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1825                         wmb();
1826                         continue;
1827                 }
1828
1829                 switch (pkt->entry_type) {
1830                 case STATUS_TYPE:
1831                         qla2x00_status_entry(vha, rsp, pkt);
1832                         break;
1833                 case STATUS_CONT_TYPE:
1834                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1835                         break;
1836                 case VP_RPT_ID_IOCB_TYPE:
1837                         qla24xx_report_id_acquisition(vha,
1838                             (struct vp_rpt_id_entry_24xx *)pkt);
1839                         break;
1840                 case LOGINOUT_PORT_IOCB_TYPE:
1841                         qla24xx_logio_entry(vha, rsp->req,
1842                             (struct logio_entry_24xx *)pkt);
1843                         break;
1844                 default:
1845                         /* Type Not Supported. */
1846                         DEBUG4(printk(KERN_WARNING
1847                             "scsi(%ld): Received unknown response pkt type %x "
1848                             "entry status=%x.\n",
1849                             vha->host_no, pkt->entry_type, pkt->entry_status));
1850                         break;
1851                 }
1852                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1853                 wmb();
1854         }
1855
1856         /* Adjust ring index */
1857         WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
1858 }
1859
1860 static void
1861 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1862 {
1863         int rval;
1864         uint32_t cnt;
1865         struct qla_hw_data *ha = vha->hw;
1866         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1867
1868         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1869                 return;
1870
1871         rval = QLA_SUCCESS;
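             /*
              * Select register window 0x7C00 and wait for the window-select
              * write to take effect before sampling the status bit below.
              */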
1872         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1873         RD_REG_DWORD(&reg->iobase_addr);
1874         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1875         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1876             rval == QLA_SUCCESS; cnt--) {
1877                 if (cnt) {
1878                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1879                         udelay(10);
1880                 } else
1881                         rval = QLA_FUNCTION_TIMEOUT;
1882         }
1883         if (rval == QLA_SUCCESS)
1884                 goto next_test;
1885
1886         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1887         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1888             rval == QLA_SUCCESS; cnt--) {
1889                 if (cnt) {
1890                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1891                         udelay(10);
1892                 } else
1893                         rval = QLA_FUNCTION_TIMEOUT;
1894         }
1895         if (rval != QLA_SUCCESS)
1896                 goto done;
1897
1898 next_test:
1899         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1900                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1901
1902 done:
1903         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1904         RD_REG_DWORD(&reg->iobase_window);
1905 }
1906
1907 /**
1908  * qla24xx_intr_handler() - Process interrupts for ISP24xx and later FWI2-capable adapters.
1909  * @irq: interrupt number
1910  * @dev_id: SCSI driver HA context
1911  *
1912  * Called by system whenever the host adapter generates an interrupt.
1913  *
1914  * Returns handled flag.
1915  */
1916 irqreturn_t
1917 qla24xx_intr_handler(int irq, void *dev_id)
1918 {
1919         scsi_qla_host_t *vha;
1920         struct qla_hw_data *ha;
1921         struct device_reg_24xx __iomem *reg;
1922         int             status;
1923         unsigned long   iter;
1924         uint32_t        stat;
1925         uint32_t        hccr;
1926         uint16_t        mb[4];
1927         struct rsp_que *rsp;
1928         unsigned long   flags;
1929
1930         rsp = (struct rsp_que *) dev_id;
1931         if (!rsp) {
1932                 printk(KERN_INFO
1933                     "%s(): NULL response queue pointer\n", __func__);
1934                 return IRQ_NONE;
1935         }
1936
1937         ha = rsp->hw;
1938         reg = &ha->iobase->isp24;
1939         status = 0;
1940
1941         spin_lock_irqsave(&ha->hardware_lock, flags);
1942         vha = pci_get_drvdata(ha->pdev);
1943         for (iter = 50; iter--; ) {
1944                 stat = RD_REG_DWORD(&reg->host_status);
1945                 if (stat & HSRX_RISC_PAUSED) {
1946                         if (pci_channel_offline(ha->pdev))
1947                                 break;
1948
1949                         hccr = RD_REG_DWORD(&reg->hccr);
1950
1951                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1952                             "Dumping firmware!\n", hccr);
1953
1954                         qla2xxx_check_risc_status(vha);
1955
1956                         ha->isp_ops->fw_dump(vha, 1);
1957                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1958                         break;
1959                 } else if ((stat & HSRX_RISC_INT) == 0)
1960                         break;
1961
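                     /*
                      * The low byte of host_status identifies the source:
                      * mailbox command completion (0x1/0x2/0x10/0x11),
                      * asynchronous event (0x12) or response queue update
                      * (0x13/0x14).
                      */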
1962                 switch (stat & 0xff) {
1963                 case 0x1:
1964                 case 0x2:
1965                 case 0x10:
1966                 case 0x11:
1967                         qla24xx_mbx_completion(vha, MSW(stat));
1968                         status |= MBX_INTERRUPT;
1969
1970                         break;
1971                 case 0x12:
1972                         mb[0] = MSW(stat);
1973                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1974                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1975                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1976                         qla2x00_async_event(vha, rsp, mb);
1977                         break;
1978                 case 0x13:
1979                 case 0x14:
1980                         qla24xx_process_response_queue(vha, rsp);
1981                         break;
1982                 default:
1983                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1984                             "(%d).\n",
1985                             vha->host_no, stat & 0xff));
1986                         break;
1987                 }
1988                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1989                 RD_REG_DWORD_RELAXED(&reg->hccr);
1990         }
1991         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1992
1993         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1994             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1995                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1996                 complete(&ha->mbx_intr_comp);
1997         }
1998
1999         return IRQ_HANDLED;
2000 }
2001
2002 static irqreturn_t
2003 qla24xx_msix_rsp_q(int irq, void *dev_id)
2004 {
2005         struct qla_hw_data *ha;
2006         struct rsp_que *rsp;
2007         struct device_reg_24xx __iomem *reg;
2008         struct scsi_qla_host *vha;
2009
2010         rsp = (struct rsp_que *) dev_id;
2011         if (!rsp) {
2012                 printk(KERN_INFO
2013                 "%s(): NULL response queue pointer\n", __func__);
2014                 return IRQ_NONE;
2015         }
2016         ha = rsp->hw;
2017         reg = &ha->iobase->isp24;
2018
2019         spin_lock_irq(&ha->hardware_lock);
2020
2021         vha = qla25xx_get_host(rsp);
2022         qla24xx_process_response_queue(vha, rsp);
2023         if (!ha->mqenable) {
2024                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2025                 RD_REG_DWORD_RELAXED(&reg->hccr);
2026         }
2027         spin_unlock_irq(&ha->hardware_lock);
2028
2029         return IRQ_HANDLED;
2030 }
2031
2032 static irqreturn_t
2033 qla25xx_msix_rsp_q(int irq, void *dev_id)
2034 {
2035         struct qla_hw_data *ha;
2036         struct rsp_que *rsp;
2037
2038         rsp = (struct rsp_que *) dev_id;
2039         if (!rsp) {
2040                 printk(KERN_INFO
2041                         "%s(): NULL response queue pointer\n", __func__);
2042                 return IRQ_NONE;
2043         }
2044         ha = rsp->hw;
2045
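             /*
              * Defer processing of this response queue to its work item,
              * queued on CPU (rsp->id - 1).
              */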
2046         queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2047
2048         return IRQ_HANDLED;
2049 }
2050
2051 static irqreturn_t
2052 qla24xx_msix_default(int irq, void *dev_id)
2053 {
2054         scsi_qla_host_t *vha;
2055         struct qla_hw_data *ha;
2056         struct rsp_que *rsp;
2057         struct device_reg_24xx __iomem *reg;
2058         int             status;
2059         uint32_t        stat;
2060         uint32_t        hccr;
2061         uint16_t        mb[4];
2062
2063         rsp = (struct rsp_que *) dev_id;
2064         if (!rsp) {
2065                 DEBUG(printk(
2066                 "%s(): NULL response queue pointer\n", __func__));
2067                 return IRQ_NONE;
2068         }
2069         ha = rsp->hw;
2070         reg = &ha->iobase->isp24;
2071         status = 0;
2072
2073         spin_lock_irq(&ha->hardware_lock);
2074         vha = pci_get_drvdata(ha->pdev);
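             /*
              * Unlike the line-based handler, service at most one interrupt
              * cause per MSI-X invocation.
              */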
2075         do {
2076                 stat = RD_REG_DWORD(&reg->host_status);
2077                 if (stat & HSRX_RISC_PAUSED) {
2078                         if (pci_channel_offline(ha->pdev))
2079                                 break;
2080
2081                         hccr = RD_REG_DWORD(&reg->hccr);
2082
2083                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2084                             "Dumping firmware!\n", hccr);
2085
2086                         qla2xxx_check_risc_status(vha);
2087
2088                         ha->isp_ops->fw_dump(vha, 1);
2089                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2090                         break;
2091                 } else if ((stat & HSRX_RISC_INT) == 0)
2092                         break;
2093
2094                 switch (stat & 0xff) {
2095                 case 0x1:
2096                 case 0x2:
2097                 case 0x10:
2098                 case 0x11:
2099                         qla24xx_mbx_completion(vha, MSW(stat));
2100                         status |= MBX_INTERRUPT;
2101
2102                         break;
2103                 case 0x12:
2104                         mb[0] = MSW(stat);
2105                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2106                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2107                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2108                         qla2x00_async_event(vha, rsp, mb);
2109                         break;
2110                 case 0x13:
2111                 case 0x14:
2112                         qla24xx_process_response_queue(vha, rsp);
2113                         break;
2114                 default:
2115                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2116                             "(%d).\n",
2117                             vha->host_no, stat & 0xff));
2118                         break;
2119                 }
2120                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2121         } while (0);
2122         spin_unlock_irq(&ha->hardware_lock);
2123
2124         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2125             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2126                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2127                 complete(&ha->mbx_intr_comp);
2128         }
2129
2130         return IRQ_HANDLED;
2131 }
2132
2133 /* Interrupt handling helpers. */
2134
2135 struct qla_init_msix_entry {
2136         const char *name;
2137         irq_handler_t handler;
2138 };
2139
2140 static struct qla_init_msix_entry msix_entries[3] = {
2141         { "qla2xxx (default)", qla24xx_msix_default },
2142         { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2143         { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2144 };
2145
2146 static void
2147 qla24xx_disable_msix(struct qla_hw_data *ha)
2148 {
2149         int i;
2150         struct qla_msix_entry *qentry;
2151
2152         for (i = 0; i < ha->msix_count; i++) {
2153                 qentry = &ha->msix_entries[i];
2154                 if (qentry->have_irq)
2155                         free_irq(qentry->vector, qentry->rsp);
2156         }
2157         pci_disable_msix(ha->pdev);
2158         kfree(ha->msix_entries);
2159         ha->msix_entries = NULL;
2160         ha->flags.msix_enabled = 0;
2161 }
2162
2163 static int
2164 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2165 {
2166 #define MIN_MSIX_COUNT  2
2167         int i, ret;
2168         struct msix_entry *entries;
2169         struct qla_msix_entry *qentry;
2170
2171         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2172                                         GFP_KERNEL);
2173         if (!entries)
2174                 return -ENOMEM;
2175
2176         for (i = 0; i < ha->msix_count; i++)
2177                 entries[i].entry = i;
2178
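             /*
              * pci_enable_msix() returns 0 on success; a positive return is
              * the number of vectors actually available, so retry with that
              * reduced count.
              */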
2179         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2180         if (ret) {
2181                 if (ret < MIN_MSIX_COUNT)
2182                         goto msix_failed;
2183
2184                 qla_printk(KERN_WARNING, ha,
2185                         "MSI-X: Failed to enable support -- %d/%d\n"
2186                         " Retrying with %d vectors.\n", ha->msix_count, ret, ret);
2187                 ha->msix_count = ret;
2188                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2189                 if (ret) {
2190 msix_failed:
2191                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
2192                                 " support, giving up -- %d/%d\n",
2193                                 ha->msix_count, ret);
2194                         goto msix_out;
2195                 }
2196                 ha->max_rsp_queues = ha->msix_count - 1;
2197         }
2198         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2199                                 ha->msix_count, GFP_KERNEL);
2200         if (!ha->msix_entries) {
2201                 ret = -ENOMEM;
2202                 goto msix_out;
2203         }
2204         ha->flags.msix_enabled = 1;
2205
2206         for (i = 0; i < ha->msix_count; i++) {
2207                 qentry = &ha->msix_entries[i];
2208                 qentry->vector = entries[i].vector;
2209                 qentry->entry = entries[i].entry;
2210                 qentry->have_irq = 0;
2211                 qentry->rsp = NULL;
2212         }
2213
2214         /* Enable MSI-X vectors for the base queue */
2215         for (i = 0; i < 2; i++) {
2216                 qentry = &ha->msix_entries[i];
2217                 ret = request_irq(qentry->vector, msix_entries[i].handler,
2218                                         0, msix_entries[i].name, rsp);
2219                 if (ret) {
2220                         qla_printk(KERN_WARNING, ha,
2221                         "MSI-X: Unable to register handler -- %x/%d.\n",
2222                         qentry->vector, ret);
2223                         qla24xx_disable_msix(ha);
2224                         ha->mqenable = 0;
2225                         goto msix_out;
2226                 }
2227                 qentry->have_irq = 1;
2228                 qentry->rsp = rsp;
2229                 rsp->msix = qentry;
2230         }
2231
2232         /* Enable multiqueue mode when the MQ register window is present and more than one request/response queue is configured. */
2233         if (ha->mqiobase &&  (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2234                 ha->mqenable = 1;
2235
2236 msix_out:
2237         kfree(entries);
2238         return ret;
2239 }
2240
2241 int
2242 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2243 {
2244         int ret;
2245         device_reg_t __iomem *reg = ha->iobase;
2246
2247         /* If possible, enable MSI-X. */
2248         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2249             !IS_QLA8432(ha) && !IS_QLA8001(ha))
2250                 goto skip_msix;
2251
2252         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2253                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2254                 DEBUG2(qla_printk(KERN_WARNING, ha,
2255                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2256                         ha->pdev->revision, ha->fw_attributes));
2257
2258                 goto skip_msix;
2259         }
2260
2261         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2262             (ha->pdev->subsystem_device == 0x7040 ||
2263                 ha->pdev->subsystem_device == 0x7041 ||
2264                 ha->pdev->subsystem_device == 0x1705)) {
2265                 DEBUG2(qla_printk(KERN_WARNING, ha,
2266                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
2267                     ha->pdev->subsystem_vendor,
2268                     ha->pdev->subsystem_device));
2269
2270                 goto skip_msi;
2271         }
2272
2273         ret = qla24xx_enable_msix(ha, rsp);
2274         if (!ret) {
2275                 DEBUG2(qla_printk(KERN_INFO, ha,
2276                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2277                     ha->fw_attributes));
2278                 goto clear_risc_ints;
2279         }
2280         qla_printk(KERN_WARNING, ha,
2281             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
2282 skip_msix:
2283
2284         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2285             !IS_QLA8001(ha))
2286                 goto skip_msi;
2287
2288         ret = pci_enable_msi(ha->pdev);
2289         if (!ret) {
2290                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2291                 ha->flags.msi_enabled = 1;
2292         }
2293 skip_msi:
2294
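             /*
              * Register the handler on ha->pdev->irq; this path serves both
              * MSI and legacy INTa (the MSI-X path registers its vectors in
              * qla24xx_enable_msix() and skips this).
              */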
2295         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2296             IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2297         if (ret) {
2298                 qla_printk(KERN_WARNING, ha,
2299                     "Failed to reserve interrupt %d; already in use.\n",
2300                     ha->pdev->irq);
2301                 goto fail;
2302         }
2303         ha->flags.inta_enabled = 1;
2304 clear_risc_ints:
2305
2306         /*
2307          * FIXME: Noted that 8014s were being dropped during NK testing.
2308          * Timing deltas during MSI-X/INTa transitions?
2309          */
2310         if (IS_QLA81XX(ha))
2311                 goto fail;
2312         spin_lock_irq(&ha->hardware_lock);
2313         if (IS_FWI2_CAPABLE(ha)) {
2314                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2315                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2316         } else {
2317                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2318                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2319                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2320         }
2321         spin_unlock_irq(&ha->hardware_lock);
2322
2323 fail:
2324         return ret;
2325 }
2326
2327 void
2328 qla2x00_free_irqs(scsi_qla_host_t *vha)
2329 {
2330         struct qla_hw_data *ha = vha->hw;
2331         struct rsp_que *rsp = ha->rsp_q_map[0];
2332
2333         if (ha->flags.msix_enabled)
2334                 qla24xx_disable_msix(ha);
2335         else if (ha->flags.inta_enabled) {
2336                 free_irq(ha->pdev->irq, rsp);
2337                 pci_disable_msi(ha->pdev);
2338         }
2339 }
2340
2341
2342 int qla25xx_request_irq(struct rsp_que *rsp)
2343 {
2344         struct qla_hw_data *ha = rsp->hw;
2345         struct qla_init_msix_entry *intr = &msix_entries[2];
2346         struct qla_msix_entry *msix = rsp->msix;
2347         int ret;
2348
2349         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2350         if (ret) {
2351                 qla_printk(KERN_WARNING, ha,
2352                         "MSI-X: Unable to register handler -- %x/%d.\n",
2353                         msix->vector, ret);
2354                 return ret;
2355         }
2356         msix->have_irq = 1;
2357         msix->rsp = rsp;
2358         return ret;
2359 }
2360
2361 struct scsi_qla_host *
2362 qla25xx_get_host(struct rsp_que *rsp)
2363 {
2364         srb_t *sp;
2365         struct qla_hw_data *ha = rsp->hw;
2366         struct scsi_qla_host *vha = NULL;
2367         struct sts_entry_24xx *pkt;
2368         struct req_que *req;
2369         uint16_t que;
2370         uint32_t handle;
2371
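             /*
              * Resolve the owning host from the outstanding command referenced
              * by the next entry on the response ring; fall back to the base
              * (physical) host if the handle cannot be matched.
              */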
2372         pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2373         que = MSW(pkt->handle);
2374         handle = (uint32_t) LSW(pkt->handle);
2375         req = ha->req_q_map[que];
2376         if (handle < MAX_OUTSTANDING_COMMANDS) {
2377                 sp = req->outstanding_cmds[handle];
2378                 if (sp)
2379                         return  sp->fcport->vha;
2380                 else
2381                         goto base_que;
2382         }
2383 base_que:
2384         vha = pci_get_drvdata(ha->pdev);
2385         return vha;
2386 }