1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19
20 /**
21  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
22  * @irq: interrupt number
23  * @dev_id: SCSI driver context (response queue pointer)
24  *
25  * Called by system whenever the host adapter generates an interrupt.
26  *
27  * Returns handled flag.
28  */
29 irqreturn_t
30 qla2100_intr_handler(int irq, void *dev_id)
31 {
32         scsi_qla_host_t *vha;
33         struct qla_hw_data *ha;
34         struct device_reg_2xxx __iomem *reg;
35         int             status;
36         unsigned long   iter;
37         uint16_t        hccr;
38         uint16_t        mb[4];
39         struct rsp_que *rsp;
40         unsigned long   flags;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock_irqsave(&ha->hardware_lock, flags);
54         vha = pci_get_drvdata(ha->pdev);
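           /*
            * Poll and service pending RISC interrupt sources; bound the
            * loop to 50 iterations so the handler cannot spin forever.
            */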
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
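                                 /*
                                  * mb[0] in 0x4000-0x7fff is a mailbox command
                                  * completion; 0x8000-0xbfff is an asynchronous
                                  * event (see the range checks below).
                                  */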
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock_irqrestore(&ha->hardware_lock, flags);
106
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: interrupt number
119  * @dev_id: SCSI driver context (response queue pointer)
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137         unsigned long   flags;
138
139         rsp = (struct rsp_que *) dev_id;
140         if (!rsp) {
141                 printk(KERN_INFO
142                     "%s(): NULL response queue pointer\n", __func__);
143                 return (IRQ_NONE);
144         }
145
146         ha = rsp->hw;
147         reg = &ha->iobase->isp;
148         status = 0;
149
150         spin_lock_irqsave(&ha->hardware_lock, flags);
151         vha = pci_get_drvdata(ha->pdev);
152         for (iter = 50; iter--; ) {
153                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
154                 if (stat & HSR_RISC_PAUSED) {
155                         if (pci_channel_offline(ha->pdev))
156                                 break;
157
158                         hccr = RD_REG_WORD(&reg->hccr);
159                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
160                                 qla_printk(KERN_INFO, ha, "Parity error -- "
161                                     "HCCR=%x, Dumping firmware!\n", hccr);
162                         else
163                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
164                                     "HCCR=%x, Dumping firmware!\n", hccr);
165
166                         /*
167                          * Issue a "HARD" reset in order for the RISC
168                          * interrupt bit to be cleared.  Schedule a big
169                          * hammer to get out of the RISC PAUSED state.
170                          */
171                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
172                         RD_REG_WORD(&reg->hccr);
173
174                         ha->isp_ops->fw_dump(vha, 1);
175                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
176                         break;
177                 } else if ((stat & HSR_RISC_INT) == 0)
178                         break;
179
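                    /*
                     * The low byte of host_status selects the source:
                     * 0x1/0x2/0x10/0x11 mailbox completion, 0x12 asynchronous
                     * event, 0x13 response queue update, 0x15/0x16 fast-post
                     * completions.
                     */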
180                 switch (stat & 0xff) {
181                 case 0x1:
182                 case 0x2:
183                 case 0x10:
184                 case 0x11:
185                         qla2x00_mbx_completion(vha, MSW(stat));
186                         status |= MBX_INTERRUPT;
187
188                         /* Release mailbox registers. */
189                         WRT_REG_WORD(&reg->semaphore, 0);
190                         break;
191                 case 0x12:
192                         mb[0] = MSW(stat);
193                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
194                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
195                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
196                         qla2x00_async_event(vha, rsp, mb);
197                         break;
198                 case 0x13:
199                         qla2x00_process_response_queue(rsp);
200                         break;
201                 case 0x15:
202                         mb[0] = MBA_CMPLT_1_16BIT;
203                         mb[1] = MSW(stat);
204                         qla2x00_async_event(vha, rsp, mb);
205                         break;
206                 case 0x16:
207                         mb[0] = MBA_SCSI_COMPLETION;
208                         mb[1] = MSW(stat);
209                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
210                         qla2x00_async_event(vha, rsp, mb);
211                         break;
212                 default:
213                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
214                             "(%d).\n",
215                             vha->host_no, stat & 0xff));
216                         break;
217                 }
218                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
219                 RD_REG_WORD_RELAXED(&reg->hccr);
220         }
221         spin_unlock_irqrestore(&ha->hardware_lock, flags);
222
223         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
224             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
225                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
226                 complete(&ha->mbx_intr_comp);
227         }
228
229         return (IRQ_HANDLED);
230 }
231
232 /**
233  * qla2x00_mbx_completion() - Process mailbox command completions.
234  * @vha: SCSI driver HA context
235  * @mb0: Mailbox0 register
236  */
237 static void
238 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
239 {
240         uint16_t        cnt;
241         uint16_t __iomem *wptr;
242         struct qla_hw_data *ha = vha->hw;
243         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
244
245         /* Load return mailbox registers. */
246         ha->flags.mbox_int = 1;
247         ha->mailbox_out[0] = mb0;
248         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
249
250         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
251                 if (IS_QLA2200(ha) && cnt == 8)
252                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
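                    /*
                     * Mailboxes 4 and 5 are read via
                     * qla2x00_debounce_register(), which re-reads the
                     * register until two consecutive reads agree.
                     */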
253                 if (cnt == 4 || cnt == 5)
254                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
255                 else
256                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
257
258                 wptr++;
259         }
260
261         if (ha->mcp) {
262                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
263                     __func__, vha->host_no, ha->mcp->mb[0]));
264         } else {
265                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
266                     __func__, vha->host_no));
267         }
268 }
269
270 static void
271 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
272 {
273         static char *event[] =
274                 { "Complete", "Request Notification", "Time Extension" };
275         int rval;
276         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
277         uint16_t __iomem *wptr;
278         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
279
280         /* Seed data -- mailbox1 -> mailbox7. */
281         wptr = (uint16_t __iomem *)&reg24->mailbox1;
282         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
283                 mb[cnt] = RD_REG_WORD(wptr);
284
285         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
286             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
287             event[aen & 0xff],
288             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
289
290         /* Acknowledgement needed? [Notify && non-zero timeout]. */
291         timeout = (descr >> 8) & 0xf;
292         if (aen != MBA_IDC_NOTIFY || !timeout)
293                 return;
294
295         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
296             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
297
298         rval = qla2x00_post_idc_ack_work(vha, mb);
299         if (rval != QLA_SUCCESS)
300                 qla_printk(KERN_WARNING, vha->hw,
301                     "IDC failed to post ACK.\n");
302 }
303
304 /**
305  * qla2x00_async_event() - Process asynchronous events.
306  * @vha: SCSI driver HA context
307  * @mb: Mailbox registers (0 - 3)
308  */
309 void
310 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
311 {
312 #define LS_UNKNOWN      2
313         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
314         char            *link_speed;
315         uint16_t        handle_cnt;
316         uint16_t        cnt;
317         uint32_t        handles[5];
318         struct qla_hw_data *ha = vha->hw;
319         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
320         uint32_t        rscn_entry, host_pid;
321         uint8_t         rscn_queue_index;
322         unsigned long   flags;
323
324         /* Setup to process RIO completion. */
325         handle_cnt = 0;
326         if (IS_QLA81XX(ha))
327                 goto skip_rio;
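            /*
             * Reduced Interrupt Operation: collapse the multi-handle 16/32-bit
             * completion forms into a single MBA_SCSI_COMPLETION, gathering
             * the I/O handles in handles[].
             */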
328         switch (mb[0]) {
329         case MBA_SCSI_COMPLETION:
330                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
331                 handle_cnt = 1;
332                 break;
333         case MBA_CMPLT_1_16BIT:
334                 handles[0] = mb[1];
335                 handle_cnt = 1;
336                 mb[0] = MBA_SCSI_COMPLETION;
337                 break;
338         case MBA_CMPLT_2_16BIT:
339                 handles[0] = mb[1];
340                 handles[1] = mb[2];
341                 handle_cnt = 2;
342                 mb[0] = MBA_SCSI_COMPLETION;
343                 break;
344         case MBA_CMPLT_3_16BIT:
345                 handles[0] = mb[1];
346                 handles[1] = mb[2];
347                 handles[2] = mb[3];
348                 handle_cnt = 3;
349                 mb[0] = MBA_SCSI_COMPLETION;
350                 break;
351         case MBA_CMPLT_4_16BIT:
352                 handles[0] = mb[1];
353                 handles[1] = mb[2];
354                 handles[2] = mb[3];
355                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
356                 handle_cnt = 4;
357                 mb[0] = MBA_SCSI_COMPLETION;
358                 break;
359         case MBA_CMPLT_5_16BIT:
360                 handles[0] = mb[1];
361                 handles[1] = mb[2];
362                 handles[2] = mb[3];
363                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
364                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
365                 handle_cnt = 5;
366                 mb[0] = MBA_SCSI_COMPLETION;
367                 break;
368         case MBA_CMPLT_2_32BIT:
369                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
370                 handles[1] = le32_to_cpu(
371                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
372                     RD_MAILBOX_REG(ha, reg, 6));
373                 handle_cnt = 2;
374                 mb[0] = MBA_SCSI_COMPLETION;
375                 break;
376         default:
377                 break;
378         }
379 skip_rio:
380         switch (mb[0]) {
381         case MBA_SCSI_COMPLETION:       /* Fast Post */
382                 if (!vha->flags.online)
383                         break;
384
385                 for (cnt = 0; cnt < handle_cnt; cnt++)
386                         qla2x00_process_completed_request(vha, rsp->req,
387                                 handles[cnt]);
388                 break;
389
390         case MBA_RESET:                 /* Reset */
391                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
392                         vha->host_no));
393
394                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
395                 break;
396
397         case MBA_SYSTEM_ERR:            /* System Error */
398                 qla_printk(KERN_INFO, ha,
399                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
400                     mb[1], mb[2], mb[3]);
401
402                 ha->isp_ops->fw_dump(vha, 1);
403
404                 if (IS_FWI2_CAPABLE(ha)) {
405                         if (mb[1] == 0 && mb[2] == 0) {
406                                 qla_printk(KERN_ERR, ha,
407                                     "Unrecoverable Hardware Error: adapter "
408                                     "marked OFFLINE!\n");
409                                 vha->flags.online = 0;
410                         } else
411                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
412                 } else if (mb[1] == 0) {
413                         qla_printk(KERN_INFO, ha,
414                             "Unrecoverable Hardware Error: adapter marked "
415                             "OFFLINE!\n");
416                         vha->flags.online = 0;
417                 } else
418                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
419                 break;
420
421         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
422                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
423                     vha->host_no));
424                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
425
426                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
427                 break;
428
429         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
430                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
431                     vha->host_no));
432                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
433
434                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
435                 break;
436
437         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
438                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
439                     vha->host_no));
440                 break;
441
442         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
443                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
444                     mb[1]));
445                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
446
447                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
448                         atomic_set(&vha->loop_state, LOOP_DOWN);
449                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
450                         qla2x00_mark_all_devices_lost(vha, 1);
451                 }
452
453                 if (vha->vp_idx) {
454                         atomic_set(&vha->vp_state, VP_FAILED);
455                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
456                 }
457
458                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
459                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
460
461                 vha->flags.management_server_logged_in = 0;
462                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
463                 break;
464
465         case MBA_LOOP_UP:               /* Loop Up Event */
466                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
467                         link_speed = link_speeds[0];
468                         ha->link_data_rate = PORT_SPEED_1GB;
469                 } else {
470                         link_speed = link_speeds[LS_UNKNOWN];
471                         if (mb[1] < 5)
472                                 link_speed = link_speeds[mb[1]];
473                         else if (mb[1] == 0x13)
474                                 link_speed = link_speeds[5];
475                         ha->link_data_rate = mb[1];
476                 }
477
478                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
479                     vha->host_no, link_speed));
480                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
481                     link_speed);
482
483                 vha->flags.management_server_logged_in = 0;
484                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
485                 break;
486
487         case MBA_LOOP_DOWN:             /* Loop Down Event */
488                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
489                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
490                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
491                     mb[1], mb[2], mb[3]);
492
493                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
494                         atomic_set(&vha->loop_state, LOOP_DOWN);
495                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
496                         vha->device_flags |= DFLG_NO_CABLE;
497                         qla2x00_mark_all_devices_lost(vha, 1);
498                 }
499
500                 if (vha->vp_idx) {
501                         atomic_set(&vha->vp_state, VP_FAILED);
502                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
503                 }
504
505                 vha->flags.management_server_logged_in = 0;
506                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
507                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
508                 break;
509
510         case MBA_LIP_RESET:             /* LIP reset occurred */
511                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
512                     vha->host_no, mb[1]));
513                 qla_printk(KERN_INFO, ha,
514                     "LIP reset occurred (%x).\n", mb[1]);
515
516                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
517                         atomic_set(&vha->loop_state, LOOP_DOWN);
518                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
519                         qla2x00_mark_all_devices_lost(vha, 1);
520                 }
521
522                 if (vha->vp_idx) {
523                         atomic_set(&vha->vp_state, VP_FAILED);
524                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
525                 }
526
527                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
528
529                 ha->operating_mode = LOOP;
530                 vha->flags.management_server_logged_in = 0;
531                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
532                 break;
533
534         /* case MBA_DCBX_COMPLETE: */
535         case MBA_POINT_TO_POINT:        /* Point-to-Point */
536                 if (IS_QLA2100(ha))
537                         break;
538
539                 if (IS_QLA81XX(ha))
540                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
541                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
542                 else
543                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
544                             "received.\n", vha->host_no));
545
546                 /*
547                  * Until there's a transition from loop down to loop up, treat
548                  * this as loop down only.
549                  */
550                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
551                         atomic_set(&vha->loop_state, LOOP_DOWN);
552                         if (!atomic_read(&vha->loop_down_timer))
553                                 atomic_set(&vha->loop_down_timer,
554                                     LOOP_DOWN_TIME);
555                         qla2x00_mark_all_devices_lost(vha, 1);
556                 }
557
558                 if (vha->vp_idx) {
559                         atomic_set(&vha->vp_state, VP_FAILED);
560                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
561                 }
562
563                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
564                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
565
566                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
567                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
568
569                 ha->flags.gpsc_supported = 1;
570                 vha->flags.management_server_logged_in = 0;
571                 break;
572
573         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
574                 if (IS_QLA2100(ha))
575                         break;
576
577                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
578                     "received.\n",
579                     vha->host_no));
580                 qla_printk(KERN_INFO, ha,
581                     "Configuration change detected: value=%x.\n", mb[1]);
582
583                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
584                         atomic_set(&vha->loop_state, LOOP_DOWN);
585                         if (!atomic_read(&vha->loop_down_timer))
586                                 atomic_set(&vha->loop_down_timer,
587                                     LOOP_DOWN_TIME);
588                         qla2x00_mark_all_devices_lost(vha, 1);
589                 }
590
591                 if (vha->vp_idx) {
592                         atomic_set(&vha->vp_state, VP_FAILED);
593                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
594                 }
595
596                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
597                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
598                 break;
599
600         case MBA_PORT_UPDATE:           /* Port database update */
601                 /*
602                  * Handle only global and vn-port update events
603                  *
604                  * Relevant inputs:
605                  * mb[1] = N_Port handle of changed port
606                  * OR 0xffff for global event
607                  * mb[2] = New login state
608                  * 7 = Port logged out
609                  * mb[3] = LSB is vp_idx, 0xff = all vps
610                  *
611                  * Skip processing if:
612                  *       Event is global, vp_idx is NOT all vps,
613                  *           vp_idx does not match
614                  *       Event is not global, vp_idx does not match
615                  */
616                 if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
617                         || (mb[1] != 0xffff)) {
618                         if (vha->vp_idx != (mb[3] & 0xff))
619                                 break;
620                 }
621
622                 /* Global event -- port logout or port unavailable. */
623                 if (mb[1] == 0xffff && mb[2] == 0x7) {
624                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
625                             vha->host_no));
626                         DEBUG(printk(KERN_INFO
627                             "scsi(%ld): Port unavailable %04x %04x %04x.\n",
628                             vha->host_no, mb[1], mb[2], mb[3]));
629
630                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
631                                 atomic_set(&vha->loop_state, LOOP_DOWN);
632                                 atomic_set(&vha->loop_down_timer,
633                                     LOOP_DOWN_TIME);
634                                 vha->device_flags |= DFLG_NO_CABLE;
635                                 qla2x00_mark_all_devices_lost(vha, 1);
636                         }
637
638                         if (vha->vp_idx) {
639                                 atomic_set(&vha->vp_state, VP_FAILED);
640                                 fc_vport_set_state(vha->fc_vport,
641                                     FC_VPORT_FAILED);
642                                 qla2x00_mark_all_devices_lost(vha, 1);
643                         }
644
645                         vha->flags.management_server_logged_in = 0;
646                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
647                         break;
648                 }
649
650                 /*
651                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
652                  * event etc. earlier indicating loop is down) then process
653                  * it.  Otherwise ignore it and wait for RSCN to come in.
654                  */
655                 atomic_set(&vha->loop_down_timer, 0);
656                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
657                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
658                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
659                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
660                             mb[2], mb[3]));
661                         break;
662                 }
663
664                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
665                     vha->host_no));
666                 DEBUG(printk(KERN_INFO
667                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
668                     vha->host_no, mb[1], mb[2], mb[3]));
669
670                 /*
671                  * Mark all devices as missing so we will login again.
672                  */
673                 atomic_set(&vha->loop_state, LOOP_UP);
674
675                 qla2x00_mark_all_devices_lost(vha, 1);
676
677                 vha->flags.rscn_queue_overflow = 1;
678
679                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
680                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
681                 break;
682
683         case MBA_RSCN_UPDATE:           /* State Change Registration */
684                 /* Check if the Vport has issued a SCR */
685                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
686                         break;
687                 /* Only handle SCNs for our Vport index. */
688                 if (vha->vp_idx != (mb[3] & 0xff))
689                         break;
690                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
691                     vha->host_no));
692                 DEBUG(printk(KERN_INFO
693                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
694                     vha->host_no, mb[1], mb[2], mb[3]));
695
696                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
697                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
698                                 | vha->d_id.b.al_pa;
699                 if (rscn_entry == host_pid) {
700                         DEBUG(printk(KERN_INFO
701                             "scsi(%ld): Ignoring RSCN update to local host "
702                             "port ID (%06x)\n",
703                             vha->host_no, host_pid));
704                         break;
705                 }
706
707                 /* Ignore reserved bits from RSCN-payload. */
708                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
709                 rscn_queue_index = vha->rscn_in_ptr + 1;
710                 if (rscn_queue_index == MAX_RSCN_COUNT)
711                         rscn_queue_index = 0;
712                 if (rscn_queue_index != vha->rscn_out_ptr) {
713                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
714                         vha->rscn_in_ptr = rscn_queue_index;
715                 } else {
716                         vha->flags.rscn_queue_overflow = 1;
717                 }
718
719                 atomic_set(&vha->loop_state, LOOP_UPDATE);
720                 atomic_set(&vha->loop_down_timer, 0);
721                 vha->flags.management_server_logged_in = 0;
722
723                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
724                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
725                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
726                 break;
727
728         /* case MBA_RIO_RESPONSE: */
729         case MBA_ZIO_RESPONSE:
730                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
731                     vha->host_no));
732
733                 if (IS_FWI2_CAPABLE(ha))
734                         qla24xx_process_response_queue(vha, rsp);
735                 else
736                         qla2x00_process_response_queue(rsp);
737                 break;
738
739         case MBA_DISCARD_RND_FRAME:
740                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
741                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
742                 break;
743
744         case MBA_TRACE_NOTIFICATION:
745                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
746                     vha->host_no, mb[1], mb[2]));
747                 break;
748
749         case MBA_ISP84XX_ALERT:
750                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
751                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
752
753                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
754                 switch (mb[1]) {
755                 case A84_PANIC_RECOVERY:
756                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
757                             "%04x %04x\n", mb[2], mb[3]);
758                         break;
759                 case A84_OP_LOGIN_COMPLETE:
760                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
761                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
762                             "firmware version %x\n", ha->cs84xx->op_fw_version));
763                         break;
764                 case A84_DIAG_LOGIN_COMPLETE:
765                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
766                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
767                             "diagnostic firmware version %x\n",
768                             ha->cs84xx->diag_fw_version));
769                         break;
770                 case A84_GOLD_LOGIN_COMPLETE:
771                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
772                         ha->cs84xx->fw_update = 1;
773                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
774                             "firmware version %x\n",
775                             ha->cs84xx->gold_fw_version));
776                         break;
777                 default:
778                         qla_printk(KERN_ERR, ha,
779                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
780                             mb[1], mb[2], mb[3]);
781                 }
782                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
783                 break;
784         case MBA_DCBX_START:
785                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
786                     vha->host_no, mb[1], mb[2], mb[3]));
787                 break;
788         case MBA_DCBX_PARAM_UPDATE:
789                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
790                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
791                 break;
792         case MBA_FCF_CONF_ERR:
793                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
794                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
795                 break;
796         case MBA_IDC_COMPLETE:
797         case MBA_IDC_NOTIFY:
798         case MBA_IDC_TIME_EXT:
799                 qla81xx_idc_event(vha, mb[0], mb[1]);
800                 break;
801         }
802
803         if (!vha->vp_idx && ha->num_vhosts)
804                 qla2x00_alert_all_vps(rsp, mb);
805 }
806
807 static void
808 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
809 {
810         fc_port_t *fcport = data;
811         struct scsi_qla_host *vha = fcport->vha;
812         struct qla_hw_data *ha = vha->hw;
813         struct req_que *req = NULL;
814
815         if (!ql2xqfulltracking)
816                 return;
817
818         req = vha->req;
819         if (!req)
820                 return;
821         if (req->max_q_depth <= sdev->queue_depth)
822                 return;
823
824         if (sdev->ordered_tags)
825                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
826                     sdev->queue_depth + 1);
827         else
828                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
829                     sdev->queue_depth + 1);
830
831         fcport->last_ramp_up = jiffies;
832
833         DEBUG2(qla_printk(KERN_INFO, ha,
834             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
835             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
836             sdev->queue_depth));
837 }
838
839 static void
840 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
841 {
842         fc_port_t *fcport = data;
843
844         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
845                 return;
846
847         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
848             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
849             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
850             sdev->queue_depth));
851 }
852
853 static inline void
854 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
855                                                                 srb_t *sp)
856 {
857         fc_port_t *fcport;
858         struct scsi_device *sdev;
859
860         if (!ql2xqfulltracking)
861                 return;
862
863         sdev = sp->cmd->device;
864         if (sdev->queue_depth >= req->max_q_depth)
865                 return;
866
867         fcport = sp->fcport;
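            /*
             * Throttle ramp-ups: both the last ramp-up and the last QUEUE FULL
             * must be at least ql2xqfullrampup seconds old before the queue
             * depth is increased again.
             */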
868         if (time_before(jiffies,
869             fcport->last_ramp_up + ql2xqfullrampup * HZ))
870                 return;
871         if (time_before(jiffies,
872             fcport->last_queue_full + ql2xqfullrampup * HZ))
873                 return;
874
875         starget_for_each_device(sdev->sdev_target, fcport,
876             qla2x00_adjust_sdev_qdepth_up);
877 }
878
879 /**
880  * qla2x00_process_completed_request() - Process a Fast Post response.
881  * @vha: SCSI driver HA context
882  * @index: SRB index
883  */
884 static void
885 qla2x00_process_completed_request(struct scsi_qla_host *vha,
886                                 struct req_que *req, uint32_t index)
887 {
888         srb_t *sp;
889         struct qla_hw_data *ha = vha->hw;
890
891         /* Validate handle. */
892         if (index >= MAX_OUTSTANDING_COMMANDS) {
893                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
894                     vha->host_no, index));
895                 qla_printk(KERN_WARNING, ha,
896                     "Invalid SCSI completion handle %d.\n", index);
897
898                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
899                 return;
900         }
901
902         sp = req->outstanding_cmds[index];
903         if (sp) {
904                 /* Free outstanding command slot. */
905                 req->outstanding_cmds[index] = NULL;
906
907                 /* Save ISP completion status */
908                 sp->cmd->result = DID_OK << 16;
909
910                 qla2x00_ramp_up_queue_depth(vha, req, sp);
911                 qla2x00_sp_compl(ha, sp);
912         } else {
913                 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
914                         " handle(%d)\n", vha->host_no, req->id, index));
915                 qla_printk(KERN_WARNING, ha,
916                     "Invalid ISP SCSI completion handle\n");
917
918                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
919         }
920 }
921
922 static srb_t *
923 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
924     struct req_que *req, void *iocb)
925 {
926         struct qla_hw_data *ha = vha->hw;
927         sts_entry_t *pkt = iocb;
928         srb_t *sp = NULL;
929         uint16_t index;
930
931         index = LSW(pkt->handle);
932         if (index >= MAX_OUTSTANDING_COMMANDS) {
933                 qla_printk(KERN_WARNING, ha,
934                     "%s: Invalid completion handle (%x).\n", func, index);
935                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
936                 goto done;
937         }
938         sp = req->outstanding_cmds[index];
939         if (!sp) {
940                 qla_printk(KERN_WARNING, ha,
941                     "%s: Invalid completion handle (%x) -- timed-out.\n", func,
942                     index);
943                 return sp;
944         }
945         if (sp->handle != index) {
946                 qla_printk(KERN_WARNING, ha,
947                     "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
948                     index);
949                 return NULL;
950         }
951         req->outstanding_cmds[index] = NULL;
952 done:
953         return sp;
954 }
955
956 static void
957 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
958     struct mbx_entry *mbx)
959 {
960         const char func[] = "MBX-IOCB";
961         const char *type;
962         struct qla_hw_data *ha = vha->hw;
963         fc_port_t *fcport;
964         srb_t *sp;
965         struct srb_logio *lio;
966         uint16_t data[2];
967
968         sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
969         if (!sp)
970                 return;
971
972         type = NULL;
973         lio = sp->ctx;
974         switch (lio->ctx.type) {
975         case SRB_LOGIN_CMD:
976                 type = "login";
977                 break;
978         case SRB_LOGOUT_CMD:
979                 type = "logout";
980                 break;
981         default:
982                 qla_printk(KERN_WARNING, ha,
983                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
984                     lio->ctx.type);
985                 return;
986         }
987
988         del_timer(&lio->ctx.timer);
989         fcport = sp->fcport;
990
991         data[0] = data[1] = 0;
992         if (mbx->entry_status) {
993                 DEBUG2(printk(KERN_WARNING
994                     "scsi(%ld:%x): Async-%s error entry - entry-status=%x "
995                     "status=%x state-flag=%x status-flags=%x.\n",
996                     fcport->vha->host_no, sp->handle, type,
997                     mbx->entry_status, le16_to_cpu(mbx->status),
998                     le16_to_cpu(mbx->state_flags),
999                     le16_to_cpu(mbx->status_flags)));
1000                 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
1001
1002                 data[0] = MBS_COMMAND_ERROR;
1003                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1004                     QLA_LOGIO_LOGIN_RETRIED: 0;
1005                 goto done_post_logio_done_work;
1006         }
1007
1008         if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1009                 DEBUG2(printk(KERN_DEBUG
1010                     "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
1011                     fcport->vha->host_no, sp->handle, type,
1012                     le16_to_cpu(mbx->mb1)));
1013
1014                 data[0] = MBS_COMMAND_COMPLETE;
1015                 if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
1016                         fcport->flags |= FCF_FCP2_DEVICE;
1017
1018                 goto done_post_logio_done_work;
1019         }
1020
1021         data[0] = le16_to_cpu(mbx->mb0);
1022         switch (data[0]) {
1023         case MBS_PORT_ID_USED:
1024                 data[1] = le16_to_cpu(mbx->mb1);
1025                 break;
1026         case MBS_LOOP_ID_USED:
1027                 break;
1028         default:
1029                 data[0] = MBS_COMMAND_ERROR;
1030                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1031                     QLA_LOGIO_LOGIN_RETRIED: 0;
1032                 break;
1033         }
1034
1035         DEBUG2(printk(KERN_WARNING
1036             "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
1037             "mb6=%x mb7=%x.\n",
1038             fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
1039             le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1040             le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1041             le16_to_cpu(mbx->mb7)));
1042
1043 done_post_logio_done_work:
1044         lio->ctx.type == SRB_LOGIN_CMD ?
1045             qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
1046             qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
1047
1048         lio->ctx.free(sp);
1049 }
1050
1051 static void
1052 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1053     struct logio_entry_24xx *logio)
1054 {
1055         const char func[] = "LOGIO-IOCB";
1056         const char *type;
1057         struct qla_hw_data *ha = vha->hw;
1058         fc_port_t *fcport;
1059         srb_t *sp;
1060         struct srb_logio *lio;
1061         uint16_t data[2];
1062         uint32_t iop[2];
1063
1064         sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1065         if (!sp)
1066                 return;
1067
1068         type = NULL;
1069         lio = sp->ctx;
1070         switch (lio->ctx.type) {
1071         case SRB_LOGIN_CMD:
1072                 type = "login";
1073                 break;
1074         case SRB_LOGOUT_CMD:
1075                 type = "logout";
1076                 break;
1077         default:
1078                 qla_printk(KERN_WARNING, ha,
1079                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1080                     lio->ctx.type);
1081                 return;
1082         }
1083
1084         del_timer(&lio->ctx.timer);
1085         fcport = sp->fcport;
1086
1087         data[0] = data[1] = 0;
1088         if (logio->entry_status) {
1089                 DEBUG2(printk(KERN_WARNING
1090                     "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
1091                     fcport->vha->host_no, sp->handle, type,
1092                     logio->entry_status));
1093                 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1094
1095                 data[0] = MBS_COMMAND_ERROR;
1096                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1097                     QLA_LOGIO_LOGIN_RETRIED: 0;
1098                 goto done_post_logio_done_work;
1099         }
1100
1101         if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1102                 DEBUG2(printk(KERN_DEBUG
1103                     "scsi(%ld:%x): Async-%s complete - iop0=%x.\n",
1104                     fcport->vha->host_no, sp->handle, type,
1105                     le32_to_cpu(logio->io_parameter[0])));
1106
1107                 data[0] = MBS_COMMAND_COMPLETE;
1108                 if (lio->ctx.type == SRB_LOGOUT_CMD)
1109                         goto done_post_logio_done_work;
1110
1111                 iop[0] = le32_to_cpu(logio->io_parameter[0]);
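                     /*
                      * io_parameter[0] role bits, as interpreted below:
                      * BIT_4 = target, BIT_5 = initiator, BIT_8 = FCP-2 capable.
                      */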
1112                 if (iop[0] & BIT_4) {
1113                         fcport->port_type = FCT_TARGET;
1114                         if (iop[0] & BIT_8)
1115                                 fcport->flags |= FCF_FCP2_DEVICE;
1116                 }
1117                 if (iop[0] & BIT_5)
1118                         fcport->port_type = FCT_INITIATOR;
1119                 if (logio->io_parameter[7] || logio->io_parameter[8])
1120                         fcport->supported_classes |= FC_COS_CLASS2;
1121                 if (logio->io_parameter[9] || logio->io_parameter[10])
1122                         fcport->supported_classes |= FC_COS_CLASS3;
1123
1124                 goto done_post_logio_done_work;
1125         }
1126
1127         iop[0] = le32_to_cpu(logio->io_parameter[0]);
1128         iop[1] = le32_to_cpu(logio->io_parameter[1]);
1129         switch (iop[0]) {
1130         case LSC_SCODE_PORTID_USED:
1131                 data[0] = MBS_PORT_ID_USED;
1132                 data[1] = LSW(iop[1]);
1133                 break;
1134         case LSC_SCODE_NPORT_USED:
1135                 data[0] = MBS_LOOP_ID_USED;
1136                 break;
1137         case LSC_SCODE_CMD_FAILED:
1138                 if ((iop[1] & 0xff) == 0x05) {
1139                         data[0] = MBS_NOT_LOGGED_IN;
1140                         break;
1141                 }
1142                 /* Fall through. */
1143         default:
1144                 data[0] = MBS_COMMAND_ERROR;
1145                 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1146                     QLA_LOGIO_LOGIN_RETRIED: 0;
1147                 break;
1148         }
1149
1150         DEBUG2(printk(KERN_WARNING
1151             "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
1152             fcport->vha->host_no, sp->handle, type,
1153             le16_to_cpu(logio->comp_status),
1154             le32_to_cpu(logio->io_parameter[0]),
1155             le32_to_cpu(logio->io_parameter[1])));
1156
1157 done_post_logio_done_work:
1158         lio->ctx.type == SRB_LOGIN_CMD ?
1159             qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
1160             qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
1161
1162         lio->ctx.free(sp);
1163 }
1164
1165 /**
1166  * qla2x00_process_response_queue() - Process response queue entries.
1167  * @rsp: response queue to process
1168  */
1169 void
1170 qla2x00_process_response_queue(struct rsp_que *rsp)
1171 {
1172         struct scsi_qla_host *vha;
1173         struct qla_hw_data *ha = rsp->hw;
1174         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1175         sts_entry_t     *pkt;
1176         uint16_t        handle_cnt;
1177         uint16_t        cnt;
1178
1179         vha = pci_get_drvdata(ha->pdev);
1180
1181         if (!vha->flags.online)
1182                 return;
1183
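             /*
              * Walk the response ring until an entry already stamped
              * RESPONSE_PROCESSED is found; each consumed entry is re-stamped
              * below so it is never handled twice.
              */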
1184         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1185                 pkt = (sts_entry_t *)rsp->ring_ptr;
1186
1187                 rsp->ring_index++;
1188                 if (rsp->ring_index == rsp->length) {
1189                         rsp->ring_index = 0;
1190                         rsp->ring_ptr = rsp->ring;
1191                 } else {
1192                         rsp->ring_ptr++;
1193                 }
1194
1195                 if (pkt->entry_status != 0) {
1196                         DEBUG3(printk(KERN_INFO
1197                             "scsi(%ld): Process error entry.\n", vha->host_no));
1198
1199                         qla2x00_error_entry(vha, rsp, pkt);
1200                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1201                         wmb();
1202                         continue;
1203                 }
1204
1205                 switch (pkt->entry_type) {
1206                 case STATUS_TYPE:
1207                         qla2x00_status_entry(vha, rsp, pkt);
1208                         break;
1209                 case STATUS_TYPE_21:
1210                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1211                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1212                                 qla2x00_process_completed_request(vha, rsp->req,
1213                                     ((sts21_entry_t *)pkt)->handle[cnt]);
1214                         }
1215                         break;
1216                 case STATUS_TYPE_22:
1217                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1218                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1219                                 qla2x00_process_completed_request(vha, rsp->req,
1220                                     ((sts22_entry_t *)pkt)->handle[cnt]);
1221                         }
1222                         break;
1223                 case STATUS_CONT_TYPE:
1224                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1225                         break;
1226                 case MBX_IOCB_TYPE:
1227                         qla2x00_mbx_iocb_entry(vha, rsp->req,
1228                             (struct mbx_entry *)pkt);
                        break;
1229                 default:
1230                         /* Type Not Supported. */
1231                         DEBUG4(printk(KERN_WARNING
1232                             "scsi(%ld): Received unknown response pkt type %x "
1233                             "entry status=%x.\n",
1234                             vha->host_no, pkt->entry_type, pkt->entry_status));
1235                         break;
1236                 }
1237                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1238                 wmb();
1239         }
1240
1241         /* Adjust ring index */
1242         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1243 }
1244
1245 static inline void
1246 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
1247         struct rsp_que *rsp)
1248 {
1249         struct scsi_cmnd *cp = sp->cmd;
1250
1251         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1252                 sense_len = SCSI_SENSE_BUFFERSIZE;
1253
1254         sp->request_sense_length = sense_len;
1255         sp->request_sense_ptr = cp->sense_buffer;
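             /*
              * A status IOCB carries at most 32 bytes of sense data; any
              * remainder arrives in status-continuation entries, so the
              * leftover length and position are kept in the srb and
              * rsp->status_srb is set below.
              */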
1256         if (sp->request_sense_length > 32)
1257                 sense_len = 32;
1258
1259         memcpy(cp->sense_buffer, sense_data, sense_len);
1260
1261         sp->request_sense_ptr += sense_len;
1262         sp->request_sense_length -= sense_len;
1263         if (sp->request_sense_length != 0)
1264                 rsp->status_srb = sp;
1265
1266         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1267             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
1268             cp->device->channel, cp->device->id, cp->device->lun, cp,
1269             cp->serial_number));
1270         if (sense_len)
1271                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1272 }
1273
1274 /**
1275  * qla2x00_status_entry() - Process a Status IOCB entry.
1276  * @vha: SCSI driver HA context
1277  * @pkt: Entry pointer
1278  */
1279 static void
1280 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1281 {
1282         srb_t           *sp;
1283         fc_port_t       *fcport;
1284         struct scsi_cmnd *cp;
1285         sts_entry_t *sts;
1286         struct sts_entry_24xx *sts24;
1287         uint16_t        comp_status;
1288         uint16_t        scsi_status;
1289         uint8_t         lscsi_status;
1290         int32_t         resid;
1291         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
1292         uint8_t         *rsp_info, *sense_data;
1293         struct qla_hw_data *ha = vha->hw;
1294         uint32_t handle;
1295         uint16_t que;
1296         struct req_que *req;
1297
1298         sts = (sts_entry_t *) pkt;
1299         sts24 = (struct sts_entry_24xx *) pkt;
1300         if (IS_FWI2_CAPABLE(ha)) {
1301                 comp_status = le16_to_cpu(sts24->comp_status);
1302                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1303         } else {
1304                 comp_status = le16_to_cpu(sts->comp_status);
1305                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1306         }
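             /*
              * The completion handle packs the outstanding-command index in
              * its low word and the originating request-queue number in its
              * high word.
              */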
1307         handle = (uint32_t) LSW(sts->handle);
1308         que = MSW(sts->handle);
1309         req = ha->req_q_map[que];
1310         /* Fast path completion. */
1311         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1312                 qla2x00_process_completed_request(vha, req, handle);
1313
1314                 return;
1315         }
1316
1317         /* Validate handle. */
1318         if (handle < MAX_OUTSTANDING_COMMANDS) {
1319                 sp = req->outstanding_cmds[handle];
1320                 req->outstanding_cmds[handle] = NULL;
1321         } else
1322                 sp = NULL;
1323
1324         if (sp == NULL) {
1325                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
1326                     vha->host_no));
1327                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
1328
1329                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1330                 qla2xxx_wake_dpc(vha);
1331                 return;
1332         }
1333         cp = sp->cmd;
1334         if (cp == NULL) {
1335                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1336                     "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1337                 qla_printk(KERN_WARNING, ha,
1338                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
1339
1340                 return;
1341         }
1342
1343         lscsi_status = scsi_status & STATUS_MASK;
1344
1345         fcport = sp->fcport;
1346
1347         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
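        /*
         * FWI2-capable ISPs (24xx and later) return the FCP response bytes
         * and sense data in a single shared buffer that must be byte-swapped
         * with host_to_fcp_swap() before use; earlier ISPs return them in
         * separate, directly usable fields.
         */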
1348         if (IS_FWI2_CAPABLE(ha)) {
1349                 sense_len = le32_to_cpu(sts24->sense_len);
1350                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1351                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1352                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1353                 rsp_info = sts24->data;
1354                 sense_data = sts24->data;
1355                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1356         } else {
1357                 sense_len = le16_to_cpu(sts->req_sense_length);
1358                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1359                 resid_len = le32_to_cpu(sts->residual_length);
1360                 rsp_info = sts->rsp_info;
1361                 sense_data = sts->req_sense_data;
1362         }
1363
1364         /* Check for any FCP transport errors. */
1365         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1366                 /* Sense data lies beyond any FCP RESPONSE data. */
1367                 if (IS_FWI2_CAPABLE(ha))
1368                         sense_data += rsp_info_len;
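                /*
                 * Byte 3 of the FCP response information is the RSP_CODE; a
                 * non-zero value indicates an FCP transport-level failure, so
                 * the command is completed with DID_BUS_BUSY to force a retry.
                 */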
1369                 if (rsp_info_len > 3 && rsp_info[3]) {
1370                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1371                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1372                             "retrying command\n", vha->host_no,
1373                             cp->device->channel, cp->device->id,
1374                             cp->device->lun, rsp_info_len, rsp_info[0],
1375                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1376                             rsp_info[5], rsp_info[6], rsp_info[7]));
1377
1378                         cp->result = DID_BUS_BUSY << 16;
1379                         qla2x00_sp_compl(ha, sp);
1380                         return;
1381                 }
1382         }
1383
1384         /* Check for overrun. */
1385         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1386             scsi_status & SS_RESIDUAL_OVER)
1387                 comp_status = CS_DATA_OVERRUN;
1388
1389         /*
1390          * Based on the host and SCSI status, generate a status code for Linux.
1391          */
1392         switch (comp_status) {
1393         case CS_COMPLETE:
1394         case CS_QUEUE_FULL:
1395                 if (scsi_status == 0) {
1396                         cp->result = DID_OK << 16;
1397                         break;
1398                 }
1399                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1400                         resid = resid_len;
1401                         scsi_set_resid(cp, resid);
1402
1403                         if (!lscsi_status &&
1404                             ((unsigned)(scsi_bufflen(cp) - resid) <
1405                              cp->underflow)) {
1406                                 qla_printk(KERN_INFO, ha,
1407                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1408                                            "detected (%x of %x bytes)...returning "
1409                                            "error status.\n", vha->host_no,
1410                                            cp->device->channel, cp->device->id,
1411                                            cp->device->lun, resid,
1412                                            scsi_bufflen(cp));
1413
1414                                 cp->result = DID_ERROR << 16;
1415                                 break;
1416                         }
1417                 }
1418                 cp->result = DID_OK << 16 | lscsi_status;
1419
1420                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1421                         DEBUG2(printk(KERN_INFO
1422                             "scsi(%ld): QUEUE FULL status detected "
1423                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1424                             scsi_status));
1425
1426                         /* Adjust queue depth for all luns on the port. */
1427                         if (!ql2xqfulltracking)
1428                                 break;
1429                         fcport->last_queue_full = jiffies;
1430                         starget_for_each_device(cp->device->sdev_target,
1431                             fcport, qla2x00_adjust_sdev_qdepth_down);
1432                         break;
1433                 }
1434                 if (lscsi_status != SS_CHECK_CONDITION)
1435                         break;
1436
1437                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1438                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1439                         break;
1440
1441                 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1442                 break;
1443
1444         case CS_DATA_UNDERRUN:
1445                 resid = resid_len;
1446                 /* Use F/W calculated residual length. */
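                /*
                 * If the target did not report an underrun, or its residual
                 * count disagrees with the firmware's, the SCSI status cannot
                 * be trusted; clear it so the dropped-frame check below flags
                 * the command as an error.
                 */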
1447                 if (IS_FWI2_CAPABLE(ha)) {
1448                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1449                                 lscsi_status = 0;
1450                         } else if (resid != fw_resid_len) {
1451                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1452                                 lscsi_status = 0;
1453                         }
1454                         resid = fw_resid_len;
1455                 }
1456
1457                 if (scsi_status & SS_RESIDUAL_UNDER) {
1458                         scsi_set_resid(cp, resid);
1459                 } else {
1460                         DEBUG2(printk(KERN_INFO
1461                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1462                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1463                             "os_underflow=0x%x\n", vha->host_no,
1464                             cp->device->id, cp->device->lun, comp_status,
1465                             scsi_status, resid_len, resid, cp->cmnd[0],
1466                             cp->underflow));
1467
1468                 }
1469
1470                 /*
1471          * Check to see if the SCSI status is non-zero; if so, report
1472          * the SCSI status.
1473                  */
1474                 if (lscsi_status != 0) {
1475                         cp->result = DID_OK << 16 | lscsi_status;
1476
1477                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1478                                 DEBUG2(printk(KERN_INFO
1479                                     "scsi(%ld): QUEUE FULL status detected "
1480                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1481                                     scsi_status));
1482
1483                                 /*
1484                                  * Adjust queue depth for all luns on the
1485                                  * port.
1486                                  */
1487                                 if (!ql2xqfulltracking)
1488                                         break;
1489                                 fcport->last_queue_full = jiffies;
1490                                 starget_for_each_device(
1491                                     cp->device->sdev_target, fcport,
1492                                     qla2x00_adjust_sdev_qdepth_down);
1493                                 break;
1494                         }
1495                         if (lscsi_status != SS_CHECK_CONDITION)
1496                                 break;
1497
1498                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1499                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1500                                 break;
1501
1502                         qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1503                 } else {
1504                         /*
1505                          * If the RISC reports an underrun but the target does
1506                          * not, a frame was dropped; tell the upper layer to
1507                          * retry by reporting an error.
1508                          */
1509                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1510                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1511                                               "frame(s) detected (%x of %x bytes)..."
1512                                               "retrying command.\n",
1513                                         vha->host_no, cp->device->channel,
1514                                         cp->device->id, cp->device->lun, resid,
1515                                         scsi_bufflen(cp)));
1516
1517                                 scsi_set_resid(cp, resid);
1518                                 cp->result = DID_ERROR << 16;
1519                                 break;
1520                         }
1521
1522                         /* Handle mid-layer underflow */
1523                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1524                             cp->underflow) {
1525                                 qla_printk(KERN_INFO, ha,
1526                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1527                                            "detected (%x of %x bytes)...returning "
1528                                            "error status.\n", vha->host_no,
1529                                            cp->device->channel, cp->device->id,
1530                                            cp->device->lun, resid,
1531                                            scsi_bufflen(cp));
1532
1533                                 cp->result = DID_ERROR << 16;
1534                                 break;
1535                         }
1536
1537                         /* Everybody online, looking good... */
1538                         cp->result = DID_OK << 16;
1539                 }
1540                 break;
1541
1542         case CS_DATA_OVERRUN:
1543                 DEBUG2(printk(KERN_INFO
1544                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1545                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1546                     scsi_status));
1547                 DEBUG2(printk(KERN_INFO
1548                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1549                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1550                     cp->cmnd[4], cp->cmnd[5]));
1551                 DEBUG2(printk(KERN_INFO
1552                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1553                     "status!\n",
1554                     cp->serial_number, scsi_bufflen(cp), resid_len));
1555
1556                 cp->result = DID_ERROR << 16;
1557                 break;
1558
1559         case CS_PORT_LOGGED_OUT:
1560         case CS_PORT_CONFIG_CHG:
1561         case CS_PORT_BUSY:
1562         case CS_INCOMPLETE:
1563         case CS_PORT_UNAVAILABLE:
1564                 /*
1565                  * If the port is in a Target Down state, return all I/Os for
1566                  * this target with DID_NO_CONNECT; otherwise queue the I/Os in
1567                  * the retry_queue.
1568                  */
1569                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1570                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1571                     vha->host_no, cp->device->id, cp->device->lun,
1572                     cp->serial_number, comp_status,
1573                     atomic_read(&fcport->state)));
1574
1575                 /*
1576                  * We are going to have the fc class block the rport
1577                  * while we try to recover so instruct the mid layer
1578                  * to requeue until the class decides how to handle this.
1579                  */
1580                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1581                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1582                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1583                 break;
1584
1585         case CS_RESET:
1586                 DEBUG2(printk(KERN_INFO
1587                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1588                     vha->host_no, comp_status, scsi_status));
1589
1590                 cp->result = DID_RESET << 16;
1591                 break;
1592
1593         case CS_ABORTED:
1594                 /*
1595                  * hv2.19.12 - DID_ABORT does not retry the request. If we
1596                  * aborted this request ourselves, report an abort; otherwise
1597                  * it must have come from a reset.
1598                  */
1599                 DEBUG2(printk(KERN_INFO
1600                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1601                     vha->host_no, comp_status, scsi_status));
1602
1603                 cp->result = DID_RESET << 16;
1604                 break;
1605
1606         case CS_TIMEOUT:
1607                 /*
1608                  * We are going to have the fc class block the rport
1609                  * while we try to recover so instruct the mid layer
1610                  * to requeue until the class decides how to handle this.
1611                  */
1612                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1613
1614                 if (IS_FWI2_CAPABLE(ha)) {
1615                         DEBUG2(printk(KERN_INFO
1616                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1617                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1618                             cp->device->id, cp->device->lun, comp_status,
1619                             scsi_status));
1620                         break;
1621                 }
1622                 DEBUG2(printk(KERN_INFO
1623                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1624                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1625                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1626                     le16_to_cpu(sts->status_flags)));
1627
1628                 /* Check to see if logout occurred. */
1629                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1630                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1631                 break;
1632
1633         default:
1634                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1635                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1636                 qla_printk(KERN_INFO, ha,
1637                     "Unknown status detected 0x%x-0x%x.\n",
1638                     comp_status, scsi_status);
1639
1640                 cp->result = DID_ERROR << 16;
1641                 break;
1642         }
1643
1644         /* Place command on done queue. */
1645         if (rsp->status_srb == NULL)
1646                 qla2x00_sp_compl(ha, sp);
1647 }
1648
1649 /**
1650  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1651  * @rsp: response queue
1652  * @pkt: Entry pointer
1653  *
1654  * Extended sense data.
1655  */
1656 static void
1657 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1658 {
1659         uint8_t         sense_sz = 0;
1660         struct qla_hw_data *ha = rsp->hw;
1661         srb_t           *sp = rsp->status_srb;
1662         struct scsi_cmnd *cp;
1663
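        /*
         * rsp->status_srb, set by qla2x00_handle_sense(), tracks the command
         * whose sense data overflowed the original status IOCB.
         */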
1664         if (sp != NULL && sp->request_sense_length != 0) {
1665                 cp = sp->cmd;
1666                 if (cp == NULL) {
1667                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1668                             "sp=%p.\n", __func__, sp));
1669                         qla_printk(KERN_INFO, ha,
1670                             "cmd is NULL: already returned to OS (sp=%p)\n",
1671                             sp);
1672
1673                         rsp->status_srb = NULL;
1674                         return;
1675                 }
1676
1677                 if (sp->request_sense_length > sizeof(pkt->data)) {
1678                         sense_sz = sizeof(pkt->data);
1679                 } else {
1680                         sense_sz = sp->request_sense_length;
1681                 }
1682
1683                 /* Move sense data. */
1684                 if (IS_FWI2_CAPABLE(ha))
1685                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1686                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1687                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1688
1689                 sp->request_sense_ptr += sense_sz;
1690                 sp->request_sense_length -= sense_sz;
1691
1692                 /* Place command on done queue. */
1693                 if (sp->request_sense_length == 0) {
1694                         rsp->status_srb = NULL;
1695                         qla2x00_sp_compl(ha, sp);
1696                 }
1697         }
1698 }
1699
1700 /**
1701  * qla2x00_error_entry() - Process an error entry.
1702  * @vha: SCSI driver HA context
1703  * @pkt: Entry pointer
1704  */
1705 static void
1706 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1707 {
1708         srb_t *sp;
1709         struct qla_hw_data *ha = vha->hw;
1710         uint32_t handle = LSW(pkt->handle);
1711         uint16_t que = MSW(pkt->handle);
1712         struct req_que *req = ha->req_q_map[que];
1713 #if defined(QL_DEBUG_LEVEL_2)
1714         if (pkt->entry_status & RF_INV_E_ORDER)
1715                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1716         else if (pkt->entry_status & RF_INV_E_COUNT)
1717                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1718         else if (pkt->entry_status & RF_INV_E_PARAM)
1719                 qla_printk(KERN_ERR, ha,
1720                     "%s: Invalid Entry Parameter\n", __func__);
1721         else if (pkt->entry_status & RF_INV_E_TYPE)
1722                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1723         else if (pkt->entry_status & RF_BUSY)
1724                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1725         else
1726                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1727 #endif
1728
1729         /* Validate handle. */
1730         if (handle < MAX_OUTSTANDING_COMMANDS)
1731                 sp = req->outstanding_cmds[handle];
1732         else
1733                 sp = NULL;
1734
1735         if (sp) {
1736                 /* Free outstanding command slot. */
1737                 req->outstanding_cmds[handle] = NULL;
1738
1739                 /* Bad payload or header */
1740                 if (pkt->entry_status &
1741                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1742                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1743                         sp->cmd->result = DID_ERROR << 16;
1744                 } else if (pkt->entry_status & RF_BUSY) {
1745                         sp->cmd->result = DID_BUS_BUSY << 16;
1746                 } else {
1747                         sp->cmd->result = DID_ERROR << 16;
1748                 }
1749                 qla2x00_sp_compl(ha, sp);
1750
1751         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1752             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1753                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1754                     vha->host_no));
1755                 qla_printk(KERN_WARNING, ha,
1756                     "Error entry - invalid handle\n");
1757
1758                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1759                 qla2xxx_wake_dpc(vha);
1760         }
1761 }
1762
1763 /**
1764  * qla24xx_mbx_completion() - Process mailbox command completions.
1765  * @vha: SCSI driver HA context
1766  * @mb0: Mailbox0 register
1767  */
1768 static void
1769 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1770 {
1771         uint16_t        cnt;
1772         uint16_t __iomem *wptr;
1773         struct qla_hw_data *ha = vha->hw;
1774         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1775
1776         /* Load return mailbox registers. */
1777         ha->flags.mbox_int = 1;
1778         ha->mailbox_out[0] = mb0;
1779         wptr = (uint16_t __iomem *)&reg->mailbox1;
1780
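        /*
         * Mailbox 0 was delivered via the host-status register; read the
         * remaining mailbox registers starting at mailbox1.
         */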
1781         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1782                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1783                 wptr++;
1784         }
1785
1786         if (ha->mcp) {
1787                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1788                     __func__, vha->host_no, ha->mcp->mb[0]));
1789         } else {
1790                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1791                     __func__, vha->host_no));
1792         }
1793 }
1794
1795 /**
1796  * qla24xx_process_response_queue() - Process response queue entries.
1797  * @vha: SCSI driver HA context
1798  */
1799 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1800         struct rsp_que *rsp)
1801 {
1802         struct sts_entry_24xx *pkt;
1803
1804         if (!vha->flags.online)
1805                 return;
1806
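        /*
         * Entries already consumed are stamped RESPONSE_PROCESSED, so the
         * ring is walked until the first entry the firmware has not yet
         * replaced with fresh response data.
         */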
1807         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1808                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1809
1810                 rsp->ring_index++;
1811                 if (rsp->ring_index == rsp->length) {
1812                         rsp->ring_index = 0;
1813                         rsp->ring_ptr = rsp->ring;
1814                 } else {
1815                         rsp->ring_ptr++;
1816                 }
1817
1818                 if (pkt->entry_status != 0) {
1819                         DEBUG3(printk(KERN_INFO
1820                             "scsi(%ld): Process error entry.\n", vha->host_no));
1821
1822                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1823                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1824                         wmb();
1825                         continue;
1826                 }
1827
1828                 switch (pkt->entry_type) {
1829                 case STATUS_TYPE:
1830                         qla2x00_status_entry(vha, rsp, pkt);
1831                         break;
1832                 case STATUS_CONT_TYPE:
1833                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1834                         break;
1835                 case VP_RPT_ID_IOCB_TYPE:
1836                         qla24xx_report_id_acquisition(vha,
1837                             (struct vp_rpt_id_entry_24xx *)pkt);
1838                         break;
1839                 case LOGINOUT_PORT_IOCB_TYPE:
1840                         qla24xx_logio_entry(vha, rsp->req,
1841                             (struct logio_entry_24xx *)pkt);
1842                         break;
1843                 default:
1844                         /* Type Not Supported. */
1845                         DEBUG4(printk(KERN_WARNING
1846                             "scsi(%ld): Received unknown response pkt type %x "
1847                             "entry status=%x.\n",
1848                             vha->host_no, pkt->entry_type, pkt->entry_status));
1849                         break;
1850                 }
1851                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1852                 wmb();
1853         }
1854
1855         /* Adjust ring index */
1856         WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
1857 }
1858
1859 static void
1860 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1861 {
1862         int rval;
1863         uint32_t cnt;
1864         struct qla_hw_data *ha = vha->hw;
1865         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1866
1867         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1868                 return;
1869
1870         rval = QLA_SUCCESS;
1871         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1872         RD_REG_DWORD(&reg->iobase_addr);
1873         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1874         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1875             rval == QLA_SUCCESS; cnt--) {
1876                 if (cnt) {
1877                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1878                         udelay(10);
1879                 } else
1880                         rval = QLA_FUNCTION_TIMEOUT;
1881         }
1882         if (rval == QLA_SUCCESS)
1883                 goto next_test;
1884
1885         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1886         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1887             rval == QLA_SUCCESS; cnt--) {
1888                 if (cnt) {
1889                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1890                         udelay(10);
1891                 } else
1892                         rval = QLA_FUNCTION_TIMEOUT;
1893         }
1894         if (rval != QLA_SUCCESS)
1895                 goto done;
1896
1897 next_test:
1898         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1899                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1900
1901 done:
1902         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1903         RD_REG_DWORD(&reg->iobase_window);
1904 }
1905
1906 /**
1907  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and newer ISPs.
1908  * @irq: interrupt number
1909  * @dev_id: SCSI driver HA context
1910  *
1911  * Called by system whenever the host adapter generates an interrupt.
1912  *
1913  * Returns handled flag.
1914  */
1915 irqreturn_t
1916 qla24xx_intr_handler(int irq, void *dev_id)
1917 {
1918         scsi_qla_host_t *vha;
1919         struct qla_hw_data *ha;
1920         struct device_reg_24xx __iomem *reg;
1921         int             status;
1922         unsigned long   iter;
1923         uint32_t        stat;
1924         uint32_t        hccr;
1925         uint16_t        mb[4];
1926         struct rsp_que *rsp;
1927         unsigned long   flags;
1928
1929         rsp = (struct rsp_que *) dev_id;
1930         if (!rsp) {
1931                 printk(KERN_INFO
1932                     "%s(): NULL response queue pointer\n", __func__);
1933                 return IRQ_NONE;
1934         }
1935
1936         ha = rsp->hw;
1937         reg = &ha->iobase->isp24;
1938         status = 0;
1939
1940         spin_lock_irqsave(&ha->hardware_lock, flags);
1941         vha = pci_get_drvdata(ha->pdev);
1942         for (iter = 50; iter--; ) {
1943                 stat = RD_REG_DWORD(&reg->host_status);
1944                 if (stat & HSRX_RISC_PAUSED) {
1945                         if (pci_channel_offline(ha->pdev))
1946                                 break;
1947
1948                         hccr = RD_REG_DWORD(&reg->hccr);
1949
1950                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1951                             "Dumping firmware!\n", hccr);
1952
1953                         qla2xxx_check_risc_status(vha);
1954
1955                         ha->isp_ops->fw_dump(vha, 1);
1956                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1957                         break;
1958                 } else if ((stat & HSRX_RISC_INT) == 0)
1959                         break;
1960
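                /*
                 * The low byte of host_status identifies the interrupt
                 * source: 0x1/0x2/0x10/0x11 mailbox command completion,
                 * 0x12 asynchronous event, 0x13/0x14 response queue update.
                 */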
1961                 switch (stat & 0xff) {
1962                 case 0x1:
1963                 case 0x2:
1964                 case 0x10:
1965                 case 0x11:
1966                         qla24xx_mbx_completion(vha, MSW(stat));
1967                         status |= MBX_INTERRUPT;
1968
1969                         break;
1970                 case 0x12:
1971                         mb[0] = MSW(stat);
1972                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1973                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1974                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1975                         qla2x00_async_event(vha, rsp, mb);
1976                         break;
1977                 case 0x13:
1978                 case 0x14:
1979                         qla24xx_process_response_queue(vha, rsp);
1980                         break;
1981                 default:
1982                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1983                             "(%d).\n",
1984                             vha->host_no, stat & 0xff));
1985                         break;
1986                 }
1987                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1988                 RD_REG_DWORD_RELAXED(&reg->hccr);
1989         }
1990         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1991
1992         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1993             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1994                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1995                 complete(&ha->mbx_intr_comp);
1996         }
1997
1998         return IRQ_HANDLED;
1999 }
2000
2001 static irqreturn_t
2002 qla24xx_msix_rsp_q(int irq, void *dev_id)
2003 {
2004         struct qla_hw_data *ha;
2005         struct rsp_que *rsp;
2006         struct device_reg_24xx __iomem *reg;
2007         struct scsi_qla_host *vha;
2008
2009         rsp = (struct rsp_que *) dev_id;
2010         if (!rsp) {
2011                 printk(KERN_INFO
2012                 "%s(): NULL response queue pointer\n", __func__);
2013                 return IRQ_NONE;
2014         }
2015         ha = rsp->hw;
2016         reg = &ha->iobase->isp24;
2017
2018         spin_lock_irq(&ha->hardware_lock);
2019
2020         vha = qla25xx_get_host(rsp);
2021         qla24xx_process_response_queue(vha, rsp);
2022         if (!ha->mqenable) {
2023                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2024                 RD_REG_DWORD_RELAXED(&reg->hccr);
2025         }
2026         spin_unlock_irq(&ha->hardware_lock);
2027
2028         return IRQ_HANDLED;
2029 }
2030
2031 static irqreturn_t
2032 qla25xx_msix_rsp_q(int irq, void *dev_id)
2033 {
2034         struct qla_hw_data *ha;
2035         struct rsp_que *rsp;
2036
2037         rsp = (struct rsp_que *) dev_id;
2038         if (!rsp) {
2039                 printk(KERN_INFO
2040                         "%s(): NULL response queue pointer\n", __func__);
2041                 return IRQ_NONE;
2042         }
2043         ha = rsp->hw;
2044
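        /*
         * Defer response-queue processing to this queue's work item, bound
         * to CPU (rsp->id - 1).
         */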
2045         queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2046
2047         return IRQ_HANDLED;
2048 }
2049
2050 static irqreturn_t
2051 qla24xx_msix_default(int irq, void *dev_id)
2052 {
2053         scsi_qla_host_t *vha;
2054         struct qla_hw_data *ha;
2055         struct rsp_que *rsp;
2056         struct device_reg_24xx __iomem *reg;
2057         int             status;
2058         uint32_t        stat;
2059         uint32_t        hccr;
2060         uint16_t        mb[4];
2061
2062         rsp = (struct rsp_que *) dev_id;
2063         if (!rsp) {
2064                 DEBUG(printk(
2065                 "%s(): NULL response queue pointer\n", __func__));
2066                 return IRQ_NONE;
2067         }
2068         ha = rsp->hw;
2069         reg = &ha->iobase->isp24;
2070         status = 0;
2071
2072         spin_lock_irq(&ha->hardware_lock);
2073         vha = pci_get_drvdata(ha->pdev);
2074         do {
2075                 stat = RD_REG_DWORD(&reg->host_status);
2076                 if (stat & HSRX_RISC_PAUSED) {
2077                         if (pci_channel_offline(ha->pdev))
2078                                 break;
2079
2080                         hccr = RD_REG_DWORD(&reg->hccr);
2081
2082                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2083                             "Dumping firmware!\n", hccr);
2084
2085                         qla2xxx_check_risc_status(vha);
2086
2087                         ha->isp_ops->fw_dump(vha, 1);
2088                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2089                         break;
2090                 } else if ((stat & HSRX_RISC_INT) == 0)
2091                         break;
2092
2093                 switch (stat & 0xff) {
2094                 case 0x1:
2095                 case 0x2:
2096                 case 0x10:
2097                 case 0x11:
2098                         qla24xx_mbx_completion(vha, MSW(stat));
2099                         status |= MBX_INTERRUPT;
2100
2101                         break;
2102                 case 0x12:
2103                         mb[0] = MSW(stat);
2104                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2105                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2106                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2107                         qla2x00_async_event(vha, rsp, mb);
2108                         break;
2109                 case 0x13:
2110                 case 0x14:
2111                         qla24xx_process_response_queue(vha, rsp);
2112                         break;
2113                 default:
2114                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2115                             "(%d).\n",
2116                             vha->host_no, stat & 0xff));
2117                         break;
2118                 }
2119                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2120         } while (0);
2121         spin_unlock_irq(&ha->hardware_lock);
2122
2123         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2124             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2125                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2126                 complete(&ha->mbx_intr_comp);
2127         }
2128
2129         return IRQ_HANDLED;
2130 }
2131
2132 /* Interrupt handling helpers. */
2133
2134 struct qla_init_msix_entry {
2135         const char *name;
2136         irq_handler_t handler;
2137 };
2138
2139 static struct qla_init_msix_entry msix_entries[3] = {
2140         { "qla2xxx (default)", qla24xx_msix_default },
2141         { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2142         { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2143 };
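/*
 * Entries 0 and 1 back the base default and response-queue vectors set up in
 * qla24xx_enable_msix(); entry 2 is used by qla25xx_request_irq() for any
 * additional multiqueue response queues.
 */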
2144
2145 static void
2146 qla24xx_disable_msix(struct qla_hw_data *ha)
2147 {
2148         int i;
2149         struct qla_msix_entry *qentry;
2150
2151         for (i = 0; i < ha->msix_count; i++) {
2152                 qentry = &ha->msix_entries[i];
2153                 if (qentry->have_irq)
2154                         free_irq(qentry->vector, qentry->rsp);
2155         }
2156         pci_disable_msix(ha->pdev);
2157         kfree(ha->msix_entries);
2158         ha->msix_entries = NULL;
2159         ha->flags.msix_enabled = 0;
2160 }
2161
2162 static int
2163 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2164 {
2165 #define MIN_MSIX_COUNT  2
2166         int i, ret;
2167         struct msix_entry *entries;
2168         struct qla_msix_entry *qentry;
2169
2170         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2171                                         GFP_KERNEL);
2172         if (!entries)
2173                 return -ENOMEM;
2174
2175         for (i = 0; i < ha->msix_count; i++)
2176                 entries[i].entry = i;
2177
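        /*
         * pci_enable_msix() returns the number of vectors actually available
         * when the full request cannot be granted; as long as at least
         * MIN_MSIX_COUNT vectors (default + response queue) remain, retry
         * with the reduced count.
         */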
2178         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2179         if (ret) {
2180                 if (ret < MIN_MSIX_COUNT)
2181                         goto msix_failed;
2182
2183                 qla_printk(KERN_WARNING, ha,
2184                         "MSI-X: Failed to enable support -- %d/%d\n"
2185                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
2186                 ha->msix_count = ret;
2187                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2188                 if (ret) {
2189 msix_failed:
2190                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
2191                                 " support, giving up -- %d/%d\n",
2192                                 ha->msix_count, ret);
2193                         goto msix_out;
2194                 }
2195                 ha->max_rsp_queues = ha->msix_count - 1;
2196         }
2197         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2198                                 ha->msix_count, GFP_KERNEL);
2199         if (!ha->msix_entries) {
2200                 ret = -ENOMEM;
2201                 goto msix_out;
2202         }
2203         ha->flags.msix_enabled = 1;
2204
2205         for (i = 0; i < ha->msix_count; i++) {
2206                 qentry = &ha->msix_entries[i];
2207                 qentry->vector = entries[i].vector;
2208                 qentry->entry = entries[i].entry;
2209                 qentry->have_irq = 0;
2210                 qentry->rsp = NULL;
2211         }
2212
2213         /* Enable MSI-X vectors for the base queue */
2214         for (i = 0; i < 2; i++) {
2215                 qentry = &ha->msix_entries[i];
2216                 ret = request_irq(qentry->vector, msix_entries[i].handler,
2217                                         0, msix_entries[i].name, rsp);
2218                 if (ret) {
2219                         qla_printk(KERN_WARNING, ha,
2220                         "MSI-X: Unable to register handler -- %x/%d.\n",
2221                         qentry->vector, ret);
2222                         qla24xx_disable_msix(ha);
2223                         ha->mqenable = 0;
2224                         goto msix_out;
2225                 }
2226                 qentry->have_irq = 1;
2227                 qentry->rsp = rsp;
2228                 rsp->msix = qentry;
2229         }
2230
2231         /* Enable multi-queue support when extra request/response queues exist. */
2232         if (ha->mqiobase &&  (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2233                 ha->mqenable = 1;
2234
2235 msix_out:
2236         kfree(entries);
2237         return ret;
2238 }
2239
2240 int
2241 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2242 {
2243         int ret;
2244         device_reg_t __iomem *reg = ha->iobase;
2245
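        /*
         * Interrupt setup falls back in order: MSI-X where the ISP supports
         * it, otherwise MSI if it can be enabled, with the single vector
         * (MSI or legacy INTx) wired up through request_irq() below.
         */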
2246         /* If possible, enable MSI-X. */
2247         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2248             !IS_QLA8432(ha) && !IS_QLA8001(ha))
2249                 goto skip_msix;
2250
2251         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2252                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2253                 DEBUG2(qla_printk(KERN_WARNING, ha,
2254                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2255                         ha->pdev->revision, ha->fw_attributes));
2256
2257                 goto skip_msix;
2258         }
2259
2260         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2261             (ha->pdev->subsystem_device == 0x7040 ||
2262                 ha->pdev->subsystem_device == 0x7041 ||
2263                 ha->pdev->subsystem_device == 0x1705)) {
2264                 DEBUG2(qla_printk(KERN_WARNING, ha,
2265                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
2266                     ha->pdev->subsystem_vendor,
2267                     ha->pdev->subsystem_device));
2268
2269                 goto skip_msi;
2270         }
2271
2272         ret = qla24xx_enable_msix(ha, rsp);
2273         if (!ret) {
2274                 DEBUG2(qla_printk(KERN_INFO, ha,
2275                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2276                     ha->fw_attributes));
2277                 goto clear_risc_ints;
2278         }
2279         qla_printk(KERN_WARNING, ha,
2280             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
2281 skip_msix:
2282
2283         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2284             !IS_QLA8001(ha))
2285                 goto skip_msi;
2286
2287         ret = pci_enable_msi(ha->pdev);
2288         if (!ret) {
2289                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2290                 ha->flags.msi_enabled = 1;
2291         }
2292 skip_msi:
2293
2294         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2295             IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2296         if (ret) {
2297                 qla_printk(KERN_WARNING, ha,
2298                     "Failed to reserve interrupt %d -- already in use.\n",
2299                     ha->pdev->irq);
2300                 goto fail;
2301         }
2302         ha->flags.inta_enabled = 1;
2303 clear_risc_ints:
2304
2305         /*
2306          * FIXME: Noted that 8014s were being dropped during NK testing.
2307          * Timing deltas during MSI-X/INTa transitions?
2308          */
2309         if (IS_QLA81XX(ha))
2310                 goto fail;
2311         spin_lock_irq(&ha->hardware_lock);
2312         if (IS_FWI2_CAPABLE(ha)) {
2313                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2314                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2315         } else {
2316                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2317                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2318                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2319         }
2320         spin_unlock_irq(&ha->hardware_lock);
2321
2322 fail:
2323         return ret;
2324 }
2325
2326 void
2327 qla2x00_free_irqs(scsi_qla_host_t *vha)
2328 {
2329         struct qla_hw_data *ha = vha->hw;
2330         struct rsp_que *rsp = ha->rsp_q_map[0];
2331
2332         if (ha->flags.msix_enabled)
2333                 qla24xx_disable_msix(ha);
2334         else if (ha->flags.inta_enabled) {
2335                 free_irq(ha->pdev->irq, rsp);
2336                 pci_disable_msi(ha->pdev);
2337         }
2338 }
2339
2340
2341 int qla25xx_request_irq(struct rsp_que *rsp)
2342 {
2343         struct qla_hw_data *ha = rsp->hw;
2344         struct qla_init_msix_entry *intr = &msix_entries[2];
2345         struct qla_msix_entry *msix = rsp->msix;
2346         int ret;
2347
2348         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2349         if (ret) {
2350                 qla_printk(KERN_WARNING, ha,
2351                         "MSI-X: Unable to register handler -- %x/%d.\n",
2352                         msix->vector, ret);
2353                 return ret;
2354         }
2355         msix->have_irq = 1;
2356         msix->rsp = rsp;
2357         return ret;
2358 }
2359
2360 struct scsi_qla_host *
2361 qla25xx_get_host(struct rsp_que *rsp)
2362 {
2363         srb_t *sp;
2364         struct qla_hw_data *ha = rsp->hw;
2365         struct scsi_qla_host *vha = NULL;
2366         struct sts_entry_24xx *pkt;
2367         struct req_que *req;
2368         uint16_t que;
2369         uint32_t handle;
2370
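        /*
         * Peek at the handle of the next status entry to locate the vha that
         * owns the command; fall back to the base (physical) host if the
         * handle cannot be resolved.
         */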
2371         pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2372         que = MSW(pkt->handle);
2373         handle = (uint32_t) LSW(pkt->handle);
2374         req = ha->req_q_map[que];
2375         if (handle < MAX_OUTSTANDING_COMMANDS) {
2376                 sp = req->outstanding_cmds[handle];
2377                 if (sp)
2378                         return  sp->fcport->vha;
2379                         return sp->fcport->vha;
2380                         goto base_que;
2381         }
2382 base_que:
2383         vha = pci_get_drvdata(ha->pdev);
2384         return vha;
2385 }