/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on the CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
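
/*
 * Worked example (illustrative only, not from the original source): the
 * first Command Type 2 IOCB carries up to 3 DSDs and each Continuation
 * Type 0 IOCB carries up to 7 more, so for dsds > 3 the loop above
 * computes 1 + DIV_ROUND_UP(dsds - 3, 7). For dsds = 10 this yields
 * 1 + (7 / 7) = 2 IOCBs; for dsds = 11 the remainder of (11 - 3) % 7 = 1
 * adds a third IOCB.
 */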

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
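
/*
 * Worked example (illustrative only): the 64-bit variant packs 2 DSDs into
 * the Command Type 3 IOCB and 5 into each Continuation Type 1 IOCB, since
 * every entry needs an extra dword for the upper 32 address bits. For
 * dsds = 7: 1 + (7 - 2) / 5 = 2 IOCBs; for dsds = 8 the remainder of 1
 * adds a third IOCB.
 */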

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
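
/*
 * Example mapping (for illustration): a WRITE_PASS command on a host that
 * advertises SHOST_DIX_GUARD_IP gets PO_MODE_DIF_TCP_CKSUM, so the firmware
 * verifies/produces an IP checksum guard instead of a T10 CRC; all other
 * pass-through and normal requests fall back to PO_MODE_DIF_PASS.
 */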

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }
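
        /*
         * Worked example (illustrative only): with a 128-entry ring,
         * ring_index 100 and a hardware out pointer of 20, the free-slot
         * count above becomes 128 - (100 - 20) = 48 entries; the command
         * is queued only if that leaves at least req_cnt + 2 slots.
         */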

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        } else {
                cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct mrk_entry_fx00 *mrkfx = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_QLAFX00(ha)) {
                        mrkfx = (struct mrk_entry_fx00 *) mrk;
                        mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
                        mrkfx->handle_hi = 0;
                        mrkfx->tgt_id = cpu_to_le16(loop_id);
                        mrkfx->lun[1] = LSB(lun);
                        mrkfx->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
                } else if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
                __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}
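
/*
 * A note on the Type 6 layout (illustrative summary, not from the original
 * comments): the command IOCB itself points at the first external DSD list
 * via fcp_data_dseg_address/len; when a list fills up, the last three
 * dwords written into it describe the next list's DMA address and length,
 * so the firmware walks a chain of DSD lists instead of continuation IOCBs.
 */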

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
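
/*
 * Equivalent formulation (illustrative only): for dsds > 0 the pair of
 * statements above is just a round-up division,
 *
 *      dsd_lists = DIV_ROUND_UP(dsds, QLA_DSDS_PER_IOCB);
 */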

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}
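
/*
 * Worked example (illustrative only): for a DIF Type 1 command at
 * LBA 0x100000001, the reference tag programmed above is the low 32 bits,
 * 0x00000001, the app tag is 0, and, with HBA error checking enabled, all
 * four ref_tag_mask bytes are 0xff so the firmware checks every byte of
 * the reference tag.
 */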

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
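
/*
 * Worked example (illustrative only): with blk_sz = 512 and a single
 * 700-byte SG element, the first call returns a 512-byte chunk
 * (partial = 0, one full protection interval); the second call returns
 * the remaining 188 bytes with partial = 1 and advances to the next SG
 * element, so a 512-byte interval can straddle an SG boundary.
 */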

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        uint32_t        prot_int;
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        prot_int = cmd->device->sector_size;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        sgx.tot_bytes = scsi_bufflen(cmd);
        sgx.cur_sg = scsi_sglist(cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(cmd);

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
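
/*
 * A note on the arithmetic above (illustrative): each DSD entry is three
 * 32-bit words (address low/high plus length), i.e. 12 bytes, and the
 * extra "+ 1" entry reserves room for the chain pointer to the next DSD
 * list, hence dsd_list_len = (avail_dsds + 1) * 12.
 */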

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        cmd = GET_CMD_SP(sp);
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        int                     sgc;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = GET_CMD_SP(sp);

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_ORDERED;
                        break;
                default:
                        fcp_cmnd->task_attribute = TSK_SIMPLE;
                        break;
                }
        } else {
                fcp_cmnd->task_attribute = TSK_SIMPLE;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                total_bytes = data_bytes;
                data_bytes += dif_bytes;
                break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                total_bytes = data_bytes + dif_bytes;
                break;
        default:
                BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure bundling if we need to fetch interleaved
                 * protection data with separate PCI accesses.
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walks data segments */
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                    cur_dsd, tot_dsds))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
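
/*
 * Worked example (illustrative only): a 64 KB READ_PASS on 512-byte
 * sectors carries (65536 / 512) * 8 = 1024 bytes of DIF, so total_bytes
 * and the fcp_dl field become 65536 + 1024 = 66560 while data_bytes stays
 * 65536; for READ_INSERT/WRITE_STRIP the host side is unprotected, so
 * total_bytes stays 65536 and data_bytes grows to 66560 instead.
 */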

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
1427 int
1428 qla24xx_start_scsi(srb_t *sp)
1429 {
1430         int             ret, nseg;
1431         unsigned long   flags;
1432         uint32_t        *clr_ptr;
1433         uint32_t        index;
1434         uint32_t        handle;
1435         struct cmd_type_7 *cmd_pkt;
1436         uint16_t        cnt;
1437         uint16_t        req_cnt;
1438         uint16_t        tot_dsds;
1439         struct req_que *req = NULL;
1440         struct rsp_que *rsp = NULL;
1441         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1442         struct scsi_qla_host *vha = sp->fcport->vha;
1443         struct qla_hw_data *ha = vha->hw;
1444         char            tag[2];
1445
1446         /* Setup device pointers. */
1447         ret = 0;
1448
1449         qla25xx_set_que(sp, &rsp);
1450         req = vha->req;
1451
1452         /* So we know we haven't pci_map'ed anything yet */
1453         tot_dsds = 0;
1454
1455         /* Send marker if required */
1456         if (vha->marker_needed != 0) {
1457                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1458                     QLA_SUCCESS)
1459                         return QLA_FUNCTION_FAILED;
1460                 vha->marker_needed = 0;
1461         }
1462
1463         /* Acquire ring specific lock */
1464         spin_lock_irqsave(&ha->hardware_lock, flags);
1465
1466         /* Check for room in outstanding command list. */
1467         handle = req->current_outstanding_cmd;
1468         for (index = 1; index < req->num_outstanding_cmds; index++) {
1469                 handle++;
1470                 if (handle == req->num_outstanding_cmds)
1471                         handle = 1;
1472                 if (!req->outstanding_cmds[handle])
1473                         break;
1474         }
1475         if (index == req->num_outstanding_cmds)
1476                 goto queuing_error;
1477
1478         /* Map the sg table so we have an accurate count of sg entries needed */
1479         if (scsi_sg_count(cmd)) {
1480                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1481                     scsi_sg_count(cmd), cmd->sc_data_direction);
1482                 if (unlikely(!nseg))
1483                         goto queuing_error;
1484         } else
1485                 nseg = 0;
1486
1487         tot_dsds = nseg;
1488         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1489         if (req->cnt < (req_cnt + 2)) {
1490                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1491
1492                 if (req->ring_index < cnt)
1493                         req->cnt = cnt - req->ring_index;
1494                 else
1495                         req->cnt = req->length -
1496                                 (req->ring_index - cnt);
1497                 if (req->cnt < (req_cnt + 2))
1498                         goto queuing_error;
1499         }
1500
1501         /* Build command packet. */
1502         req->current_outstanding_cmd = handle;
1503         req->outstanding_cmds[handle] = sp;
1504         sp->handle = handle;
1505         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1506         req->cnt -= req_cnt;
1507
1508         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1509         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1510
1511         /* Zero out remaining portion of packet. */
1512         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1513         clr_ptr = (uint32_t *)cmd_pkt + 2;
1514         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1515         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1516
1517         /* Set NPORT-ID and LUN number */
1518         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1519         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1520         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1521         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1522         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1523
1524         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1525         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1526
1527         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1528         if (scsi_populate_tag_msg(cmd, tag)) {
1529                 switch (tag[0]) {
1530                 case HEAD_OF_QUEUE_TAG:
1531                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1532                         break;
1533                 case ORDERED_QUEUE_TAG:
1534                         cmd_pkt->task = TSK_ORDERED;
1535                         break;
1536                 default:
1537                         cmd_pkt->task = TSK_SIMPLE;
1538                         break;
1539                 }
1540         } else {
1541                 cmd_pkt->task = TSK_SIMPLE;
1542         }
1543
1544         /* Load SCSI command packet. */
1545         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1546         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1547
1548         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1549
1550         /* Build IOCB segments */
1551         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1552
1553         /* Set total data segment count. */
1554         cmd_pkt->entry_count = (uint8_t)req_cnt;
1555         /* Specify response queue number where completion should happen */
1556         cmd_pkt->entry_status = (uint8_t) rsp->id;
1557         wmb();
1558         /* Adjust ring index. */
1559         req->ring_index++;
1560         if (req->ring_index == req->length) {
1561                 req->ring_index = 0;
1562                 req->ring_ptr = req->ring;
1563         } else
1564                 req->ring_ptr++;
1565
1566         sp->flags |= SRB_DMA_VALID;
1567
1568         /* Set chip new ring index. */
1569         WRT_REG_DWORD(req->req_q_in, req->ring_index);
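        /*
         * The read-back of HCCR acts as a PCI posted-write flush so the
         * doorbell write above reaches the ISP before the lock drops.
         */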
1570         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1571
1572         /* Manage unprocessed RIO/ZIO commands in response queue. */
1573         if (vha->flags.process_response_queue &&
1574                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1575                 qla24xx_process_response_queue(vha, rsp);
1576
1577         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1578         return QLA_SUCCESS;
1579
1580 queuing_error:
1581         if (tot_dsds)
1582                 scsi_dma_unmap(cmd);
1583
1584         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1585
1586         return QLA_FUNCTION_FAILED;
1587 }
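
/*
 * Illustrative sketch (not driver code): how the queue-space check in
 * qla24xx_start_scsi() derives the free-entry count.  The request ring
 * is circular, so the answer depends on whether the firmware's out
 * pointer has wrapped relative to the driver's ring index.  Names are
 * local to this example.
 */
static inline uint16_t qla_example_ring_space(uint16_t ring_index,
        uint16_t out, uint16_t length)
{
        if (ring_index < out)
                return out - ring_index;
        /* Free region wraps past the end of the ring. */
        return length - (ring_index - out);
}
/* E.g. ring_index = 10, out = 4, length = 2048 -> 2042 free entries. */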
1588
1589 /**
1590  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1591  * @sp: command to send to the ISP
1592  *
1593  * Returns non-zero if a failure occurred, else zero.
1594  */
1595 int
1596 qla24xx_dif_start_scsi(srb_t *sp)
1597 {
1598         int                     nseg;
1599         unsigned long           flags;
1600         uint32_t                *clr_ptr;
1601         uint32_t                index;
1602         uint32_t                handle;
1603         uint16_t                cnt;
1604         uint16_t                req_cnt = 0;
1605         uint16_t                tot_dsds;
1606         uint16_t                tot_prot_dsds;
1607         uint16_t                fw_prot_opts = 0;
1608         struct req_que          *req = NULL;
1609         struct rsp_que          *rsp = NULL;
1610         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1611         struct scsi_qla_host    *vha = sp->fcport->vha;
1612         struct qla_hw_data      *ha = vha->hw;
1613         struct cmd_type_crc_2   *cmd_pkt;
1614         uint32_t                status = 0;
1615
1616 #define QDSS_GOT_Q_SPACE        BIT_0
1617
1618         /* Only process protection I/O or CDBs longer than 16 bytes here */
1619         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1620                 if (cmd->cmd_len <= 16)
1621                         return qla24xx_start_scsi(sp);
1622         }
1623
1624         /* Setup device pointers. */
1625
1626         qla25xx_set_que(sp, &rsp);
1627         req = vha->req;
1628
1629         /* So we know we haven't pci_map'ed anything yet */
1630         tot_dsds = 0;
1631
1632         /* Send marker if required */
1633         if (vha->marker_needed != 0) {
1634                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1635                     QLA_SUCCESS)
1636                         return QLA_FUNCTION_FAILED;
1637                 vha->marker_needed = 0;
1638         }
1639
1640         /* Acquire ring specific lock */
1641         spin_lock_irqsave(&ha->hardware_lock, flags);
1642
1643         /* Check for room in outstanding command list. */
1644         handle = req->current_outstanding_cmd;
1645         for (index = 1; index < req->num_outstanding_cmds; index++) {
1646                 handle++;
1647                 if (handle == req->num_outstanding_cmds)
1648                         handle = 1;
1649                 if (!req->outstanding_cmds[handle])
1650                         break;
1651         }
1652
1653         if (index == req->num_outstanding_cmds)
1654                 goto queuing_error;
1655
1656         /* Compute number of required data segments */
1657         /* Map the sg table so we have an accurate count of sg entries needed */
1658         if (scsi_sg_count(cmd)) {
1659                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1660                     scsi_sg_count(cmd), cmd->sc_data_direction);
1661                 if (unlikely(!nseg))
1662                         goto queuing_error;
1663                 else
1664                         sp->flags |= SRB_DMA_VALID;
1665
1666                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1667                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1668                         struct qla2_sgx sgx;
1669                         uint32_t        partial;
1670
1671                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1672                         sgx.tot_bytes = scsi_bufflen(cmd);
1673                         sgx.cur_sg = scsi_sglist(cmd);
1674                         sgx.sp = sp;
1675
1676                         nseg = 0;
1677                         while (qla24xx_get_one_block_sg(
1678                             cmd->device->sector_size, &sgx, &partial))
1679                                 nseg++;
1680                 }
1681         } else
1682                 nseg = 0;
1683
1684         /* number of required data segments */
1685         tot_dsds = nseg;
1686
1687         /* Compute number of required protection segments */
1688         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1689                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1690                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1691                 if (unlikely(!nseg))
1692                         goto queuing_error;
1693                 else
1694                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1695
1696                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1697                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1698                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1699                 }
1700         } else {
1701                 nseg = 0;
1702         }
1703
1704         req_cnt = 1;
1705         /* Total Data and protection sg segment(s) */
1706         tot_prot_dsds = nseg;
1707         tot_dsds += nseg;
1708         if (req->cnt < (req_cnt + 2)) {
1709                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1710
1711                 if (req->ring_index < cnt)
1712                         req->cnt = cnt - req->ring_index;
1713                 else
1714                         req->cnt = req->length -
1715                                 (req->ring_index - cnt);
1716                 if (req->cnt < (req_cnt + 2))
1717                         goto queuing_error;
1718         }
1719
1720         status |= QDSS_GOT_Q_SPACE;
1721
1722         /* Build header part of command packet (excluding the OPCODE). */
1723         req->current_outstanding_cmd = handle;
1724         req->outstanding_cmds[handle] = sp;
1725         sp->handle = handle;
1726         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1727         req->cnt -= req_cnt;
1728
1729         /* Fill-in common area */
1730         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1731         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1732
1733         clr_ptr = (uint32_t *)cmd_pkt + 2;
1734         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1735
1736         /* Set NPORT-ID and LUN number */
1737         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1738         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1739         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1740         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1741
1742         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1743         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1744
1745         /* Total Data and protection segment(s) */
1746         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1747
1748         /* Build IOCB segments and adjust for data protection segments */
1749         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1750             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1751                 QLA_SUCCESS)
1752                 goto queuing_error;
1753
1754         cmd_pkt->entry_count = (uint8_t)req_cnt;
1755         /* Specify response queue number where completion should happen */
1756         cmd_pkt->entry_status = (uint8_t) rsp->id;
1757         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1758         wmb();
1759
1760         /* Adjust ring index. */
1761         req->ring_index++;
1762         if (req->ring_index == req->length) {
1763                 req->ring_index = 0;
1764                 req->ring_ptr = req->ring;
1765         } else
1766                 req->ring_ptr++;
1767
1768         /* Set chip new ring index. */
1769         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1770         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1771
1772         /* Manage unprocessed RIO/ZIO commands in response queue. */
1773         if (vha->flags.process_response_queue &&
1774             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1775                 qla24xx_process_response_queue(vha, rsp);
1776
1777         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1778
1779         return QLA_SUCCESS;
1780
1781 queuing_error:
1782         if (status & QDSS_GOT_Q_SPACE) {
1783                 req->outstanding_cmds[handle] = NULL;
1784                 req->cnt += req_cnt;
1785         }
1786         /* Cleanup will be performed by the caller (queuecommand) */
1787
1788         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1789         return QLA_FUNCTION_FAILED;
1790 }
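
/*
 * Illustrative sketch (not driver code) of the protection-segment count
 * used above for the READ_INSERT/WRITE_STRIP cases: when the HBA itself
 * inserts or strips the DIF tuples, one protection segment is needed
 * per logical block, hence nseg = bufflen / sector_size.
 */
static inline int qla_example_prot_segs(uint32_t bufflen,
        uint32_t sector_size)
{
        return bufflen / sector_size;   /* e.g. 32768 / 512 = 64 */
}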
1791
1792
1793 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1794 {
1795         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1796         struct qla_hw_data *ha = sp->fcport->vha->hw;
1797         int affinity = cmd->request->cpu;
1798
1799         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1800                 affinity < ha->max_rsp_queues - 1)
1801                 *rsp = ha->rsp_q_map[affinity + 1];
1802         else
1803                 *rsp = ha->rsp_q_map[0];
1804 }
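
/*
 * Sketch (not driver code) of the affinity mapping implemented above:
 * CPUs 0 .. max_rsp_queues - 2 are steered to response queues 1 ..
 * max_rsp_queues - 1; everything else falls back to queue 0.
 */
static inline int qla_example_rsp_que_index(int cpu, int max_rsp_queues)
{
        if (cpu >= 0 && cpu < max_rsp_queues - 1)
                return cpu + 1; /* per-CPU queues start at index 1 */
        return 0;               /* default response queue */
}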
1805
1806 /* Generic Control-SRB manipulation functions. */
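/*
 * qla2x00_alloc_iocbs() manipulates the request ring and the
 * outstanding-command array, so it is invoked with hardware_lock held --
 * see qla2x00_start_sp() below for the canonical usage.
 */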
1807 void *
1808 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1809 {
1810         struct qla_hw_data *ha = vha->hw;
1811         struct req_que *req = ha->req_q_map[0];
1812         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1813         uint32_t index, handle;
1814         request_t *pkt;
1815         uint16_t cnt, req_cnt;
1816
1817         pkt = NULL;
1818         req_cnt = 1;
1819         handle = 0;
1820
1821         if (!sp)
1822                 goto skip_cmd_array;
1823
1824         /* Check for room in outstanding command list. */
1825         handle = req->current_outstanding_cmd;
1826         for (index = 1; index < req->num_outstanding_cmds; index++) {
1827                 handle++;
1828                 if (handle == req->num_outstanding_cmds)
1829                         handle = 1;
1830                 if (!req->outstanding_cmds[handle])
1831                         break;
1832         }
1833         if (index == req->num_outstanding_cmds) {
1834                 ql_log(ql_log_warn, vha, 0x700b,
1835                     "No room on outstanding cmd array.\n");
1836                 goto queuing_error;
1837         }
1838
1839         /* Prep command array. */
1840         req->current_outstanding_cmd = handle;
1841         req->outstanding_cmds[handle] = sp;
1842         sp->handle = handle;
1843
1844         /* Adjust entry-counts as needed. */
1845         if (sp->type != SRB_SCSI_CMD)
1846                 req_cnt = sp->iocbs;
1847
1848 skip_cmd_array:
1849         /* Check for room on request queue. */
1850         if (req->cnt < req_cnt) {
1851                 if (ha->mqenable || IS_QLA83XX(ha))
1852                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1853                 else if (IS_P3P_TYPE(ha))
1854                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1855                 else if (IS_FWI2_CAPABLE(ha))
1856                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1857                 else if (IS_QLAFX00(ha))
1858                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1859                 else
1860                         cnt = qla2x00_debounce_register(
1861                             ISP_REQ_Q_OUT(ha, &reg->isp));
1862
1863                 if  (req->ring_index < cnt)
1864                         req->cnt = cnt - req->ring_index;
1865                 else
1866                         req->cnt = req->length -
1867                             (req->ring_index - cnt);
1868         }
1869         if (req->cnt < req_cnt)
1870                 goto queuing_error;
1871
1872         /* Prep packet */
1873         req->cnt -= req_cnt;
1874         pkt = req->ring_ptr;
1875         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1876         if (IS_QLAFX00(ha)) {
1877                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1878                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1879         } else {
1880                 pkt->entry_count = req_cnt;
1881                 pkt->handle = handle;
1882         }
1883
1884 queuing_error:
1885         return pkt;
1886 }
1887
1888 static void
1889 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1890 {
1891         struct srb_iocb *lio = &sp->u.iocb_cmd;
1892
1893         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1894         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1895         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1896                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1897         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1898                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1899         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1900         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1901         logio->port_id[1] = sp->fcport->d_id.b.area;
1902         logio->port_id[2] = sp->fcport->d_id.b.domain;
1903         logio->vp_index = sp->fcport->vha->vp_idx;
1904 }
1905
1906 static void
1907 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1908 {
1909         struct qla_hw_data *ha = sp->fcport->vha->hw;
1910         struct srb_iocb *lio = &sp->u.iocb_cmd;
1911         uint16_t opts;
1912
1913         mbx->entry_type = MBX_IOCB_TYPE;
1914         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1915         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1916         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1917         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1918         if (HAS_EXTENDED_IDS(ha)) {
1919                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1920                 mbx->mb10 = cpu_to_le16(opts);
1921         } else {
1922                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1923         }
1924         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1925         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1926             sp->fcport->d_id.b.al_pa);
1927         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1928 }
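
/*
 * Worked example for the mailbox packing above: without extended IDs
 * the loop ID and the option bits share mb1, so loop_id 0x23 with the
 * conditional-PLOGI option (BIT_0) gives (0x23 << 8) | 0x01 = 0x2301.
 */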
1929
1930 static void
1931 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1932 {
1933         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1934         logio->control_flags =
1935             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1936         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1937         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1938         logio->port_id[1] = sp->fcport->d_id.b.area;
1939         logio->port_id[2] = sp->fcport->d_id.b.domain;
1940         logio->vp_index = sp->fcport->vha->vp_idx;
1941 }
1942
1943 static void
1944 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1945 {
1946         struct qla_hw_data *ha = sp->fcport->vha->hw;
1947
1948         mbx->entry_type = MBX_IOCB_TYPE;
1949         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1950         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1951         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1952             cpu_to_le16(sp->fcport->loop_id):
1953             cpu_to_le16(sp->fcport->loop_id << 8);
1954         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1955         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1956             sp->fcport->d_id.b.al_pa);
1957         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1958         /* Implicit: mbx->mbx10 = 0. */
1959 }
1960
1961 static void
1962 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1963 {
1964         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1965         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1966         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1967         logio->vp_index = sp->fcport->vha->vp_idx;
1968 }
1969
1970 static void
1971 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1972 {
1973         struct qla_hw_data *ha = sp->fcport->vha->hw;
1974
1975         mbx->entry_type = MBX_IOCB_TYPE;
1976         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1977         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1978         if (HAS_EXTENDED_IDS(ha)) {
1979                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1980                 mbx->mb10 = cpu_to_le16(BIT_0);
1981         } else {
1982                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1983         }
1984         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1985         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1986         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1987         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1988         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1989 }
1990
1991 static void
1992 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1993 {
1994         uint32_t flags;
1995         unsigned int lun;
1996         struct fc_port *fcport = sp->fcport;
1997         scsi_qla_host_t *vha = fcport->vha;
1998         struct qla_hw_data *ha = vha->hw;
1999         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2000         struct req_que *req = vha->req;
2001
2002         flags = iocb->u.tmf.flags;
2003         lun = iocb->u.tmf.lun;
2004
2005         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2006         tsk->entry_count = 1;
2007         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2008         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2009         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2010         tsk->control_flags = cpu_to_le32(flags);
2011         tsk->port_id[0] = fcport->d_id.b.al_pa;
2012         tsk->port_id[1] = fcport->d_id.b.area;
2013         tsk->port_id[2] = fcport->d_id.b.domain;
2014         tsk->vp_index = fcport->vha->vp_idx;
2015
2016         if (flags == TCF_LUN_RESET) {
2017                 int_to_scsilun(lun, &tsk->lun);
2018                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2019                         sizeof(tsk->lun));
2020         }
2021 }
2022
2023 static void
2024 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2025 {
2026         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2027
2028         els_iocb->entry_type = ELS_IOCB_TYPE;
2029         els_iocb->entry_count = 1;
2030         els_iocb->sys_define = 0;
2031         els_iocb->entry_status = 0;
2032         els_iocb->handle = sp->handle;
2033         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2034         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2035         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2036         els_iocb->sof_type = EST_SOFI3;
2037         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2038
2039         els_iocb->opcode =
2040             sp->type == SRB_ELS_CMD_RPT ?
2041             bsg_job->request->rqst_data.r_els.els_code :
2042             bsg_job->request->rqst_data.h_els.command_code;
2043         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2044         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2045         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2046         els_iocb->control_flags = 0;
2047         els_iocb->rx_byte_count =
2048             cpu_to_le32(bsg_job->reply_payload.payload_len);
2049         els_iocb->tx_byte_count =
2050             cpu_to_le32(bsg_job->request_payload.payload_len);
2051
2052         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2053             (bsg_job->request_payload.sg_list)));
2054         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2055             (bsg_job->request_payload.sg_list)));
2056         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2057             (bsg_job->request_payload.sg_list));
2058
2059         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2060             (bsg_job->reply_payload.sg_list)));
2061         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2062             (bsg_job->reply_payload.sg_list)));
2063         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2064             (bsg_job->reply_payload.sg_list));
2065
2066         sp->fcport->vha->qla_stats.control_requests++;
2067 }
2068
2069 static void
2070 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2071 {
2072         uint16_t        avail_dsds;
2073         uint32_t        *cur_dsd;
2074         struct scatterlist *sg;
2075         int index;
2076         uint16_t tot_dsds;
2077         scsi_qla_host_t *vha = sp->fcport->vha;
2078         struct qla_hw_data *ha = vha->hw;
2079         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2080         int loop_iteration = 0;
2081         int cont_iocb_prsnt = 0;
2082         int entry_count = 1;
2083
2084         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2085         ct_iocb->entry_type = CT_IOCB_TYPE;
2086         ct_iocb->entry_status = 0;
2087         ct_iocb->handle1 = sp->handle;
2088         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2089         ct_iocb->status = __constant_cpu_to_le16(0);
2090         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2091         ct_iocb->timeout = 0;
2092         ct_iocb->cmd_dsd_count =
2093             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2094         ct_iocb->total_dsd_count =
2095             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2096         ct_iocb->req_bytecount =
2097             cpu_to_le32(bsg_job->request_payload.payload_len);
2098         ct_iocb->rsp_bytecount =
2099             cpu_to_le32(bsg_job->reply_payload.payload_len);
2100
2101         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2102             (bsg_job->request_payload.sg_list)));
2103         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2104             (bsg_job->request_payload.sg_list)));
2105         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2106
2107         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2108             (bsg_job->reply_payload.sg_list)));
2109         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2110             (bsg_job->reply_payload.sg_list)));
2111         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2112
2113         avail_dsds = 1;
2114         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2115         index = 0;
2116         tot_dsds = bsg_job->reply_payload.sg_cnt;
2117
2118         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2119                 dma_addr_t       sle_dma;
2120                 cont_a64_entry_t *cont_pkt;
2121
2122                 /* Allocate additional continuation packets? */
2123                 if (avail_dsds == 0) {
2124                         /*
2125                          * Five DSDs are available in the Cont.
2126                          * Type 1 IOCB.
2127                          */
2128                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2129                             vha->hw->req_q_map[0]);
2130                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2131                         avail_dsds = 5;
2132                         cont_iocb_prsnt = 1;
2133                         entry_count++;
2134                 }
2135
2136                 sle_dma = sg_dma_address(sg);
2137                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2138                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2139                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2140                 loop_iteration++;
2141                 avail_dsds--;
2142         }
2143         ct_iocb->entry_count = entry_count;
2144
2145         sp->fcport->vha->qla_stats.control_requests++;
2146 }
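
/*
 * Sketch (not driver code) of the DSD bookkeeping used above and in the
 * 24xx variant below: the base IOCB holds one inline response DSD and
 * each Continuation Type 1 IOCB adds room for five more, so the entry
 * count grows by one per five DSDs past the first.
 */
static inline int qla_example_ct_entry_count(uint16_t rsp_dsds)
{
        int entries = 1;                        /* the CT IOCB itself */

        if (rsp_dsds > 1)
                entries += (rsp_dsds - 1 + 4) / 5;      /* ceil((n-1)/5) */
        return entries;                         /* e.g. 7 DSDs -> 3 entries */
}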
2147
2148 static void
2149 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2150 {
2151         uint16_t        avail_dsds;
2152         uint32_t        *cur_dsd;
2153         struct scatterlist *sg;
2154         int index;
2155         uint16_t tot_dsds;
2156         scsi_qla_host_t *vha = sp->fcport->vha;
2157         struct qla_hw_data *ha = vha->hw;
2158         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2159         int loop_iteration = 0;
2160         int cont_iocb_prsnt = 0;
2161         int entry_count = 1;
2162
2163         ct_iocb->entry_type = CT_IOCB_TYPE;
2164         ct_iocb->entry_status = 0;
2165         ct_iocb->sys_define = 0;
2166         ct_iocb->handle = sp->handle;
2167
2168         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2169         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2170         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2171
2172         ct_iocb->cmd_dsd_count =
2173             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2174         ct_iocb->timeout = 0;
2175         ct_iocb->rsp_dsd_count =
2176             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2177         ct_iocb->rsp_byte_count =
2178             cpu_to_le32(bsg_job->reply_payload.payload_len);
2179         ct_iocb->cmd_byte_count =
2180             cpu_to_le32(bsg_job->request_payload.payload_len);
2181         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2182             (bsg_job->request_payload.sg_list)));
2183         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2184            (bsg_job->request_payload.sg_list)));
2185         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2186             (bsg_job->request_payload.sg_list));
2187
2188         avail_dsds = 1;
2189         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2190         index = 0;
2191         tot_dsds = bsg_job->reply_payload.sg_cnt;
2192
2193         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2194                 dma_addr_t       sle_dma;
2195                 cont_a64_entry_t *cont_pkt;
2196
2197                 /* Allocate additional continuation packets? */
2198                 if (avail_dsds == 0) {
2199                         /*
2200                          * Five DSDs are available in the Cont.
2201                          * Type 1 IOCB.
2202                          */
2203                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2204                             ha->req_q_map[0]);
2205                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2206                         avail_dsds = 5;
2207                         cont_iocb_prsnt = 1;
2208                         entry_count++;
2209                 }
2210
2211                 sle_dma = sg_dma_address(sg);
2212                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2213                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2214                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2215                 loop_iteration++;
2216                 avail_dsds--;
2217         }
2218         ct_iocb->entry_count = entry_count;
2219 }
2220
2221 /*
2222  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2223  * @sp: command to send to the ISP
2224  *
2225  * Returns non-zero if a failure occurred, else zero.
2226  */
2227 int
2228 qla82xx_start_scsi(srb_t *sp)
2229 {
2230         int             ret, nseg;
2231         unsigned long   flags;
2232         struct scsi_cmnd *cmd;
2233         uint32_t        *clr_ptr;
2234         uint32_t        index;
2235         uint32_t        handle;
2236         uint16_t        cnt;
2237         uint16_t        req_cnt;
2238         uint16_t        tot_dsds;
2239         struct device_reg_82xx __iomem *reg;
2240         uint32_t dbval;
2241         uint32_t *fcp_dl;
2242         uint8_t additional_cdb_len;
2243         struct ct6_dsd *ctx;
2244         struct scsi_qla_host *vha = sp->fcport->vha;
2245         struct qla_hw_data *ha = vha->hw;
2246         struct req_que *req = NULL;
2247         struct rsp_que *rsp = NULL;
2248         char tag[2];
2249
2250         /* Setup device pointers. */
2251         ret = 0;
2252         reg = &ha->iobase->isp82;
2253         cmd = GET_CMD_SP(sp);
2254         req = vha->req;
2255         rsp = ha->rsp_q_map[0];
2256
2257         /* So we know we haven't pci_map'ed anything yet */
2258         tot_dsds = 0;
2259
2260         dbval = 0x04 | (ha->portnum << 5);
2261
2262         /* Send marker if required */
2263         if (vha->marker_needed != 0) {
2264                 if (qla2x00_marker(vha, req,
2265                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2266                         ql_log(ql_log_warn, vha, 0x300c,
2267                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2268                         return QLA_FUNCTION_FAILED;
2269                 }
2270                 vha->marker_needed = 0;
2271         }
2272
2273         /* Acquire ring specific lock */
2274         spin_lock_irqsave(&ha->hardware_lock, flags);
2275
2276         /* Check for room in outstanding command list. */
2277         handle = req->current_outstanding_cmd;
2278         for (index = 1; index < req->num_outstanding_cmds; index++) {
2279                 handle++;
2280                 if (handle == req->num_outstanding_cmds)
2281                         handle = 1;
2282                 if (!req->outstanding_cmds[handle])
2283                         break;
2284         }
2285         if (index == req->num_outstanding_cmds)
2286                 goto queuing_error;
2287
2288         /* Map the sg table so we have an accurate count of sg entries needed */
2289         if (scsi_sg_count(cmd)) {
2290                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2291                     scsi_sg_count(cmd), cmd->sc_data_direction);
2292                 if (unlikely(!nseg))
2293                         goto queuing_error;
2294         } else
2295                 nseg = 0;
2296
2297         tot_dsds = nseg;
2298
2299         if (tot_dsds > ql2xshiftctondsd) {
2300                 struct cmd_type_6 *cmd_pkt;
2301                 uint16_t more_dsd_lists = 0;
2302                 struct dsd_dma *dsd_ptr;
2303                 uint16_t i;
2304
2305                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2306                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2307                         ql_dbg(ql_dbg_io, vha, 0x300d,
2308                             "Num of DSD lists %d exceeds %d for cmd=%p.\n",
2309                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2310                             cmd);
2311                         goto queuing_error;
2312                 }
2313
2314                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2315                         goto sufficient_dsds;
2316                 else
2317                         more_dsd_lists -= ha->gbl_dsd_avail;
2318
2319                 for (i = 0; i < more_dsd_lists; i++) {
2320                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2321                         if (!dsd_ptr) {
2322                                 ql_log(ql_log_fatal, vha, 0x300e,
2323                                     "Failed to allocate memory for dsd_dma "
2324                                     "for cmd=%p.\n", cmd);
2325                                 goto queuing_error;
2326                         }
2327
2328                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2329                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2330                         if (!dsd_ptr->dsd_addr) {
2331                                 kfree(dsd_ptr);
2332                                 ql_log(ql_log_fatal, vha, 0x300f,
2333                                     "Failed to allocate memory for dsd_addr "
2334                                     "for cmd=%p.\n", cmd);
2335                                 goto queuing_error;
2336                         }
2337                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2338                         ha->gbl_dsd_avail++;
2339                 }
2340
2341 sufficient_dsds:
2342                 req_cnt = 1;
2343
2344                 if (req->cnt < (req_cnt + 2)) {
2345                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2346                                 &reg->req_q_out[0]);
2347                         if (req->ring_index < cnt)
2348                                 req->cnt = cnt - req->ring_index;
2349                         else
2350                                 req->cnt = req->length -
2351                                         (req->ring_index - cnt);
2352                         if (req->cnt < (req_cnt + 2))
2353                                 goto queuing_error;
2354                 }
2355
2356                 ctx = sp->u.scmd.ctx =
2357                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2358                 if (!ctx) {
2359                         ql_log(ql_log_fatal, vha, 0x3010,
2360                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2361                         goto queuing_error;
2362                 }
2363
2364                 memset(ctx, 0, sizeof(struct ct6_dsd));
2365                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2366                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2367                 if (!ctx->fcp_cmnd) {
2368                         ql_log(ql_log_fatal, vha, 0x3011,
2369                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2370                         goto queuing_error;
2371                 }
2372
2373                 /* Initialize the DSD list and dma handle */
2374                 INIT_LIST_HEAD(&ctx->dsd_list);
2375                 ctx->dsd_use_cnt = 0;
2376
2377                 if (cmd->cmd_len > 16) {
2378                         additional_cdb_len = cmd->cmd_len - 16;
2379                         if ((cmd->cmd_len % 4) != 0) {
2380                                 /* SCSI commands longer than 16 bytes must
2381                                  * have a length that is a multiple of 4.
2382                                  */
2383                                 ql_log(ql_log_warn, vha, 0x3012,
2384                                     "scsi cmd len %d not multiple of 4 "
2385                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2386                                 goto queuing_error_fcp_cmnd;
2387                         }
2388                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2389                 } else {
2390                         additional_cdb_len = 0;
2391                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2392                 }
2393
2394                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2395                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2396
2397                 /* Zero out remaining portion of packet. */
2398                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2399                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2400                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2401                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2402
2403                 /* Set NPORT-ID and LUN number */
2404                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2405                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2406                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2407                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2408                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2409
2410                 /* Build IOCB segments */
2411                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2412                         goto queuing_error_fcp_cmnd;
2413
2414                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2415                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2416
2417                 /* build FCP_CMND IU */
2418                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2419                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2420                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2421
2422                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2423                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2424                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2425                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2426
2427                 /*
2428                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2429                  */
2430                 if (scsi_populate_tag_msg(cmd, tag)) {
2431                         switch (tag[0]) {
2432                         case HEAD_OF_QUEUE_TAG:
2433                                 ctx->fcp_cmnd->task_attribute =
2434                                     TSK_HEAD_OF_QUEUE;
2435                                 break;
2436                         case ORDERED_QUEUE_TAG:
2437                                 ctx->fcp_cmnd->task_attribute =
2438                                     TSK_ORDERED;
2439                                 break;
2440                         }
2441                 }
2442
2443                 /* Populate the FCP_PRIO. */
2444                 if (ha->flags.fcp_prio_enabled)
2445                         ctx->fcp_cmnd->task_attribute |=
2446                             sp->fcport->fcp_prio << 3;
2447
2448                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2449
2450                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2451                     additional_cdb_len);
2452                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2453
2454                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2455                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2456                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2457                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2458                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2459
2460                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2461                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2462                 /* Set total data segment count. */
2463                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2464                 /* Specify response queue number where
2465                  * completion should happen
2466                  */
2467                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2468         } else {
2469                 struct cmd_type_7 *cmd_pkt;
2470                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2471                 if (req->cnt < (req_cnt + 2)) {
2472                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2473                             &reg->req_q_out[0]);
2474                         if (req->ring_index < cnt)
2475                                 req->cnt = cnt - req->ring_index;
2476                         else
2477                                 req->cnt = req->length -
2478                                         (req->ring_index - cnt);
2479                 }
2480                 if (req->cnt < (req_cnt + 2))
2481                         goto queuing_error;
2482
2483                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2484                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2485
2486                 /* Zero out remaining portion of packet. */
2487                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2488                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2489                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2490                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2491
2492                 /* Set NPORT-ID and LUN number */
2493                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2494                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2495                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2496                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2497                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2498
2499                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2500                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2501                     sizeof(cmd_pkt->lun));
2502
2503                 /*
2504                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2505                  */
2506                 if (scsi_populate_tag_msg(cmd, tag)) {
2507                         switch (tag[0]) {
2508                         case HEAD_OF_QUEUE_TAG:
2509                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2510                                 break;
2511                         case ORDERED_QUEUE_TAG:
2512                                 cmd_pkt->task = TSK_ORDERED;
2513                                 break;
2514                         }
2515                 }
2516
2517                 /* Populate the FCP_PRIO. */
2518                 if (ha->flags.fcp_prio_enabled)
2519                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2520
2521                 /* Load SCSI command packet. */
2522                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2523                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2524
2525                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2526
2527                 /* Build IOCB segments */
2528                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2529
2530                 /* Set total data segment count. */
2531                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2532                 /* Specify response queue number where
2533                  * completion should happen.
2534                  */
2535                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2536
2537         }
2538         /* Build command packet. */
2539         req->current_outstanding_cmd = handle;
2540         req->outstanding_cmds[handle] = sp;
2541         sp->handle = handle;
2542         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2543         req->cnt -= req_cnt;
2544         wmb();
2545
2546         /* Adjust ring index. */
2547         req->ring_index++;
2548         if (req->ring_index == req->length) {
2549                 req->ring_index = 0;
2550                 req->ring_ptr = req->ring;
2551         } else
2552                 req->ring_ptr++;
2553
2554         sp->flags |= SRB_DMA_VALID;
2555
2556         /* Set chip new ring index. */
2557         /* write, read and verify logic */
2558         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2559         if (ql2xdbwr)
2560                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2561         else {
2562                 WRT_REG_DWORD(
2563                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2564                         dbval);
2565                 wmb();
2566                 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2567                         WRT_REG_DWORD(
2568                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2569                                 dbval);
2570                         wmb();
2571                 }
2572         }
2573
2574         /* Manage unprocessed RIO/ZIO commands in response queue. */
2575         if (vha->flags.process_response_queue &&
2576             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2577                 qla24xx_process_response_queue(vha, rsp);
2578
2579         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2580         return QLA_SUCCESS;
2581
2582 queuing_error_fcp_cmnd:
2583         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2584 queuing_error:
2585         if (tot_dsds)
2586                 scsi_dma_unmap(cmd);
2587
2588         if (sp->u.scmd.ctx) {
2589                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2590                 sp->u.scmd.ctx = NULL;
2591         }
2592         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2593
2594         return QLA_FUNCTION_FAILED;
2595 }
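
/*
 * Worked example of the 82xx doorbell value composed above: dbval packs
 * a command code in the low bits, the port number at bit 5, the request
 * queue ID at bit 8 and the new ring index at bit 16.  For portnum 1,
 * req->id 0 and ring_index 0x2a:
 * 0x04 | (1 << 5) | (0 << 8) | (0x2a << 16) = 0x002a0024.
 */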
2596
2597 int
2598 qla2x00_start_sp(srb_t *sp)
2599 {
2600         int rval;
2601         struct qla_hw_data *ha = sp->fcport->vha->hw;
2602         void *pkt;
2603         unsigned long flags;
2604
2605         rval = QLA_FUNCTION_FAILED;
2606         spin_lock_irqsave(&ha->hardware_lock, flags);
2607         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2608         if (!pkt) {
2609                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2610                     "qla2x00_alloc_iocbs failed.\n");
2611                 goto done;
2612         }
2613
2614         rval = QLA_SUCCESS;
2615         switch (sp->type) {
2616         case SRB_LOGIN_CMD:
2617                 IS_FWI2_CAPABLE(ha) ?
2618                     qla24xx_login_iocb(sp, pkt) :
2619                     qla2x00_login_iocb(sp, pkt);
2620                 break;
2621         case SRB_LOGOUT_CMD:
2622                 IS_FWI2_CAPABLE(ha) ?
2623                     qla24xx_logout_iocb(sp, pkt) :
2624                     qla2x00_logout_iocb(sp, pkt);
2625                 break;
2626         case SRB_ELS_CMD_RPT:
2627         case SRB_ELS_CMD_HST:
2628                 qla24xx_els_iocb(sp, pkt);
2629                 break;
2630         case SRB_CT_CMD:
2631                 IS_FWI2_CAPABLE(ha) ?
2632                     qla24xx_ct_iocb(sp, pkt) :
2633                     qla2x00_ct_iocb(sp, pkt);
2634                 break;
2635         case SRB_ADISC_CMD:
2636                 IS_FWI2_CAPABLE(ha) ?
2637                     qla24xx_adisc_iocb(sp, pkt) :
2638                     qla2x00_adisc_iocb(sp, pkt);
2639                 break;
2640         case SRB_TM_CMD:
2641                 IS_QLAFX00(ha) ?
2642                     qlafx00_tm_iocb(sp, pkt) :
2643                     qla24xx_tm_iocb(sp, pkt);
2644                 break;
2645         case SRB_FXIOCB_DCMD:
2646         case SRB_FXIOCB_BCMD:
2647                 qlafx00_fxdisc_iocb(sp, pkt);
2648                 break;
2649         case SRB_ABT_CMD:
2650                 qlafx00_abort_iocb(sp, pkt);
2651                 break;
2652         default:
2653                 break;
2654         }
2655
2656         wmb();
2657         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2658 done:
2659         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2660         return rval;
2661 }
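
/*
 * Hypothetical usage sketch for qla2x00_start_sp(); the srb allocation
 * helper shown (qla2x00_get_sp(), as used elsewhere in the driver) is
 * illustrative only.  Guarded by #if 0 so it is never compiled.
 */
#if 0
        srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);

        sp->type = SRB_LOGIN_CMD;
        sp->u.iocb_cmd.u.logio.flags = SRB_LOGIN_COND_PLOGI;
        if (qla2x00_start_sp(sp) != QLA_SUCCESS)
                /* release the srb and fail or retry the login */;
#endif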
2662
2663 static void
2664 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2665                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2666 {
2667         uint16_t avail_dsds;
2668         uint32_t *cur_dsd;
2669         uint32_t req_data_len = 0;
2670         uint32_t rsp_data_len = 0;
2671         struct scatterlist *sg;
2672         int index;
2673         int entry_count = 1;
2674         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2675
2676         /* Update entry type to indicate bidir command */
2677         *((uint32_t *)(&cmd_pkt->entry_type)) =
2678                 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2679
2680         /* Set the transfer direction; for a bidirectional command both
2681          * flags are set.  Also set the BD_WRAP_BACK flag; the firmware
2682          * takes care of assigning DID=SID for outgoing pkts.
2683          */
2684         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2685         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2686         cmd_pkt->control_flags =
2687                         __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2688                                                         BD_WRAP_BACK);
2689
2690         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2691         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2692         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2693         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2694
2695         vha->bidi_stats.transfer_bytes += req_data_len;
2696         vha->bidi_stats.io_count++;
2697
2698         vha->qla_stats.output_bytes += req_data_len;
2699         vha->qla_stats.output_requests++;
2700
2701         /* Only one DSD is available in the bidirectional IOCB; the
2702          * remaining DSDs are bundled in continuation IOCBs.
2703          */
2704         avail_dsds = 1;
2705         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2706
2707         index = 0;
2708
2709         for_each_sg(bsg_job->request_payload.sg_list, sg,
2710                                 bsg_job->request_payload.sg_cnt, index) {
2711                 dma_addr_t sle_dma;
2712                 cont_a64_entry_t *cont_pkt;
2713
2714                 /* Allocate additional continuation packets */
2715                 if (avail_dsds == 0) {
2716                         /* A Continuation Type 1 IOCB can accommodate
2717                          * 5 DSDs.
2718                          */
2719                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2720                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2721                         avail_dsds = 5;
2722                         entry_count++;
2723                 }
2724                 sle_dma = sg_dma_address(sg);
2725                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2726                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2727                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2728                 avail_dsds--;
2729         }
2730         /* For a read request the DSDs always go to a continuation IOCB,
2731          * following the write DSDs.  If there is room on the current IOCB
2732          * they are added there; otherwise a new continuation IOCB is
2733          * allocated.
2734          */
2735         for_each_sg(bsg_job->reply_payload.sg_list, sg,
2736                                 bsg_job->reply_payload.sg_cnt, index) {
2737                 dma_addr_t sle_dma;
2738                 cont_a64_entry_t *cont_pkt;
2739
2740                 /* Allocate additional continuation packets */
2741                 if (avail_dsds == 0) {
2742                         /* A Continuation Type 1 IOCB can accommodate
2743                          * 5 DSDs.
2744                          */
2745                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2746                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2747                         avail_dsds = 5;
2748                         entry_count++;
2749                 }
2750                 sle_dma = sg_dma_address(sg);
2751                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2752                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2753                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2754                 avail_dsds--;
2755         }
2756         /* This value should be the same as the number of IOCBs required
2757          * for this cmd. */
2757         cmd_pkt->entry_count = entry_count;
2758 }
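
/*
 * Sketch of the entry-count arithmetic above: the bidirectional IOCB
 * carries one inline data DSD and each Continuation Type 1 IOCB adds
 * five, so tot_dsds segments (write plus read) need
 * 1 + ceil((tot_dsds - 1) / 5) entries -- e.g. 7 DSDs -> 3 entries.
 */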
2759
2760 int
2761 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2762 {
2763
2764         struct qla_hw_data *ha = vha->hw;
2765         unsigned long flags;
2766         uint32_t handle;
2767         uint32_t index;
2768         uint16_t req_cnt;
2769         uint16_t cnt;
2770         uint32_t *clr_ptr;
2771         struct cmd_bidir *cmd_pkt = NULL;
2772         struct rsp_que *rsp;
2773         struct req_que *req;
2774         int rval = EXT_STATUS_OK;
2775
2777
2778         rsp = ha->rsp_q_map[0];
2779         req = vha->req;
2780
2781         /* Send marker if required */
2782         if (vha->marker_needed != 0) {
2783                 if (qla2x00_marker(vha, req,
2784                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2785                         return EXT_STATUS_MAILBOX;
2786                 vha->marker_needed = 0;
2787         }
2788
2789         /* Acquire ring specific lock */
2790         spin_lock_irqsave(&ha->hardware_lock, flags);
2791
2792         /* Check for room in outstanding command list. */
2793         handle = req->current_outstanding_cmd;
2794         for (index = 1; index < req->num_outstanding_cmds; index++) {
2795                 handle++;
2796                 if (handle == req->num_outstanding_cmds)
2797                         handle = 1;
2798                 if (!req->outstanding_cmds[handle])
2799                         break;
2800         }
2801
2802         if (index == req->num_outstanding_cmds) {
2803                 rval = EXT_STATUS_BUSY;
2804                 goto queuing_error;
2805         }
2806
2807         /* Calculate number of IOCB required */
2808         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2809
2810         /* Check for room on request queue. */
2811         if (req->cnt < req_cnt + 2) {
2812                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
2813
2814                 if  (req->ring_index < cnt)
2815                         req->cnt = cnt - req->ring_index;
2816                 else
2817                         req->cnt = req->length -
2818                                 (req->ring_index - cnt);
2819         }
2820         if (req->cnt < req_cnt + 2) {
2821                 rval = EXT_STATUS_BUSY;
2822                 goto queuing_error;
2823         }
2824
2825         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2826         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2827
2828         /* Zero out remaining portion of packet. */
2829         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2830         clr_ptr = (uint32_t *)cmd_pkt + 2;
2831         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2832
2833         /* Set NPORT-ID (of vha) */
2834         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2835         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2836         cmd_pkt->port_id[1] = vha->d_id.b.area;
2837         cmd_pkt->port_id[2] = vha->d_id.b.domain;
2838
2839         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2840         cmd_pkt->entry_status = (uint8_t) rsp->id;
2841         /* Build command packet. */
2842         req->current_outstanding_cmd = handle;
2843         req->outstanding_cmds[handle] = sp;
2844         sp->handle = handle;
2845         req->cnt -= req_cnt;
2846
2847         /* Send the command to the firmware */
2848         wmb();
2849         qla2x00_start_iocbs(vha, req);
2850 queuing_error:
2851         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2852         return rval;
2853 }