drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/blkdev.h>
24 #include <linux/pci.h>
25 #include <linux/string.h>
26 #include <linux/kernel.h>
27 #include <linux/semaphore.h>
28
29 #include <scsi/libiscsi.h>
30 #include <scsi/scsi_transport_iscsi.h>
31 #include <scsi/scsi_transport.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include "be_main.h"
37 #include "be_iscsi.h"
38 #include "be_mgmt.h"
39
40 static unsigned int be_iopoll_budget = 10;
41 static unsigned int be_max_phys_size = 64;
42 static unsigned int enable_msix = 1;
43
45 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
46 MODULE_AUTHOR("ServerEngines Corporation");
47 MODULE_LICENSE("GPL");
48 module_param(be_iopoll_budget, int, 0);
49 module_param(enable_msix, int, 0);
50 module_param(be_max_phys_size, uint, S_IRUGO);
51 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
52                                    " contiguous memory that can be allocated."
53                                    " Range is 16 - 128");
54
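/* Cap each scatter-gather segment handed to the adapter at 64 KB. */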
55 static int beiscsi_slave_configure(struct scsi_device *sdev)
56 {
57         blk_queue_max_segment_size(sdev->request_queue, 65536);
58         return 0;
59 }
60
61 /*------------------- PCI Driver operations and data ----------------- */
62 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
63         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
64         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
65         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68         { 0 }
69 };
70 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
71
72 static struct scsi_host_template beiscsi_sht = {
73         .module = THIS_MODULE,
74         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
75         .proc_name = DRV_NAME,
76         .queuecommand = iscsi_queuecommand,
77         .eh_abort_handler = iscsi_eh_abort,
78         .change_queue_depth = iscsi_change_queue_depth,
79         .slave_configure = beiscsi_slave_configure,
80         .target_alloc = iscsi_target_alloc,
81         .eh_device_reset_handler = iscsi_eh_device_reset,
82         .eh_target_reset_handler = iscsi_eh_target_reset,
83         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
84         .can_queue = BE2_IO_DEPTH,
85         .this_id = -1,
86         .max_sectors = BEISCSI_MAX_SECTORS,
87         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
88         .use_clustering = ENABLE_CLUSTERING,
89 };
90
91 static struct scsi_transport_template *beiscsi_scsi_transport;
92
93 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
94 {
95         struct beiscsi_hba *phba;
96         struct Scsi_Host *shost;
97
98         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
99         if (!shost) {
100                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
101                         "iscsi_host_alloc failed\n");
102                 return NULL;
103         }
104         shost->dma_boundary = pcidev->dma_mask;
105         shost->max_id = BE2_MAX_SESSIONS;
106         shost->max_channel = 0;
107         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
108         shost->max_lun = BEISCSI_NUM_MAX_LUN;
109         shost->transportt = beiscsi_scsi_transport;
110         phba = iscsi_host_priv(shost);
111         memset(phba, 0, sizeof(*phba));
112         phba->shost = shost;
113         phba->pcidev = pci_dev_get(pcidev);
114         pci_set_drvdata(pcidev, phba);
115
116         if (iscsi_host_add(shost, &phba->pcidev->dev))
117                 goto free_devices;
118         return phba;
119
120 free_devices:
121         pci_dev_put(phba->pcidev);
122         iscsi_host_free(phba->shost);
123         return NULL;
124 }
125
126 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
127 {
128         if (phba->csr_va) {
129                 iounmap(phba->csr_va);
130                 phba->csr_va = NULL;
131         }
132         if (phba->db_va) {
133                 iounmap(phba->db_va);
134                 phba->db_va = NULL;
135         }
136         if (phba->pci_va) {
137                 iounmap(phba->pci_va);
138                 phba->pci_va = NULL;
139         }
140 }
141
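/*
 * Map the controller BARs: BAR 2 holds the CSR registers, the first
 * 128 KB of BAR 4 holds the doorbells, and the pcicfg region is BAR 1
 * on BE Gen2 controllers or BAR 0 otherwise.
 */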
142 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
143                                 struct pci_dev *pcidev)
144 {
145         u8 __iomem *addr;
146         int pcicfg_reg;
147
148         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
149                                pci_resource_len(pcidev, 2));
150         if (addr == NULL)
151                 return -ENOMEM;
152         phba->ctrl.csr = addr;
153         phba->csr_va = addr;
154         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
155
156         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
157         if (addr == NULL)
158                 goto pci_map_err;
159         phba->ctrl.db = addr;
160         phba->db_va = addr;
161         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
162
163         if (phba->generation == BE_GEN2)
164                 pcicfg_reg = 1;
165         else
166                 pcicfg_reg = 0;
167
168         addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
169                                pci_resource_len(pcidev, pcicfg_reg));
170
171         if (addr == NULL)
172                 goto pci_map_err;
173         phba->ctrl.pcicfg = addr;
174         phba->pci_va = addr;
175         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
176         return 0;
177
178 pci_map_err:
179         beiscsi_unmap_pci_function(phba);
180         return -ENOMEM;
181 }
182
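/*
 * Enable the PCI device, turn on bus mastering and select a 64-bit
 * coherent DMA mask, falling back to 32-bit if that fails.
 */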
183 static int beiscsi_enable_pci(struct pci_dev *pcidev)
184 {
185         int ret;
186
187         ret = pci_enable_device(pcidev);
188         if (ret) {
189                 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
190                         "failed\n");
191                 return ret;
192         }
193
194         pci_set_master(pcidev);
195         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
196                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
197                 if (ret) {
198                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
199                         pci_disable_device(pcidev);
200                         return ret;
201                 }
202         }
203         return 0;
204 }
205
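/*
 * Map the BARs and set up the MCC mailbox: 16 extra bytes are allocated
 * so the mailbox handed to the firmware can be 16-byte aligned via
 * PTR_ALIGN.
 */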
206 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
207 {
208         struct be_ctrl_info *ctrl = &phba->ctrl;
209         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
210         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
211         int status = 0;
212
213         ctrl->pdev = pdev;
214         status = beiscsi_map_pci_bars(phba, pdev);
215         if (status)
216                 return status;
217         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
218         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
219                                                   mbox_mem_alloc->size,
220                                                   &mbox_mem_alloc->dma);
221         if (!mbox_mem_alloc->va) {
222                 beiscsi_unmap_pci_function(phba);
223                 status = -ENOMEM;
224                 return status;
225         }
226
227         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
228         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
229         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
230         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
231         spin_lock_init(&ctrl->mbox_lock);
232         spin_lock_init(&phba->ctrl.mcc_lock);
233         spin_lock_init(&phba->ctrl.mcc_cq_lock);
234
235         return status;
236 }
237
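/*
 * Derive driver limits from the firmware configuration: the ICDs left
 * after reserving one per connection plus TMF and NOP-Out slots become
 * the I/O depth, and the EQ/CQ sizes are padded out in multiples of 512
 * entries with a floor of 1024 EQ entries.
 */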
238 static void beiscsi_get_params(struct beiscsi_hba *phba)
239 {
240         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
241                                     - (phba->fw_config.iscsi_cid_count
242                                     + BE2_TMFS
243                                     + BE2_NOPOUT_REQ));
244         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
245         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
246         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
247         phba->params.num_sge_per_io = BE2_SGE;
248         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
249         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
250         phba->params.eq_timer = 64;
251         phba->params.num_eq_entries =
252             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
253                                     + BE2_TMFS) / 512) + 1) * 512;
254         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
255                                 ? 1024 : phba->params.num_eq_entries;
256         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
257                              phba->params.num_eq_entries);
258         phba->params.num_cq_entries =
259             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
260                                     + BE2_TMFS) / 512) + 1) * 512;
261         phba->params.wrbs_per_cxn = 256;
262 }
263
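/*
 * Ring an event-queue doorbell: the ring id, rearm, clear-interrupt and
 * event bits plus the number of entries processed are packed into one
 * 32-bit write at DB_EQ_OFFSET.
 */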
264 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
265                            unsigned int id, unsigned int clr_interrupt,
266                            unsigned int num_processed,
267                            unsigned char rearm, unsigned char event)
268 {
269         u32 val = 0;
270         val |= id & DB_EQ_RING_ID_MASK;
271         if (rearm)
272                 val |= 1 << DB_EQ_REARM_SHIFT;
273         if (clr_interrupt)
274                 val |= 1 << DB_EQ_CLR_SHIFT;
275         if (event)
276                 val |= 1 << DB_EQ_EVNT_SHIFT;
277         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
278         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
279 }
280
281 /**
282  * be_isr_mcc - The isr routine of the driver.
283  * @irq: Not used
284  * @dev_id: Pointer to host adapter structure
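 *
 * Services the event queue dedicated to MCC completions under MSI-X:
 * valid EQEs are consumed, MCC CQ processing is deferred to the
 * workqueue and the EQ is rearmed.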
285  */
286 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
287 {
288         struct beiscsi_hba *phba;
289         struct be_eq_entry *eqe = NULL;
290         struct be_queue_info *eq;
291         struct be_queue_info *mcc;
292         unsigned int num_eq_processed;
293         struct be_eq_obj *pbe_eq;
294         unsigned long flags;
295
296         pbe_eq = dev_id;
297         eq = &pbe_eq->q;
298         phba =  pbe_eq->phba;
299         mcc = &phba->ctrl.mcc_obj.cq;
300         eqe = queue_tail_node(eq);
301         if (!eqe)
302                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
303
304         num_eq_processed = 0;
305
306         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
307                                 & EQE_VALID_MASK) {
308                 if (((eqe->dw[offsetof(struct amap_eq_entry,
309                      resource_id) / 32] &
310                      EQE_RESID_MASK) >> 16) == mcc->id) {
311                         spin_lock_irqsave(&phba->isr_lock, flags);
312                         phba->todo_mcc_cq = 1;
313                         spin_unlock_irqrestore(&phba->isr_lock, flags);
314                 }
315                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
316                 queue_tail_inc(eq);
317                 eqe = queue_tail_node(eq);
318                 num_eq_processed++;
319         }
320         if (phba->todo_mcc_cq)
321                 queue_work(phba->wq, &phba->work_cqs);
322         if (num_eq_processed)
323                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
324
325         return IRQ_HANDLED;
326 }
327
328 /**
329  * be_isr_msix - The isr routine of the driver.
330  * @irq: Not used
331  * @dev_id: Pointer to host adapter structure
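 *
 * Services a per-CPU I/O event queue under MSI-X.  With blk_iopoll
 * enabled the completion queue is drained from iopoll context, otherwise
 * CQ processing is handed to the workqueue.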
332  */
333 static irqreturn_t be_isr_msix(int irq, void *dev_id)
334 {
335         struct beiscsi_hba *phba;
336         struct be_eq_entry *eqe = NULL;
337         struct be_queue_info *eq;
338         struct be_queue_info *cq;
339         unsigned int num_eq_processed;
340         struct be_eq_obj *pbe_eq;
341         unsigned long flags;
342
343         pbe_eq = dev_id;
344         eq = &pbe_eq->q;
345         cq = pbe_eq->cq;
346         eqe = queue_tail_node(eq);
347         if (!eqe)
348                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
349
350         phba = pbe_eq->phba;
351         num_eq_processed = 0;
352         if (blk_iopoll_enabled) {
353                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
354                                         & EQE_VALID_MASK) {
355                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
356                                 blk_iopoll_sched(&pbe_eq->iopoll);
357
358                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
359                         queue_tail_inc(eq);
360                         eqe = queue_tail_node(eq);
361                         num_eq_processed++;
362                 }
363                 if (num_eq_processed)
364                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
365
366                 return IRQ_HANDLED;
367         } else {
368                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
369                                                 & EQE_VALID_MASK) {
370                         spin_lock_irqsave(&phba->isr_lock, flags);
371                         phba->todo_cq = 1;
372                         spin_unlock_irqrestore(&phba->isr_lock, flags);
373                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
374                         queue_tail_inc(eq);
375                         eqe = queue_tail_node(eq);
376                         num_eq_processed++;
377                 }
378                 if (phba->todo_cq)
379                         queue_work(phba->wq, &phba->work_cqs);
380
381                 if (num_eq_processed)
382                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
383
384                 return IRQ_HANDLED;
385         }
386 }
387
388 /**
389  * be_isr - The isr routine of the driver.
390  * @irq: Not used
391  * @dev_id: Pointer to host adapter structure
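 *
 * Legacy INTx handler.  CEV_ISR0 for this PCI function is read to check
 * whether the interrupt is ours; MCC events are pushed to the workqueue
 * while I/O events go to blk_iopoll (or the workqueue when iopoll is
 * disabled), and the EQ is rearmed as appropriate.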
392  */
393 static irqreturn_t be_isr(int irq, void *dev_id)
394 {
395         struct beiscsi_hba *phba;
396         struct hwi_controller *phwi_ctrlr;
397         struct hwi_context_memory *phwi_context;
398         struct be_eq_entry *eqe = NULL;
399         struct be_queue_info *eq;
400         struct be_queue_info *cq;
401         struct be_queue_info *mcc;
402         unsigned long flags, index;
403         unsigned int num_mcceq_processed, num_ioeq_processed;
404         struct be_ctrl_info *ctrl;
405         struct be_eq_obj *pbe_eq;
406         int isr;
407
408         phba = dev_id;
409         ctrl = &phba->ctrl;
410         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
411                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
412         if (!isr)
413                 return IRQ_NONE;
414
415         phwi_ctrlr = phba->phwi_ctrlr;
416         phwi_context = phwi_ctrlr->phwi_ctxt;
417         pbe_eq = &phwi_context->be_eq[0];
418
419         eq = &phwi_context->be_eq[0].q;
420         mcc = &phba->ctrl.mcc_obj.cq;
421         index = 0;
422         eqe = queue_tail_node(eq);
423         if (!eqe)
424                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
425
426         num_ioeq_processed = 0;
427         num_mcceq_processed = 0;
428         if (blk_iopoll_enabled) {
429                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
430                                         & EQE_VALID_MASK) {
431                         if (((eqe->dw[offsetof(struct amap_eq_entry,
432                              resource_id) / 32] &
433                              EQE_RESID_MASK) >> 16) == mcc->id) {
434                                 spin_lock_irqsave(&phba->isr_lock, flags);
435                                 phba->todo_mcc_cq = 1;
436                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
437                                 num_mcceq_processed++;
438                         } else {
439                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
440                                         blk_iopoll_sched(&pbe_eq->iopoll);
441                                 num_ioeq_processed++;
442                         }
443                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
444                         queue_tail_inc(eq);
445                         eqe = queue_tail_node(eq);
446                 }
447                 if (num_ioeq_processed || num_mcceq_processed) {
448                         if (phba->todo_mcc_cq)
449                                 queue_work(phba->wq, &phba->work_cqs);
450
451                         if ((num_mcceq_processed) && (!num_ioeq_processed))
452                                 hwi_ring_eq_db(phba, eq->id, 0,
453                                               (num_ioeq_processed +
454                                                num_mcceq_processed) , 1, 1);
455                         else
456                                 hwi_ring_eq_db(phba, eq->id, 0,
457                                                (num_ioeq_processed +
458                                                 num_mcceq_processed), 0, 1);
459
460                         return IRQ_HANDLED;
461                 } else
462                         return IRQ_NONE;
463         } else {
464                 cq = &phwi_context->be_cq[0];
465                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
466                                                 & EQE_VALID_MASK) {
467
468                         if (((eqe->dw[offsetof(struct amap_eq_entry,
469                              resource_id) / 32] &
470                              EQE_RESID_MASK) >> 16) != cq->id) {
471                                 spin_lock_irqsave(&phba->isr_lock, flags);
472                                 phba->todo_mcc_cq = 1;
473                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
474                         } else {
475                                 spin_lock_irqsave(&phba->isr_lock, flags);
476                                 phba->todo_cq = 1;
477                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
478                         }
479                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
480                         queue_tail_inc(eq);
481                         eqe = queue_tail_node(eq);
482                         num_ioeq_processed++;
483                 }
484                 if (phba->todo_cq || phba->todo_mcc_cq)
485                         queue_work(phba->wq, &phba->work_cqs);
486
487                 if (num_ioeq_processed) {
488                         hwi_ring_eq_db(phba, eq->id, 0,
489                                        num_ioeq_processed, 1, 1);
490                         return IRQ_HANDLED;
491                 } else
492                         return IRQ_NONE;
493         }
494 }
495
496 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
497 {
498         struct pci_dev *pcidev = phba->pcidev;
499         struct hwi_controller *phwi_ctrlr;
500         struct hwi_context_memory *phwi_context;
501         int ret, msix_vec, i = 0;
502         char desc[32];
503
504         phwi_ctrlr = phba->phwi_ctrlr;
505         phwi_context = phwi_ctrlr->phwi_ctxt;
506
507         if (phba->msix_enabled) {
508                 for (i = 0; i < phba->num_cpus; i++) {
509                         sprintf(desc, "beiscsi_msix_%04x", i);
510                         msix_vec = phba->msix_entries[i].vector;
511                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
512                                           &phwi_context->be_eq[i]);
513                 }
514                 msix_vec = phba->msix_entries[i].vector;
515                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
516                                   &phwi_context->be_eq[i]);
517         } else {
518                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
519                                   "beiscsi", phba);
520                 if (ret) {
521                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs - "
522                                      "Failed to register irq\n");
523                         return ret;
524                 }
525         }
526         return 0;
527 }
528
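/*
 * Ring a completion-queue doorbell with the ring id, the rearm bit and
 * the count of CQ entries processed.
 */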
529 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
530                            unsigned int id, unsigned int num_processed,
531                            unsigned char rearm, unsigned char event)
532 {
533         u32 val = 0;
534         val |= id & DB_CQ_RING_ID_MASK;
535         if (rearm)
536                 val |= 1 << DB_CQ_REARM_SHIFT;
537         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
538         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
539 }
540
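/*
 * Hand an unsolicited PDU (NOP-In, async event, reject, login/text
 * response) to libiscsi via __iscsi_complete_pdu under the session lock;
 * unrecognized opcodes are logged and dropped.
 */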
541 static unsigned int
542 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
543                           struct beiscsi_hba *phba,
544                           unsigned short cid,
545                           struct pdu_base *ppdu,
546                           unsigned long pdu_len,
547                           void *pbuffer, unsigned long buf_len)
548 {
549         struct iscsi_conn *conn = beiscsi_conn->conn;
550         struct iscsi_session *session = conn->session;
551         struct iscsi_task *task;
552         struct beiscsi_io_task *io_task;
553         struct iscsi_hdr *login_hdr;
554
555         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
556                                                 PDUBASE_OPCODE_MASK) {
557         case ISCSI_OP_NOOP_IN:
558                 pbuffer = NULL;
559                 buf_len = 0;
560                 break;
561         case ISCSI_OP_ASYNC_EVENT:
562                 break;
563         case ISCSI_OP_REJECT:
564                 WARN_ON(!pbuffer);
565                 WARN_ON(!(buf_len == 48));
566                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
567                 break;
568         case ISCSI_OP_LOGIN_RSP:
569         case ISCSI_OP_TEXT_RSP:
570                 task = conn->login_task;
571                 io_task = task->dd_data;
572                 login_hdr = (struct iscsi_hdr *)ppdu;
573                 login_hdr->itt = io_task->libiscsi_itt;
574                 break;
575         default:
576                 shost_printk(KERN_WARNING, phba->shost,
577                              "Unrecognized opcode 0x%x in async msg \n",
578                              (ppdu->
579                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
580                                                 & PDUBASE_OPCODE_MASK));
581                 return 1;
582         }
583
584         spin_lock_bh(&session->lock);
585         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
586         spin_unlock_bh(&session->lock);
587         return 0;
588 }
589
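/*
 * I/O SGL handles are kept in a circular pool indexed by
 * io_sgl_alloc_index/io_sgl_free_index; allocation returns NULL once
 * io_sgl_hndl_avbl reaches zero.
 */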
590 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
591 {
592         struct sgl_handle *psgl_handle;
593
594         if (phba->io_sgl_hndl_avbl) {
595                 SE_DEBUG(DBG_LVL_8,
596                          "In alloc_io_sgl_handle, io_sgl_alloc_index=%d\n",
597                          phba->io_sgl_alloc_index);
598                 psgl_handle = phba->io_sgl_hndl_base[phba->
599                                                 io_sgl_alloc_index];
600                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
601                 phba->io_sgl_hndl_avbl--;
602                 if (phba->io_sgl_alloc_index == (phba->params.
603                                                  ios_per_ctrl - 1))
604                         phba->io_sgl_alloc_index = 0;
605                 else
606                         phba->io_sgl_alloc_index++;
607         } else
608                 psgl_handle = NULL;
609         return psgl_handle;
610 }
611
612 static void
613 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
614 {
615         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
616                  phba->io_sgl_free_index);
617         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
618                 /*
619                  * this can happen if clean_task is called on a task that
620                  * failed in xmit_task or alloc_pdu.
621                  */
622                  SE_DEBUG(DBG_LVL_8,
623                          "Double Free in IO SGL io_sgl_free_index=%d, "
624                          "value there=%p\n", phba->io_sgl_free_index,
625                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
626                 return;
627         }
628         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
629         phba->io_sgl_hndl_avbl++;
630         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
631                 phba->io_sgl_free_index = 0;
632         else
633                 phba->io_sgl_free_index++;
634 }
635
636 /**
637  * alloc_wrb_handle - To allocate a wrb handle
638  * @phba: The hba pointer
639  * @cid: The cid to use for allocation
640  *
641  * This happens under session_lock until submission to chip
642  */
643 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
644 {
645         struct hwi_wrb_context *pwrb_context;
646         struct hwi_controller *phwi_ctrlr;
647         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
648
649         phwi_ctrlr = phba->phwi_ctrlr;
650         pwrb_context = &phwi_ctrlr->wrb_context[cid];
651         if (pwrb_context->wrb_handles_available >= 2) {
652                 pwrb_handle = pwrb_context->pwrb_handle_base[
653                                             pwrb_context->alloc_index];
654                 pwrb_context->wrb_handles_available--;
655                 if (pwrb_context->alloc_index ==
656                                                 (phba->params.wrbs_per_cxn - 1))
657                         pwrb_context->alloc_index = 0;
658                 else
659                         pwrb_context->alloc_index++;
660                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
661                                                 pwrb_context->alloc_index];
662                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
663         } else
664                 pwrb_handle = NULL;
665         return pwrb_handle;
666 }
667
668 /**
669  * free_wrb_handle - To free the wrb handle back to pool
670  * @phba: The hba pointer
671  * @pwrb_context: The context to free from
672  * @pwrb_handle: The wrb_handle to free
673  *
674  * This happens under session_lock until submission to chip
675  */
676 static void
677 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
678                 struct wrb_handle *pwrb_handle)
679 {
680         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
681         pwrb_context->wrb_handles_available++;
682         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
683                 pwrb_context->free_index = 0;
684         else
685                 pwrb_context->free_index++;
686
687         SE_DEBUG(DBG_LVL_8,
688                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
689                  "wrb_handles_available=%d\n",
690                  pwrb_handle, pwrb_context->free_index,
691                  pwrb_context->wrb_handles_available);
692 }
693
694 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
695 {
696         struct sgl_handle *psgl_handle;
697
698         if (phba->eh_sgl_hndl_avbl) {
699                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
700                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
701                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
702                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
703                 phba->eh_sgl_hndl_avbl--;
704                 if (phba->eh_sgl_alloc_index ==
705                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
706                      1))
707                         phba->eh_sgl_alloc_index = 0;
708                 else
709                         phba->eh_sgl_alloc_index++;
710         } else
711                 psgl_handle = NULL;
712         return psgl_handle;
713 }
714
715 void
716 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
717 {
718
719         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
720                              phba->eh_sgl_free_index);
721         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
722                 /*
723                  * this can happen if clean_task is called on a task that
724                  * failed in xmit_task or alloc_pdu.
725                  */
726                 SE_DEBUG(DBG_LVL_8,
727                          "Double Free in eh SGL, eh_sgl_free_index=%d\n",
728                          phba->eh_sgl_free_index);
729                 return;
730         }
731         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
732         phba->eh_sgl_hndl_avbl++;
733         if (phba->eh_sgl_free_index ==
734             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
735                 phba->eh_sgl_free_index = 0;
736         else
737                 phba->eh_sgl_free_index++;
738 }
739
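/*
 * Complete a SCSI command from a solicited CQE: decode the response,
 * status and flag fields, account for residual under/overflow, copy
 * sense data on CHECK CONDITION and finish the task with the updated
 * ExpCmdSN/MaxCmdSN window.
 */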
740 static void
741 be_complete_io(struct beiscsi_conn *beiscsi_conn,
742                struct iscsi_task *task, struct sol_cqe *psol)
743 {
744         struct beiscsi_io_task *io_task = task->dd_data;
745         struct be_status_bhs *sts_bhs =
746                                 (struct be_status_bhs *)io_task->cmd_bhs;
747         struct iscsi_conn *conn = beiscsi_conn->conn;
748         unsigned int sense_len;
749         unsigned char *sense;
750         u32 resid = 0, exp_cmdsn, max_cmdsn;
751         u8 rsp, status, flags;
752
753         exp_cmdsn = (psol->
754                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
755                         & SOL_EXP_CMD_SN_MASK);
756         max_cmdsn = ((psol->
757                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
758                         & SOL_EXP_CMD_SN_MASK) +
759                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
760                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
761         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
762                                                 & SOL_RESP_MASK) >> 16);
763         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
764                                                 & SOL_STS_MASK) >> 8);
765         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
766                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
767
768         task->sc->result = (DID_OK << 16) | status;
769         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
770                 task->sc->result = DID_ERROR << 16;
771                 goto unmap;
772         }
773
774         /* bidi not initially supported */
775         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
776                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
777                                 32] & SOL_RES_CNT_MASK);
778
779                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
780                         task->sc->result = DID_ERROR << 16;
781
782                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
783                         scsi_set_resid(task->sc, resid);
784                         if (!status && (scsi_bufflen(task->sc) - resid <
785                             task->sc->underflow))
786                                 task->sc->result = DID_ERROR << 16;
787                 }
788         }
789
790         if (status == SAM_STAT_CHECK_CONDITION) {
791                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
792                 sense = sts_bhs->sense_info + sizeof(unsigned short);
793                 sense_len = be16_to_cpu(*slen);
794                 memcpy(task->sc->sense_buffer, sense,
795                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
796         }
797
798         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
799                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
800                                                         & SOL_RES_CNT_MASK)
801                          conn->rxdata_octets += (psol->
802                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
803                              & SOL_RES_CNT_MASK);
804         }
805 unmap:
806         scsi_dma_unmap(io_task->scsi_cmnd);
807         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
808 }
809
810 static void
811 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
812                    struct iscsi_task *task, struct sol_cqe *psol)
813 {
814         struct iscsi_logout_rsp *hdr;
815         struct beiscsi_io_task *io_task = task->dd_data;
816         struct iscsi_conn *conn = beiscsi_conn->conn;
817
818         hdr = (struct iscsi_logout_rsp *)task->hdr;
819         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
820         hdr->t2wait = 5;
821         hdr->t2retain = 0;
822         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
823                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
824         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
825                                         32] & SOL_RESP_MASK);
826         hdr->exp_cmdsn = cpu_to_be32(psol->
827                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
828                                         & SOL_EXP_CMD_SN_MASK);
829         hdr->max_cmdsn = be32_to_cpu((psol->
830                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
831                                         & SOL_EXP_CMD_SN_MASK) +
832                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
833                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
834         hdr->dlength[0] = 0;
835         hdr->dlength[1] = 0;
836         hdr->dlength[2] = 0;
837         hdr->hlength = 0;
838         hdr->itt = io_task->libiscsi_itt;
839         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
840 }
841
842 static void
843 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
844                 struct iscsi_task *task, struct sol_cqe *psol)
845 {
846         struct iscsi_tm_rsp *hdr;
847         struct iscsi_conn *conn = beiscsi_conn->conn;
848         struct beiscsi_io_task *io_task = task->dd_data;
849
850         hdr = (struct iscsi_tm_rsp *)task->hdr;
851         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
852         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
853                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
854         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
855                                         32] & SOL_RESP_MASK);
856         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
857                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
858         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
859                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
860                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
861                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
862         hdr->itt = io_task->libiscsi_itt;
863         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
864 }
865
866 static void
867 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
868                        struct beiscsi_hba *phba, struct sol_cqe *psol)
869 {
870         struct hwi_wrb_context *pwrb_context;
871         struct wrb_handle *pwrb_handle = NULL;
872         struct hwi_controller *phwi_ctrlr;
873         struct iscsi_task *task;
874         struct beiscsi_io_task *io_task;
875         struct iscsi_conn *conn = beiscsi_conn->conn;
876         struct iscsi_session *session = conn->session;
877
878         phwi_ctrlr = phba->phwi_ctrlr;
879         pwrb_context = &phwi_ctrlr->wrb_context[((psol->
880                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
881                                 SOL_CID_MASK) >> 6) -
882                                 phba->fw_config.iscsi_cid_start];
883         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
884                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
885                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
886         task = pwrb_handle->pio_handle;
887
888         io_task = task->dd_data;
889         spin_lock(&phba->mgmt_sgl_lock);
890         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
891         spin_unlock(&phba->mgmt_sgl_lock);
892         spin_lock_bh(&session->lock);
893         free_wrb_handle(phba, pwrb_context, pwrb_handle);
894         spin_unlock_bh(&session->lock);
895 }
896
897 static void
898 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
899                        struct iscsi_task *task, struct sol_cqe *psol)
900 {
901         struct iscsi_nopin *hdr;
902         struct iscsi_conn *conn = beiscsi_conn->conn;
903         struct beiscsi_io_task *io_task = task->dd_data;
904
905         hdr = (struct iscsi_nopin *)task->hdr;
906         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
907                         & SOL_FLAGS_MASK) >> 24) | 0x80;
908         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
909                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
910         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
911                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
912                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
913                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
914         hdr->opcode = ISCSI_OP_NOOP_IN;
915         hdr->itt = io_task->libiscsi_itt;
916         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
917 }
918
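/*
 * Look up the WRB handle and task for a solicited CQE and dispatch on
 * the WRB type (I/O, logout, TMF, NOP); unexpected types are logged.
 */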
919 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
920                              struct beiscsi_hba *phba, struct sol_cqe *psol)
921 {
922         struct hwi_wrb_context *pwrb_context;
923         struct wrb_handle *pwrb_handle;
924         struct iscsi_wrb *pwrb = NULL;
925         struct hwi_controller *phwi_ctrlr;
926         struct iscsi_task *task;
927         unsigned int type;
928         struct iscsi_conn *conn = beiscsi_conn->conn;
929         struct iscsi_session *session = conn->session;
930
931         phwi_ctrlr = phba->phwi_ctrlr;
932         pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
933                                 (struct amap_sol_cqe, cid) / 32]
934                                 & SOL_CID_MASK) >> 6) -
935                                 phba->fw_config.iscsi_cid_start];
936         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
937                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
938                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
939         task = pwrb_handle->pio_handle;
940         pwrb = pwrb_handle->pwrb;
941         type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
942                                  WRB_TYPE_MASK) >> 28;
943
944         spin_lock_bh(&session->lock);
945         switch (type) {
946         case HWH_TYPE_IO:
947         case HWH_TYPE_IO_RD:
948                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
949                     ISCSI_OP_NOOP_OUT) {
950                         be_complete_nopin_resp(beiscsi_conn, task, psol);
951                 } else
952                         be_complete_io(beiscsi_conn, task, psol);
953                 break;
954
955         case HWH_TYPE_LOGOUT:
956                 be_complete_logout(beiscsi_conn, task, psol);
957                 break;
958
959         case HWH_TYPE_LOGIN:
960                 SE_DEBUG(DBG_LVL_1,
961                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
962                          " - Solicited path\n");
963                 break;
964
965         case HWH_TYPE_TMF:
966                 be_complete_tmf(beiscsi_conn, task, psol);
967                 break;
968
969         case HWH_TYPE_NOP:
970                 be_complete_nopin_resp(beiscsi_conn, task, psol);
971                 break;
972
973         default:
974                 shost_printk(KERN_WARNING, phba->shost,
975                                 "In hwi_complete_cmd, unknown type = %d "
976                                 "wrb_index 0x%x CID 0x%x\n", type,
977                                 ((psol->dw[offsetof(struct amap_sol_cqe,
978                                 wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
979                                 ((psol->dw[offsetof(struct amap_sol_cqe,
980                                 cid) / 32] & SOL_CID_MASK) >> 6));
981                 break;
982         }
983
984         spin_unlock_bh(&session->lock);
985 }
986
987 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
988                                           *pasync_ctx, unsigned int is_header,
989                                           unsigned int host_write_ptr)
990 {
991         if (is_header)
992                 return &pasync_ctx->async_entry[host_write_ptr].
993                     header_busy_list;
994         else
995                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
996 }
997
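/*
 * Translate a default PDU CQE back to its async_pdu_handle: the buffer
 * bus address is reconstructed from the CQE, converted to an index into
 * the header or data ring, and matched against the busy-list entries.
 */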
998 static struct async_pdu_handle *
999 hwi_get_async_handle(struct beiscsi_hba *phba,
1000                      struct beiscsi_conn *beiscsi_conn,
1001                      struct hwi_async_pdu_context *pasync_ctx,
1002                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1003 {
1004         struct be_bus_address phys_addr;
1005         struct list_head *pbusy_list;
1006         struct async_pdu_handle *pasync_handle = NULL;
1007         int buffer_len = 0;
1008         unsigned char buffer_index = -1;
1009         unsigned char is_header = 0;
1010
1011         phys_addr.u.a32.address_lo =
1012             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1013             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1014                                                 & PDUCQE_DPL_MASK) >> 16);
1015         phys_addr.u.a32.address_hi =
1016             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1017
1018         phys_addr.u.a64.address =
1019                         *((unsigned long long *)(&phys_addr.u.a64.address));
1020
1021         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1022                         & PDUCQE_CODE_MASK) {
1023         case UNSOL_HDR_NOTIFY:
1024                 is_header = 1;
1025
1026                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1027                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1028                         index) / 32] & PDUCQE_INDEX_MASK));
1029
1030                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1031                                 pasync_ctx->async_header.pa_base.u.a64.address);
1032
1033                 buffer_index = buffer_len /
1034                                 pasync_ctx->async_header.buffer_size;
1035
1036                 break;
1037         case UNSOL_DATA_NOTIFY:
1038                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1039                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1040                                         index) / 32] & PDUCQE_INDEX_MASK));
1041                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1042                                         pasync_ctx->async_data.pa_base.u.
1043                                         a64.address);
1044                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1045                 break;
1046         default:
1047                 pbusy_list = NULL;
1048                 shost_printk(KERN_WARNING, phba->shost,
1049                         "Unexpected code=%d \n",
1050                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1051                                         code) / 32] & PDUCQE_CODE_MASK);
1052                 return NULL;
1053         }
1054
1055         WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1056         WARN_ON(list_empty(pbusy_list));
1057         list_for_each_entry(pasync_handle, pbusy_list, link) {
1058                 WARN_ON(pasync_handle->consumed);
1059                 if (pasync_handle->index == buffer_index)
1060                         break;
1061         }
1062
1063         WARN_ON(!pasync_handle);
1064
1065         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1066                                              phba->fw_config.iscsi_cid_start;
1067         pasync_handle->is_header = is_header;
1068         pasync_handle->buffer_len = ((pdpdu_cqe->
1069                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1070                         & PDUCQE_DPL_MASK) >> 16);
1071
1072         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1073                         index) / 32] & PDUCQE_INDEX_MASK);
1074         return pasync_handle;
1075 }
1076
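/*
 * Advance the endpoint read pointer up to the CQ index reported by the
 * adapter, marking the busy handles passed along the way as consumed and
 * crediting them back as writable entries.
 */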
1077 static unsigned int
1078 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1079                            unsigned int is_header, unsigned int cq_index)
1080 {
1081         struct list_head *pbusy_list;
1082         struct async_pdu_handle *pasync_handle;
1083         unsigned int num_entries, writables = 0;
1084         unsigned int *pep_read_ptr, *pwritables;
1085
1086
1087         if (is_header) {
1088                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1089                 pwritables = &pasync_ctx->async_header.writables;
1090                 num_entries = pasync_ctx->async_header.num_entries;
1091         } else {
1092                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1093                 pwritables = &pasync_ctx->async_data.writables;
1094                 num_entries = pasync_ctx->async_data.num_entries;
1095         }
1096
1097         while ((*pep_read_ptr) != cq_index) {
1098                 (*pep_read_ptr)++;
1099                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1100
1101                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1102                                                      *pep_read_ptr);
1103                 if (writables == 0)
1104                         WARN_ON(list_empty(pbusy_list));
1105
1106                 if (!list_empty(pbusy_list)) {
1107                         pasync_handle = list_entry(pbusy_list->next,
1108                                                    struct async_pdu_handle,
1109                                                    link);
1110                         WARN_ON(!pasync_handle);
1111                         pasync_handle->consumed = 1;
1112                 }
1113
1114                 writables++;
1115         }
1116
1117         if (!writables) {
1118                 SE_DEBUG(DBG_LVL_1,
1119                          "Duplicate notification received - index 0x%x!!\n",
1120                          cq_index);
1121                 WARN_ON(1);
1122         }
1123
1124         *pwritables = *pwritables + writables;
1125         return 0;
1126 }
1127
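/*
 * Return the header and data buffers queued on a CRI's wait queue to
 * their free lists and reset the per-CRI reassembly state.
 */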
1128 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1129                                        unsigned int cri)
1130 {
1131         struct hwi_controller *phwi_ctrlr;
1132         struct hwi_async_pdu_context *pasync_ctx;
1133         struct async_pdu_handle *pasync_handle, *tmp_handle;
1134         struct list_head *plist;
1135         unsigned int i = 0;
1136
1137         phwi_ctrlr = phba->phwi_ctrlr;
1138         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1139
1140         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1141
1142         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1143                 list_del(&pasync_handle->link);
1144
1145                 if (i == 0) {
1146                         list_add_tail(&pasync_handle->link,
1147                                       &pasync_ctx->async_header.free_list);
1148                         pasync_ctx->async_header.free_entries++;
1149                         i++;
1150                 } else {
1151                         list_add_tail(&pasync_handle->link,
1152                                       &pasync_ctx->async_data.free_list);
1153                         pasync_ctx->async_data.free_entries++;
1154                         i++;
1155                 }
1156         }
1157
1158         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1159         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1160         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1161         return 0;
1162 }
1163
1164 static struct phys_addr *
1165 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1166                      unsigned int is_header, unsigned int host_write_ptr)
1167 {
1168         struct phys_addr *pasync_sge = NULL;
1169
1170         if (is_header)
1171                 pasync_sge = pasync_ctx->async_header.ring_base;
1172         else
1173                 pasync_sge = pasync_ctx->async_data.ring_base;
1174
1175         return pasync_sge + host_write_ptr;
1176 }
1177
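/*
 * Repost free default PDU buffers (header or data) to the adapter in
 * multiples of eight: ring SGEs are refilled from the free list, the
 * handles move to the busy lists and the RXULP0 doorbell is rung with
 * the count posted.
 */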
1178 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1179                                    unsigned int is_header)
1180 {
1181         struct hwi_controller *phwi_ctrlr;
1182         struct hwi_async_pdu_context *pasync_ctx;
1183         struct async_pdu_handle *pasync_handle;
1184         struct list_head *pfree_link, *pbusy_list;
1185         struct phys_addr *pasync_sge;
1186         unsigned int ring_id, num_entries;
1187         unsigned int host_write_num;
1188         unsigned int writables;
1189         unsigned int i = 0;
1190         u32 doorbell = 0;
1191
1192         phwi_ctrlr = phba->phwi_ctrlr;
1193         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1194
1195         if (is_header) {
1196                 num_entries = pasync_ctx->async_header.num_entries;
1197                 writables = min(pasync_ctx->async_header.writables,
1198                                 pasync_ctx->async_header.free_entries);
1199                 pfree_link = pasync_ctx->async_header.free_list.next;
1200                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1201                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1202         } else {
1203                 num_entries = pasync_ctx->async_data.num_entries;
1204                 writables = min(pasync_ctx->async_data.writables,
1205                                 pasync_ctx->async_data.free_entries);
1206                 pfree_link = pasync_ctx->async_data.free_list.next;
1207                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1208                 ring_id = phwi_ctrlr->default_pdu_data.id;
1209         }
1210
1211         writables = (writables / 8) * 8;
1212         if (writables) {
1213                 for (i = 0; i < writables; i++) {
1214                         pbusy_list =
1215                             hwi_get_async_busy_list(pasync_ctx, is_header,
1216                                                     host_write_num);
1217                         pasync_handle =
1218                             list_entry(pfree_link, struct async_pdu_handle,
1219                                                                 link);
1220                         WARN_ON(!pasync_handle);
1221                         pasync_handle->consumed = 0;
1222
1223                         pfree_link = pfree_link->next;
1224
1225                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1226                                                 is_header, host_write_num);
1227
1228                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1229                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1230
1231                         list_move(&pasync_handle->link, pbusy_list);
1232
1233                         host_write_num++;
1234                         host_write_num = host_write_num % num_entries;
1235                 }
1236
1237                 if (is_header) {
1238                         pasync_ctx->async_header.host_write_ptr =
1239                                                         host_write_num;
1240                         pasync_ctx->async_header.free_entries -= writables;
1241                         pasync_ctx->async_header.writables -= writables;
1242                         pasync_ctx->async_header.busy_entries += writables;
1243                 } else {
1244                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1245                         pasync_ctx->async_data.free_entries -= writables;
1246                         pasync_ctx->async_data.writables -= writables;
1247                         pasync_ctx->async_data.busy_entries += writables;
1248                 }
1249
1250                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1251                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1252                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1253                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1254                                         << DB_DEF_PDU_CQPROC_SHIFT;
1255
1256                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1257         }
1258 }
1259
1260 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1261                                          struct beiscsi_conn *beiscsi_conn,
1262                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1263 {
1264         struct hwi_controller *phwi_ctrlr;
1265         struct hwi_async_pdu_context *pasync_ctx;
1266         struct async_pdu_handle *pasync_handle = NULL;
1267         unsigned int cq_index = -1;
1268
1269         phwi_ctrlr = phba->phwi_ctrlr;
1270         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1271
1272         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1273                                              pdpdu_cqe, &cq_index);
1274         BUG_ON(pasync_handle->is_header != 0);
1275         if (pasync_handle->consumed == 0)
1276                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1277                                            cq_index);
1278
1279         hwi_free_async_msg(phba, pasync_handle->cri);
1280         hwi_post_async_buffers(phba, pasync_handle->is_header);
1281 }
1282
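/*
 * Reassemble an unsolicited PDU for a CRI: the first handle on the wait
 * queue supplies the header, later handles are copied into the first
 * data buffer, and the result is passed to beiscsi_process_async_pdu.
 */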
1283 static unsigned int
1284 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1285                   struct beiscsi_hba *phba,
1286                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1287 {
1288         struct list_head *plist;
1289         struct async_pdu_handle *pasync_handle;
1290         void *phdr = NULL;
1291         unsigned int hdr_len = 0, buf_len = 0;
1292         unsigned int status, index = 0, offset = 0;
1293         void *pfirst_buffer = NULL;
1294         unsigned int num_buf = 0;
1295
1296         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1297
1298         list_for_each_entry(pasync_handle, plist, link) {
1299                 if (index == 0) {
1300                         phdr = pasync_handle->pbuffer;
1301                         hdr_len = pasync_handle->buffer_len;
1302                 } else {
1303                         buf_len = pasync_handle->buffer_len;
1304                         if (!num_buf) {
1305                                 pfirst_buffer = pasync_handle->pbuffer;
1306                                 num_buf++;
1307                         }
1308                         memcpy(pfirst_buffer + offset,
1309                                pasync_handle->pbuffer, buf_len);
1310                         offset += buf_len;
1311                 }
1312                 index++;
1313         }
1314
1315         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1316                                            (beiscsi_conn->beiscsi_conn_cid -
1317                                             phba->fw_config.iscsi_cid_start),
1318                                             phdr, hdr_len, pfirst_buffer,
1319                                             buf_len);
1320
1321         if (status == 0)
1322                 hwi_free_async_msg(phba, cri);
1323         return 0;
1324 }
1325
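/*
 * hwi_gather_async_pdu - add a newly received header or data handle to
 * the per-CRI wait queue; once the header has arrived and the advertised
 * data length is complete, forward the PDU via hwi_fwd_async_msg().
 */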
1326 static unsigned int
1327 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1328                      struct beiscsi_hba *phba,
1329                      struct async_pdu_handle *pasync_handle)
1330 {
1331         struct hwi_async_pdu_context *pasync_ctx;
1332         struct hwi_controller *phwi_ctrlr;
1333         unsigned int bytes_needed = 0, status = 0;
1334         unsigned short cri = pasync_handle->cri;
1335         struct pdu_base *ppdu;
1336
1337         phwi_ctrlr = phba->phwi_ctrlr;
1338         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1339
1340         list_del(&pasync_handle->link);
1341         if (pasync_handle->is_header) {
1342                 pasync_ctx->async_header.busy_entries--;
1343                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1344                         hwi_free_async_msg(phba, cri);
1345                         BUG();
1346                 }
1347
1348                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1349                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1350                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1351                                 (unsigned short)pasync_handle->buffer_len;
1352                 list_add_tail(&pasync_handle->link,
1353                               &pasync_ctx->async_entry[cri].wait_queue.list);
1354
1355                 ppdu = pasync_handle->pbuffer;
1356                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1357                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1358                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1359                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1360                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1361
1362                 if (status == 0) {
1363                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1364                             bytes_needed;
1365
1366                         if (bytes_needed == 0)
1367                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1368                                                            pasync_ctx, cri);
1369                 }
1370         } else {
1371                 pasync_ctx->async_data.busy_entries--;
1372                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1373                         list_add_tail(&pasync_handle->link,
1374                                       &pasync_ctx->async_entry[cri].wait_queue.
1375                                       list);
1376                         pasync_ctx->async_entry[cri].wait_queue.
1377                                 bytes_received +=
1378                                 (unsigned short)pasync_handle->buffer_len;
1379
1380                         if (pasync_ctx->async_entry[cri].wait_queue.
1381                             bytes_received >=
1382                             pasync_ctx->async_entry[cri].wait_queue.
1383                             bytes_needed)
1384                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1385                                                            pasync_ctx, cri);
1386                 }
1387         }
1388         return status;
1389 }
1390
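/*
 * hwi_process_default_pdu_ring - handle an UNSOL_HDR/UNSOL_DATA CQE:
 * look up the async handle the adapter consumed, update the writable
 * count, queue the handle via hwi_gather_async_pdu() and repost fresh
 * buffers to the default PDU ring.
 */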
1391 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1392                                          struct beiscsi_hba *phba,
1393                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1394 {
1395         struct hwi_controller *phwi_ctrlr;
1396         struct hwi_async_pdu_context *pasync_ctx;
1397         struct async_pdu_handle *pasync_handle = NULL;
1398         unsigned int cq_index = -1;
1399
1400         phwi_ctrlr = phba->phwi_ctrlr;
1401         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1402         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1403                                              pdpdu_cqe, &cq_index);
1404
1405         if (pasync_handle->consumed == 0)
1406                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1407                                            cq_index);
1408         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1409         hwi_post_async_buffers(phba, pasync_handle->is_header);
1410 }
1411
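/*
 * beiscsi_process_mcc_isr - drain valid completions from the MCC CQ,
 * dispatching async link-state events and MCC command completions, and
 * ring the CQ doorbell (re-armed) when done.
 */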
1412 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1413 {
1414         struct be_queue_info *mcc_cq;
1415         struct  be_mcc_compl *mcc_compl;
1416         unsigned int num_processed = 0;
1417
1418         mcc_cq = &phba->ctrl.mcc_obj.cq;
1419         mcc_compl = queue_tail_node(mcc_cq);
1420         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1421         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1422
1423                 if (num_processed >= 32) {
1424                         hwi_ring_cq_db(phba, mcc_cq->id,
1425                                         num_processed, 0, 0);
1426                         num_processed = 0;
1427                 }
1428                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1429                         /* Interpret flags as an async trailer */
1430                         if (is_link_state_evt(mcc_compl->flags))
1431                                 /* Interpret compl as an async link evt */
1432                                 beiscsi_async_link_state_process(phba,
1433                                 (struct be_async_event_link_state *) mcc_compl);
1434                         else
1435                                 SE_DEBUG(DBG_LVL_1,
1436                                         "Unsupported Async Event, flags"
1437                                         " = 0x%08x\n", mcc_compl->flags);
1438                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1439                         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1440                         atomic_dec(&phba->ctrl.mcc_obj.q.used);
1441                 }
1442
1443                 mcc_compl->flags = 0;
1444                 queue_tail_inc(mcc_cq);
1445                 mcc_compl = queue_tail_node(mcc_cq);
1446                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1447                 num_processed++;
1448         }
1449
1450         if (num_processed > 0)
1451                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1452
1453 }
1454
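/*
 * beiscsi_process_cq - walk the completion queue attached to an event
 * queue, handling solicited completions, driver messages, unsolicited
 * header/data notifications and error CQEs, ringing the doorbell every
 * 32 entries and re-arming it on exit.  Returns the number of CQEs
 * processed.
 */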
1455 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1456 {
1457         struct be_queue_info *cq;
1458         struct sol_cqe *sol;
1459         struct dmsg_cqe *dmsg;
1460         unsigned int num_processed = 0;
1461         unsigned int tot_nump = 0;
1462         struct beiscsi_conn *beiscsi_conn;
1463         struct beiscsi_endpoint *beiscsi_ep;
1464         struct iscsi_endpoint *ep;
1465         struct beiscsi_hba *phba;
1466
1467         cq = pbe_eq->cq;
1468         sol = queue_tail_node(cq);
1469         phba = pbe_eq->phba;
1470
1471         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1472                CQE_VALID_MASK) {
1473                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1474
1475                 ep = phba->ep_array[(u32) ((sol->
1476                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1477                                    SOL_CID_MASK) >> 6) -
1478                                    phba->fw_config.iscsi_cid_start];
1479
1480                 beiscsi_ep = ep->dd_data;
1481                 beiscsi_conn = beiscsi_ep->conn;
1482
1483                 if (num_processed >= 32) {
1484                         hwi_ring_cq_db(phba, cq->id,
1485                                         num_processed, 0, 0);
1486                         tot_nump += num_processed;
1487                         num_processed = 0;
1488                 }
1489
1490                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1491                         32] & CQE_CODE_MASK) {
1492                 case SOL_CMD_COMPLETE:
1493                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1494                         break;
1495                 case DRIVERMSG_NOTIFY:
1496                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1497                         dmsg = (struct dmsg_cqe *)sol;
1498                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1499                         break;
1500                 case UNSOL_HDR_NOTIFY:
1501                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1502                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1503                                              (struct i_t_dpdu_cqe *)sol);
1504                         break;
1505                 case UNSOL_DATA_NOTIFY:
1506                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1507                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1508                                              (struct i_t_dpdu_cqe *)sol);
1509                         break;
1510                 case CXN_INVALIDATE_INDEX_NOTIFY:
1511                 case CMD_INVALIDATED_NOTIFY:
1512                 case CXN_INVALIDATE_NOTIFY:
1513                         SE_DEBUG(DBG_LVL_1,
1514                                  "Ignoring CQ Error notification for cmd/cxn "
1515                                  "invalidate\n");
1516                         break;
1517                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1518                 case CMD_KILLED_INVALID_STATSN_RCVD:
1519                 case CMD_KILLED_INVALID_R2T_RCVD:
1520                 case CMD_CXN_KILLED_LUN_INVALID:
1521                 case CMD_CXN_KILLED_ICD_INVALID:
1522                 case CMD_CXN_KILLED_ITT_INVALID:
1523                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1524                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1525                         SE_DEBUG(DBG_LVL_1,
1526                                  "CQ Error notification for cmd.. "
1527                                  "code %d cid 0x%x\n",
1528                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1529                                  32] & CQE_CODE_MASK,
1530                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1531                                  32] & SOL_CID_MASK));
1532                         break;
1533                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1534                         SE_DEBUG(DBG_LVL_1,
1535                                  "Digest error on def pdu ring, dropping..\n");
1536                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1537                                              (struct i_t_dpdu_cqe *) sol);
1538                         break;
1539                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1540                 case CXN_KILLED_BURST_LEN_MISMATCH:
1541                 case CXN_KILLED_AHS_RCVD:
1542                 case CXN_KILLED_HDR_DIGEST_ERR:
1543                 case CXN_KILLED_UNKNOWN_HDR:
1544                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1545                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1546                 case CXN_KILLED_TIMED_OUT:
1547                 case CXN_KILLED_FIN_RCVD:
1548                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1549                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1550                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1551                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1552                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1553                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1554                                  "0x%x...\n",
1555                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1556                                  32] & CQE_CODE_MASK,
1557                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1558                                  32] & CQE_CID_MASK));
1559                         iscsi_conn_failure(beiscsi_conn->conn,
1560                                            ISCSI_ERR_CONN_FAILED);
1561                         break;
1562                 case CXN_KILLED_RST_SENT:
1563                 case CXN_KILLED_RST_RCVD:
1564                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1565                                  "received/sent on CID 0x%x...\n",
1566                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1567                                  32] & CQE_CODE_MASK,
1568                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1569                                  32] & CQE_CID_MASK));
1570                         iscsi_conn_failure(beiscsi_conn->conn,
1571                                            ISCSI_ERR_CONN_FAILED);
1572                         break;
1573                 default:
1574                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1575                                  "received on CID 0x%x...\n",
1576                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1577                                  32] & CQE_CODE_MASK,
1578                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1579                                  32] & CQE_CID_MASK));
1580                         break;
1581                 }
1582
1583                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1584                 queue_tail_inc(cq);
1585                 sol = queue_tail_node(cq);
1586                 num_processed++;
1587         }
1588
1589         if (num_processed > 0) {
1590                 tot_nump += num_processed;
1591                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1592         }
1593         return tot_nump;
1594 }
1595
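/*
 * beiscsi_process_all_cqs - workqueue handler kicked from the ISR:
 * clears the todo_mcc_cq/todo_cq flags under isr_lock and services the
 * MCC CQ and/or the I/O CQ of the chosen event queue (the extra MCC EQ
 * when MSI-X is enabled, EQ 0 otherwise).
 */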
1596 void beiscsi_process_all_cqs(struct work_struct *work)
1597 {
1598         unsigned long flags;
1599         struct hwi_controller *phwi_ctrlr;
1600         struct hwi_context_memory *phwi_context;
1601         struct be_eq_obj *pbe_eq;
1602         struct beiscsi_hba *phba =
1603             container_of(work, struct beiscsi_hba, work_cqs);
1604
1605         phwi_ctrlr = phba->phwi_ctrlr;
1606         phwi_context = phwi_ctrlr->phwi_ctxt;
1607         if (phba->msix_enabled)
1608                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1609         else
1610                 pbe_eq = &phwi_context->be_eq[0];
1611
1612         if (phba->todo_mcc_cq) {
1613                 spin_lock_irqsave(&phba->isr_lock, flags);
1614                 phba->todo_mcc_cq = 0;
1615                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1616                 beiscsi_process_mcc_isr(phba);
1617         }
1618
1619         if (phba->todo_cq) {
1620                 spin_lock_irqsave(&phba->isr_lock, flags);
1621                 phba->todo_cq = 0;
1622                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1623                 beiscsi_process_cq(pbe_eq);
1624         }
1625 }
1626
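/*
 * be_iopoll - blk_iopoll callback: process the CQ bound to this EQ and,
 * when fewer entries than the budget were consumed, complete the poll
 * and re-arm the event queue.
 */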
1627 static int be_iopoll(struct blk_iopoll *iop, int budget)
1628 {
1629         unsigned int ret;
1630         struct beiscsi_hba *phba;
1631         struct be_eq_obj *pbe_eq;
1632
1633         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1634         ret = beiscsi_process_cq(pbe_eq);
1635         if (ret < budget) {
1636                 phba = pbe_eq->phba;
1637                 blk_iopoll_complete(iop);
1638                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1639                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1640         }
1641         return ret;
1642 }
1643
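/*
 * hwi_write_sgl - set up the WRB and SGL page for an I/O task: point
 * the WRB at the BHS, inline the first two scatterlist elements as WRB
 * SGEs, then describe the BHS and every scatterlist element in the
 * task's SGL page, marking the final entry as last_sge.
 */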
1644 static void
1645 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1646               unsigned int num_sg, struct beiscsi_io_task *io_task)
1647 {
1648         struct iscsi_sge *psgl;
1649         unsigned short sg_len, index;
1650         unsigned int sge_len = 0;
1651         unsigned long long addr;
1652         struct scatterlist *l_sg;
1653         unsigned int offset;
1654
1655         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1656                                       io_task->bhs_pa.u.a32.address_lo);
1657         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1658                                       io_task->bhs_pa.u.a32.address_hi);
1659
1660         l_sg = sg;
1661         for (index = 0; (index < num_sg) && (index < 2); index++,
1662                                                          sg = sg_next(sg)) {
1663                 if (index == 0) {
1664                         sg_len = sg_dma_len(sg);
1665                         addr = (u64) sg_dma_address(sg);
1666                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1667                                                         (addr & 0xFFFFFFFF));
1668                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1669                                                         (addr >> 32));
1670                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1671                                                         sg_len);
1672                         sge_len = sg_len;
1673                 } else {
1674                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1675                                                         pwrb, sge_len);
1676                         sg_len = sg_dma_len(sg);
1677                         addr = (u64) sg_dma_address(sg);
1678                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1679                                                         (addr & 0xFFFFFFFF));
1680                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1681                                                         (addr >> 32));
1682                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1683                                                         sg_len);
1684                 }
1685         }
1686         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1687         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1688
1689         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1690
1691         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1692                         io_task->bhs_pa.u.a32.address_hi);
1693         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1694                         io_task->bhs_pa.u.a32.address_lo);
1695
1696         if (num_sg == 1) {
1697                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1698                                                                 1);
1699                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1700                                                                 0);
1701         } else if (num_sg == 2) {
1702                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1703                                                                 0);
1704                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1705                                                                 1);
1706         } else {
1707                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1708                                                                 0);
1709                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1710                                                                 0);
1711         }
1712         sg = l_sg;
1713         psgl++;
1714         psgl++;
1715         offset = 0;
1716         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1717                 sg_len = sg_dma_len(sg);
1718                 addr = (u64) sg_dma_address(sg);
1719                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1720                                                 (addr & 0xFFFFFFFF));
1721                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1722                                                 (addr >> 32));
1723                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1724                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1725                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1726                 offset += sg_len;
1727         }
1728         psgl--;
1729         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1730 }
1731
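/*
 * hwi_write_buffer - set up the WRB and SGL for a non-I/O task (login,
 * NOP-Out, etc.): the BHS address is written into the WRB and, when the
 * task carries immediate data, the buffer is DMA-mapped and programmed
 * as sge0.
 */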
1732 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1733 {
1734         struct iscsi_sge *psgl;
1735         unsigned long long addr;
1736         struct beiscsi_io_task *io_task = task->dd_data;
1737         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1738         struct beiscsi_hba *phba = beiscsi_conn->phba;
1739
1740         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1741         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1742                                 io_task->bhs_pa.u.a32.address_lo);
1743         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1744                                 io_task->bhs_pa.u.a32.address_hi);
1745
1746         if (task->data) {
1747                 if (task->data_count) {
1748                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1749                         addr = (u64) pci_map_single(phba->pcidev,
1750                                                     task->data,
1751                                                     task->data_count, PCI_DMA_TODEVICE);
1752                 } else {
1753                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1754                         addr = 0;
1755                 }
1756                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1757                                                 (addr & 0xFFFFFFFF));
1758                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1759                                                 (addr >> 32));
1760                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1761                                                 task->data_count);
1762
1763                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1764         } else {
1765                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1766                 addr = 0;
1767         }
1768
1769         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1770
1771         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1772
1773         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1774                       io_task->bhs_pa.u.a32.address_hi);
1775         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1776                       io_task->bhs_pa.u.a32.address_lo);
1777         if (task->data) {
1778                 psgl++;
1779                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1780                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1781                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1782                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1783                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1784                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1785
1786                 psgl++;
1787                 if (task->data) {
1788                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1789                                                 (addr & 0xFFFFFFFF));
1790                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1791                                                 (addr >> 32));
1792                 }
1793                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1794         }
1795         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1796 }
1797
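/*
 * beiscsi_find_mem_req - fill in phba->mem_req[] with the size of every
 * memory region the driver needs (WRBs and their handles, SGL handles
 * and SGEs, default PDU buffers, rings and handles, and the async PDU
 * context), derived from the per-controller parameters.
 */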
1798 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1799 {
1800         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1801         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1802         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1803
1804         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1805                                       sizeof(struct sol_cqe));
1806         num_async_pdu_buf_pages =
1807                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1808                                        phba->params.defpdu_hdr_sz);
1809         num_async_pdu_buf_sgl_pages =
1810                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1811                                        sizeof(struct phys_addr));
1812         num_async_pdu_data_pages =
1813                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1814                                        phba->params.defpdu_data_sz);
1815         num_async_pdu_data_sgl_pages =
1816                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1817                                        sizeof(struct phys_addr));
1818
1819         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1820
1821         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1822                                                  BE_ISCSI_PDU_HEADER_SIZE;
1823         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1824                                             sizeof(struct hwi_context_memory);
1825
1826
1827         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1828             * (phba->params.wrbs_per_cxn)
1829             * phba->params.cxns_per_ctrl;
1830         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1831                                  (phba->params.wrbs_per_cxn);
1832         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1833                                 phba->params.cxns_per_ctrl);
1834
1835         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1836                 phba->params.icds_per_ctrl;
1837         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1838                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1839
1840         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1841                 num_async_pdu_buf_pages * PAGE_SIZE;
1842         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1843                 num_async_pdu_data_pages * PAGE_SIZE;
1844         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1845                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1846         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1847                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1848         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1849                 phba->params.asyncpdus_per_ctrl *
1850                 sizeof(struct async_pdu_handle);
1851         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1852                 phba->params.asyncpdus_per_ctrl *
1853                 sizeof(struct async_pdu_handle);
1854         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1855                 sizeof(struct hwi_async_pdu_context) +
1856                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1857 }
1858
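/*
 * beiscsi_alloc_mem - allocate each region sized in phba->mem_req[] as
 * up to BEISCSI_MAX_FRAGS_INIT DMA-coherent fragments.  When a fragment
 * allocation fails, the request is shrunk (rounded down to a power of
 * two, then halved) and retried until BE_MIN_MEM_SIZE is reached; on
 * failure everything allocated so far is released.
 */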
1859 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1860 {
1861         struct be_mem_descriptor *mem_descr;
1862         dma_addr_t bus_add;
1863         struct mem_array *mem_arr, *mem_arr_orig;
1864         unsigned int i, j, alloc_size, curr_alloc_size;
1865
1866         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1867         if (!phba->phwi_ctrlr)
1868                 return -ENOMEM;
1869
1870         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1871                                  GFP_KERNEL);
1872         if (!phba->init_mem) {
1873                 kfree(phba->phwi_ctrlr);
1874                 return -ENOMEM;
1875         }
1876
1877         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1878                                GFP_KERNEL);
1879         if (!mem_arr_orig) {
1880                 kfree(phba->init_mem);
1881                 kfree(phba->phwi_ctrlr);
1882                 return -ENOMEM;
1883         }
1884
1885         mem_descr = phba->init_mem;
1886         for (i = 0; i < SE_MEM_MAX; i++) {
1887                 j = 0;
1888                 mem_arr = mem_arr_orig;
1889                 alloc_size = phba->mem_req[i];
1890                 memset(mem_arr, 0, sizeof(struct mem_array) *
1891                        BEISCSI_MAX_FRAGS_INIT);
1892                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1893                 do {
1894                         mem_arr->virtual_address = pci_alloc_consistent(
1895                                                         phba->pcidev,
1896                                                         curr_alloc_size,
1897                                                         &bus_add);
1898                         if (!mem_arr->virtual_address) {
1899                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1900                                         goto free_mem;
1901                                 if (curr_alloc_size -
1902                                         rounddown_pow_of_two(curr_alloc_size))
1903                                         curr_alloc_size = rounddown_pow_of_two
1904                                                              (curr_alloc_size);
1905                                 else
1906                                         curr_alloc_size = curr_alloc_size / 2;
1907                         } else {
1908                                 mem_arr->bus_address.u.
1909                                     a64.address = (__u64) bus_add;
1910                                 mem_arr->size = curr_alloc_size;
1911                                 alloc_size -= curr_alloc_size;
1912                                 curr_alloc_size = min(be_max_phys_size *
1913                                                       1024, alloc_size);
1914                                 j++;
1915                                 mem_arr++;
1916                         }
1917                 } while (alloc_size);
1918                 mem_descr->num_elements = j;
1919                 mem_descr->size_in_bytes = phba->mem_req[i];
1920                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1921                                                GFP_KERNEL);
1922                 if (!mem_descr->mem_array)
1923                         goto free_mem;
1924
1925                 memcpy(mem_descr->mem_array, mem_arr_orig,
1926                        sizeof(struct mem_array) * j);
1927                 mem_descr++;
1928         }
1929         kfree(mem_arr_orig);
1930         return 0;
1931 free_mem:
1932         mem_descr->num_elements = j;
1933         while ((i) || (j)) {
1934                 for (j = mem_descr->num_elements; j > 0; j--) {
1935                         pci_free_consistent(phba->pcidev,
1936                                             mem_descr->mem_array[j - 1].size,
1937                                             mem_descr->mem_array[j - 1].
1938                                             virtual_address,
1939                                             mem_descr->mem_array[j - 1].
1940                                             bus_address.u.a64.address);
1941                 }
1942                 if (i) {
1943                         i--;
1944                         kfree(mem_descr->mem_array);
1945                         mem_descr--;
1946                 }
1947         }
1948         kfree(mem_arr_orig);
1949         kfree(phba->init_mem);
1950         kfree(phba->phwi_ctrlr);
1951         return -ENOMEM;
1952 }
1953
1954 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1955 {
1956         beiscsi_find_mem_req(phba);
1957         return beiscsi_alloc_mem(phba);
1958 }
1959
1960 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1961 {
1962         struct pdu_data_out *pdata_out;
1963         struct pdu_nop_out *pnop_out;
1964         struct be_mem_descriptor *mem_descr;
1965
1966         mem_descr = phba->init_mem;
1967         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1968         pdata_out =
1969             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1970         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1971
1972         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1973                       IIOC_SCSI_DATA);
1974
1975         pnop_out =
1976             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1977                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1978
1979         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1980         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1981         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1982         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1983 }
1984
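/*
 * beiscsi_init_wrb_handle - build the per-connection arrays of WRB
 * handles from the HWI_MEM_WRBH region and bind an iscsi_wrb from
 * HWI_MEM_WRB to every handle, moving on to the next memory fragment
 * whenever the current one is exhausted.
 */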
1985 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1986 {
1987         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1988         struct wrb_handle *pwrb_handle;
1989         struct hwi_controller *phwi_ctrlr;
1990         struct hwi_wrb_context *pwrb_context;
1991         struct iscsi_wrb *pwrb;
1992         unsigned int num_cxn_wrbh;
1993         unsigned int num_cxn_wrb, j, idx, index;
1994
1995         mem_descr_wrbh = phba->init_mem;
1996         mem_descr_wrbh += HWI_MEM_WRBH;
1997
1998         mem_descr_wrb = phba->init_mem;
1999         mem_descr_wrb += HWI_MEM_WRB;
2000
2001         idx = 0;
2002         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2003         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2004                         ((sizeof(struct wrb_handle)) *
2005                          phba->params.wrbs_per_cxn));
2006         phwi_ctrlr = phba->phwi_ctrlr;
2007
2008         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2009                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2010                 pwrb_context->pwrb_handle_base =
2011                                 kzalloc(sizeof(struct wrb_handle *) *
2012                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2013                 pwrb_context->pwrb_handle_basestd =
2014                                 kzalloc(sizeof(struct wrb_handle *) *
2015                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
2016                 if (num_cxn_wrbh) {
2017                         pwrb_context->alloc_index = 0;
2018                         pwrb_context->wrb_handles_available = 0;
2019                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2020                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2021                                 pwrb_context->pwrb_handle_basestd[j] =
2022                                                                 pwrb_handle;
2023                                 pwrb_context->wrb_handles_available++;
2024                                 pwrb_handle->wrb_index = j;
2025                                 pwrb_handle++;
2026                         }
2027                         pwrb_context->free_index = 0;
2028                         num_cxn_wrbh--;
2029                 } else {
2030                         idx++;
2031                         pwrb_handle =
2032                             mem_descr_wrbh->mem_array[idx].virtual_address;
2033                         num_cxn_wrbh =
2034                             ((mem_descr_wrbh->mem_array[idx].size) /
2035                              ((sizeof(struct wrb_handle)) *
2036                               phba->params.wrbs_per_cxn));
2037                         pwrb_context->alloc_index = 0;
2038                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2039                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2040                                 pwrb_context->pwrb_handle_basestd[j] =
2041                                     pwrb_handle;
2042                                 pwrb_context->wrb_handles_available++;
2043                                 pwrb_handle->wrb_index = j;
2044                                 pwrb_handle++;
2045                         }
2046                         pwrb_context->free_index = 0;
2047                         num_cxn_wrbh--;
2048                 }
2049         }
2050         idx = 0;
2051         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2052         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2053                       ((sizeof(struct iscsi_wrb) *
2054                         phba->params.wrbs_per_cxn));
2055         for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
2056                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2057                 if (num_cxn_wrb) {
2058                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2059                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2060                                 pwrb_handle->pwrb = pwrb;
2061                                 pwrb++;
2062                         }
2063                         num_cxn_wrb--;
2064                 } else {
2065                         idx++;
2066                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2067                         num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2068                                       ((sizeof(struct iscsi_wrb) *
2069                                         phba->params.wrbs_per_cxn));
2070                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2071                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2072                                 pwrb_handle->pwrb = pwrb;
2073                                 pwrb++;
2074                         }
2075                         num_cxn_wrb--;
2076                 }
2077         }
2078 }
2079
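/*
 * hwi_init_async_pdu_ctx - lay the async PDU context over the
 * HWI_MEM_ASYNC_* regions: record the header/data buffer, ring and
 * handle base addresses, then build the free lists of header and data
 * handles, one pair per async PDU, each pointing at its slice of the
 * buffer area.
 */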
2080 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2081 {
2082         struct hwi_controller *phwi_ctrlr;
2083         struct hba_parameters *p = &phba->params;
2084         struct hwi_async_pdu_context *pasync_ctx;
2085         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2086         unsigned int index;
2087         struct be_mem_descriptor *mem_descr;
2088
2089         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2090         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2091
2092         phwi_ctrlr = phba->phwi_ctrlr;
2093         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2094                                 mem_descr->mem_array[0].virtual_address;
2095         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2096         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2097
2098         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2099         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2100         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2101         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2102
2103         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2104         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2105         if (mem_descr->mem_array[0].virtual_address) {
2106                 SE_DEBUG(DBG_LVL_8,
2107                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF "
2108                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2109         } else
2110                 shost_printk(KERN_WARNING, phba->shost,
2111                              "No Virtual address\n");
2112
2113         pasync_ctx->async_header.va_base =
2114                         mem_descr->mem_array[0].virtual_address;
2115
2116         pasync_ctx->async_header.pa_base.u.a64.address =
2117                         mem_descr->mem_array[0].bus_address.u.a64.address;
2118
2119         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2120         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2121         if (mem_descr->mem_array[0].virtual_address) {
2122                 SE_DEBUG(DBG_LVL_8,
2123                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING "
2124                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2125         } else
2126                 shost_printk(KERN_WARNING, phba->shost,
2127                              "No Virtual address\n");
2128         pasync_ctx->async_header.ring_base =
2129                         mem_descr->mem_array[0].virtual_address;
2130
2131         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2132         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2133         if (mem_descr->mem_array[0].virtual_address) {
2134                 SE_DEBUG(DBG_LVL_8,
2135                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE "
2136                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2137         } else
2138                 shost_printk(KERN_WARNING, phba->shost,
2139                              "No Virtual address\n");
2140
2141         pasync_ctx->async_header.handle_base =
2142                         mem_descr->mem_array[0].virtual_address;
2143         pasync_ctx->async_header.writables = 0;
2144         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2145
2146         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2147         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2148         if (mem_descr->mem_array[0].virtual_address) {
2149                 SE_DEBUG(DBG_LVL_8,
2150                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF "
2151                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2152         } else
2153                 shost_printk(KERN_WARNING, phba->shost,
2154                              "No Virtual address\n");
2155         pasync_ctx->async_data.va_base =
2156                         mem_descr->mem_array[0].virtual_address;
2157         pasync_ctx->async_data.pa_base.u.a64.address =
2158                         mem_descr->mem_array[0].bus_address.u.a64.address;
2159
2160         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2161         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2162         if (mem_descr->mem_array[0].virtual_address) {
2163                 SE_DEBUG(DBG_LVL_8,
2164                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING "
2165                          "va=%p\n", mem_descr->mem_array[0].virtual_address);
2166         } else
2167                 shost_printk(KERN_WARNING, phba->shost,
2168                              "No Virtual address\n");
2169
2170         pasync_ctx->async_data.ring_base =
2171                         mem_descr->mem_array[0].virtual_address;
2172
2173         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2174         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2175         if (!mem_descr->mem_array[0].virtual_address)
2176                 shost_printk(KERN_WARNING, phba->shost,
2177                              "No Virtual address\n");
2178
2179         pasync_ctx->async_data.handle_base =
2180                         mem_descr->mem_array[0].virtual_address;
2181         pasync_ctx->async_data.writables = 0;
2182         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2183
2184         pasync_header_h =
2185                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2186         pasync_data_h =
2187                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2188
2189         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2190                 pasync_header_h->cri = -1;
2191                 pasync_header_h->index = (char)index;
2192                 INIT_LIST_HEAD(&pasync_header_h->link);
2193                 pasync_header_h->pbuffer =
2194                         (void *)((unsigned long)
2195                         (pasync_ctx->async_header.va_base) +
2196                         (p->defpdu_hdr_sz * index));
2197
2198                 pasync_header_h->pa.u.a64.address =
2199                         pasync_ctx->async_header.pa_base.u.a64.address +
2200                         (p->defpdu_hdr_sz * index);
2201
2202                 list_add_tail(&pasync_header_h->link,
2203                                 &pasync_ctx->async_header.free_list);
2204                 pasync_header_h++;
2205                 pasync_ctx->async_header.free_entries++;
2206                 pasync_ctx->async_header.writables++;
2207
2208                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2209                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2210                                header_busy_list);
2211                 pasync_data_h->cri = -1;
2212                 pasync_data_h->index = (char)index;
2213                 INIT_LIST_HEAD(&pasync_data_h->link);
2214                 pasync_data_h->pbuffer =
2215                         (void *)((unsigned long)
2216                         (pasync_ctx->async_data.va_base) +
2217                         (p->defpdu_data_sz * index));
2218
2219                 pasync_data_h->pa.u.a64.address =
2220                     pasync_ctx->async_data.pa_base.u.a64.address +
2221                     (p->defpdu_data_sz * index);
2222
2223                 list_add_tail(&pasync_data_h->link,
2224                               &pasync_ctx->async_data.free_list);
2225                 pasync_data_h++;
2226                 pasync_ctx->async_data.free_entries++;
2227                 pasync_ctx->async_data.writables++;
2228
2229                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2230         }
2231
2232         pasync_ctx->async_header.host_write_ptr = 0;
2233         pasync_ctx->async_header.ep_read_ptr = -1;
2234         pasync_ctx->async_data.host_write_ptr = 0;
2235         pasync_ctx->async_data.ep_read_ptr = -1;
2236 }
2237
2238 static int
2239 be_sgl_create_contiguous(void *virtual_address,
2240                          u64 physical_address, u32 length,
2241                          struct be_dma_mem *sgl)
2242 {
2243         WARN_ON(!virtual_address);
2244         WARN_ON(!physical_address);
2245         WARN_ON(!length);
2246         WARN_ON(!sgl);
2247
2248         sgl->va = virtual_address;
2249         sgl->dma = physical_address;
2250         sgl->size = length;
2251
2252         return 0;
2253 }
2254
2255 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2256 {
2257         memset(sgl, 0, sizeof(*sgl));
2258 }
2259
2260 static void
2261 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2262                      struct mem_array *pmem, struct be_dma_mem *sgl)
2263 {
2264         if (sgl->va)
2265                 be_sgl_destroy_contiguous(sgl);
2266
2267         be_sgl_create_contiguous(pmem->virtual_address,
2268                                  pmem->bus_address.u.a64.address,
2269                                  pmem->size, sgl);
2270 }
2271
2272 static void
2273 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2274                            struct mem_array *pmem, struct be_dma_mem *sgl)
2275 {
2276         if (sgl->va)
2277                 be_sgl_destroy_contiguous(sgl);
2278
2279         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2280                                  pmem->bus_address.u.a64.address,
2281                                  pmem->size, sgl);
2282 }
2283
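/*
 * be_fill_queue - initialise a be_queue_info for a ring of 'len'
 * entries of 'entry_size' bytes backed by 'vaddress', zeroing the
 * backing memory.
 */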
2284 static int be_fill_queue(struct be_queue_info *q,
2285                 u16 len, u16 entry_size, void *vaddress)
2286 {
2287         struct be_dma_mem *mem = &q->dma_mem;
2288
2289         memset(q, 0, sizeof(*q));
2290         q->len = len;
2291         q->entry_size = entry_size;
2292         mem->size = len * entry_size;
2293         mem->va = vaddress;
2294         if (!mem->va)
2295                 return -ENOMEM;
2296         memset(mem->va, 0, mem->size);
2297         return 0;
2298 }
2299
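/*
 * beiscsi_create_eqs - allocate DMA memory for and create one event
 * queue per CPU, plus an extra EQ for the MCC when MSI-X is enabled;
 * everything created so far is freed if any step fails.
 */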
2300 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2301                              struct hwi_context_memory *phwi_context)
2302 {
2303         unsigned int i, num_eq_pages;
2304         int ret, eq_for_mcc;
2305         struct be_queue_info *eq;
2306         struct be_dma_mem *mem;
2307         void *eq_vaddress;
2308         dma_addr_t paddr;
2309
2310         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2311                                       sizeof(struct be_eq_entry));
2312
2313         if (phba->msix_enabled)
2314                 eq_for_mcc = 1;
2315         else
2316                 eq_for_mcc = 0;
2317         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2318                 eq = &phwi_context->be_eq[i].q;
2319                 mem = &eq->dma_mem;
2320                 phwi_context->be_eq[i].phba = phba;
2321                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2322                                                      num_eq_pages * PAGE_SIZE,
2323                                                      &paddr);
2324                 if (!eq_vaddress)
2325                         goto create_eq_error;
2326
2327                 mem->va = eq_vaddress;
2328                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2329                                     sizeof(struct be_eq_entry), eq_vaddress);
2330                 if (ret) {
2331                         shost_printk(KERN_ERR, phba->shost,
2332                                      "be_fill_queue Failed for EQ\n");
2333                         goto create_eq_error;
2334                 }
2335
2336                 mem->dma = paddr;
2337                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2338                                             phwi_context->cur_eqd);
2339                 if (ret) {
2340                         shost_printk(KERN_ERR, phba->shost,
2341                                      "beiscsi_cmd_eq_create "
2342                                      "Failed for EQ\n");
2343                         goto create_eq_error;
2344                 }
2345                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2346         }
2347         return 0;
2348 create_eq_error:
2349         for (i = 0; i < (phba->num_cpus + 1); i++) {
2350                 eq = &phwi_context->be_eq[i].q;
2351                 mem = &eq->dma_mem;
2352                 if (mem->va)
2353                         pci_free_consistent(phba->pcidev, num_eq_pages
2354                                             * PAGE_SIZE,
2355                                             mem->va, mem->dma);
2356         }
2357         return ret;
2358 }
2359
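/*
 * beiscsi_create_cqs - allocate DMA memory for and create one I/O
 * completion queue per CPU, binding each CQ to its event queue.
 */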
2360 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2361                              struct hwi_context_memory *phwi_context)
2362 {
2363         unsigned int i, num_cq_pages;
2364         int ret;
2365         struct be_queue_info *cq, *eq;
2366         struct be_dma_mem *mem;
2367         struct be_eq_obj *pbe_eq;
2368         void *cq_vaddress;
2369         dma_addr_t paddr;
2370
2371         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2372                                       sizeof(struct sol_cqe));
2373
2374         for (i = 0; i < phba->num_cpus; i++) {
2375                 cq = &phwi_context->be_cq[i];
2376                 eq = &phwi_context->be_eq[i].q;
2377                 pbe_eq = &phwi_context->be_eq[i];
2378                 pbe_eq->cq = cq;
2379                 pbe_eq->phba = phba;
2380                 mem = &cq->dma_mem;
2381                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2382                                                      num_cq_pages * PAGE_SIZE,
2383                                                      &paddr);
2384                 if (!cq_vaddress)
2385                         goto create_cq_error;
2386                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2387                                     sizeof(struct sol_cqe), cq_vaddress);
2388                 if (ret) {
2389                         shost_printk(KERN_ERR, phba->shost,
2390                                      "be_fill_queue Failed for ISCSI CQ\n");
2391                         goto create_cq_error;
2392                 }
2393
2394                 mem->dma = paddr;
2395                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2396                                             false, 0);
2397                 if (ret) {
2398                         shost_printk(KERN_ERR, phba->shost,
2399                                      "beiscsi_cmd_cq_create "
2400                                      "Failed for ISCSI CQ\n");
2401                         goto create_cq_error;
2402                 }
2403                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2404                                                  cq->id, eq->id);
2405                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2406         }
2407         return 0;
2408
2409 create_cq_error:
2410         for (i = 0; i < phba->num_cpus; i++) {
2411                 cq = &phwi_context->be_cq[i];
2412                 mem = &cq->dma_mem;
2413                 if (mem->va)
2414                         pci_free_consistent(phba->pcidev, num_cq_pages
2415                                             * PAGE_SIZE,
2416                                             mem->va, mem->dma);
2417         }
2418         return ret;
2419
2420 }
2421
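/*
 * beiscsi_create_def_hdr - create the default PDU header ring on top of
 * the HWI_MEM_ASYNC_HEADER_RING region and post the initial set of
 * header buffers to it.
 */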
2422 static int
2423 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2424                        struct hwi_context_memory *phwi_context,
2425                        struct hwi_controller *phwi_ctrlr,
2426                        unsigned int def_pdu_ring_sz)
2427 {
2428         unsigned int idx;
2429         int ret;
2430         struct be_queue_info *dq, *cq;
2431         struct be_dma_mem *mem;
2432         struct be_mem_descriptor *mem_descr;
2433         void *dq_vaddress;
2434
2435         idx = 0;
2436         dq = &phwi_context->be_def_hdrq;
2437         cq = &phwi_context->be_cq[0];
2438         mem = &dq->dma_mem;
2439         mem_descr = phba->init_mem;
2440         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2441         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2442         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2443                             sizeof(struct phys_addr),
2444                             sizeof(struct phys_addr), dq_vaddress);
2445         if (ret) {
2446                 shost_printk(KERN_ERR, phba->shost,
2447                              "be_fill_queue Failed for DEF PDU HDR\n");
2448                 return ret;
2449         }
2450         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2451         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2452                                               def_pdu_ring_sz,
2453                                               phba->params.defpdu_hdr_sz);
2454         if (ret) {
2455                 shost_printk(KERN_ERR, phba->shost,
2456                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2457                 return ret;
2458         }
2459         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2460         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2461                  phwi_context->be_def_hdrq.id);
2462         hwi_post_async_buffers(phba, 1);
2463         return 0;
2464 }
2465
2466 static int
2467 beiscsi_create_def_data(struct beiscsi_hba *phba,
2468                         struct hwi_context_memory *phwi_context,
2469                         struct hwi_controller *phwi_ctrlr,
2470                         unsigned int def_pdu_ring_sz)
2471 {
2472         unsigned int idx;
2473         int ret;
2474         struct be_queue_info *dataq, *cq;
2475         struct be_dma_mem *mem;
2476         struct be_mem_descriptor *mem_descr;
2477         void *dq_vaddress;
2478
2479         idx = 0;
2480         dataq = &phwi_context->be_def_dataq;
2481         cq = &phwi_context->be_cq[0];
2482         mem = &dataq->dma_mem;
2483         mem_descr = phba->init_mem;
2484         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2485         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2486         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2487                             sizeof(struct phys_addr),
2488                             sizeof(struct phys_addr), dq_vaddress);
2489         if (ret) {
2490                 shost_printk(KERN_ERR, phba->shost,
2491                              "be_fill_queue Failed for DEF PDU DATA\n");
2492                 return ret;
2493         }
2494         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2495         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2496                                               def_pdu_ring_sz,
2497                                               phba->params.defpdu_data_sz);
2498         if (ret) {
2499                 shost_printk(KERN_ERR, phba->shost,
2500                              "be_cmd_create_default_pdu_queue Failed"
2501                              " for DEF PDU DATA\n");
2502                 return ret;
2503         }
2504         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2505         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2506                  phwi_context->be_def_dataq.id);
2507         hwi_post_async_buffers(phba, 0);
2508         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2509         return 0;
2510 }
2511
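/*
 * Post the HWI_MEM_SGE backing pages to the adapter.  The starting
 * page offset is derived from the firmware's iscsi_icd_start, so the
 * posted SGL pages appear to line up with the ICD range assigned to
 * this function.
 */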
2512 static int
2513 beiscsi_post_pages(struct beiscsi_hba *phba)
2514 {
2515         struct be_mem_descriptor *mem_descr;
2516         struct mem_array *pm_arr;
2517         unsigned int page_offset, i;
2518         struct be_dma_mem sgl;
2519         int status;
2520
2521         mem_descr = phba->init_mem;
2522         mem_descr += HWI_MEM_SGE;
2523         pm_arr = mem_descr->mem_array;
2524
2525         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2526                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2527         for (i = 0; i < mem_descr->num_elements; i++) {
2528                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2529                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2530                                                 page_offset,
2531                                                 (pm_arr->size / PAGE_SIZE));
2532                 page_offset += pm_arr->size / PAGE_SIZE;
2533                 if (status != 0) {
2534                         shost_printk(KERN_ERR, phba->shost,
2535                                      "post sgl failed.\n");
2536                         return status;
2537                 }
2538                 pm_arr++;
2539         }
2540         SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2541         return 0;
2542 }
2543
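/*
 * Small helpers for queues that are not carved out of init_mem:
 * be_queue_alloc()/be_queue_free() give a be_queue_info its own
 * DMA-coherent backing buffer.  They are used below for the MCC
 * queue and its completion queue.
 */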
2544 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2545 {
2546         struct be_dma_mem *mem = &q->dma_mem;
2547         if (mem->va)
2548                 pci_free_consistent(phba->pcidev, mem->size,
2549                         mem->va, mem->dma);
2550 }
2551
2552 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2553                 u16 len, u16 entry_size)
2554 {
2555         struct be_dma_mem *mem = &q->dma_mem;
2556
2557         memset(q, 0, sizeof(*q));
2558         q->len = len;
2559         q->entry_size = entry_size;
2560         mem->size = len * entry_size;
2561         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2562         if (!mem->va)
2563                 return -ENOMEM;
2564         memset(mem->va, 0, mem->size);
2565         return 0;
2566 }
2567
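/*
 * Carve the HWI_MEM_WRB region into one WRB ring per connection,
 * each wrbs_per_cxn entries long.  When the current mem_array element
 * runs out, the walk continues in the next element, and every ring is
 * then registered with the firmware through be_cmd_wrbq_create().
 */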
2568 static int
2569 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2570                          struct hwi_context_memory *phwi_context,
2571                          struct hwi_controller *phwi_ctrlr)
2572 {
2573         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2574         u64 pa_addr_lo;
2575         unsigned int idx, num, i;
2576         struct mem_array *pwrb_arr;
2577         void *wrb_vaddr;
2578         struct be_dma_mem sgl;
2579         struct be_mem_descriptor *mem_descr;
2580         int status;
2581
2582         idx = 0;
2583         mem_descr = phba->init_mem;
2584         mem_descr += HWI_MEM_WRB;
2585         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2586                            GFP_KERNEL);
2587         if (!pwrb_arr) {
2588                 shost_printk(KERN_ERR, phba->shost,
2589                              "Memory alloc failed in create wrb ring.\n");
2590                 return -ENOMEM;
2591         }
2592         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2593         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2594         num_wrb_rings = mem_descr->mem_array[idx].size /
2595                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2596
2597         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2598                 if (num_wrb_rings) {
2599                         pwrb_arr[num].virtual_address = wrb_vaddr;
2600                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2601                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2602                                             sizeof(struct iscsi_wrb);
2603                         wrb_vaddr += pwrb_arr[num].size;
2604                         pa_addr_lo += pwrb_arr[num].size;
2605                         num_wrb_rings--;
2606                 } else {
2607                         idx++;
2608                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2609                         pa_addr_lo = mem_descr->mem_array[idx].
2610                                         bus_address.u.a64.address;
2611                         num_wrb_rings = mem_descr->mem_array[idx].size /
2612                                         (phba->params.wrbs_per_cxn *
2613                                         sizeof(struct iscsi_wrb));
2614                         pwrb_arr[num].virtual_address = wrb_vaddr;
2615                         pwrb_arr[num].bus_address.u.a64.address =
2616                                                 pa_addr_lo;
2617                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2618                                                  sizeof(struct iscsi_wrb);
2619                         wrb_vaddr += pwrb_arr[num].size;
2620                         pa_addr_lo += pwrb_arr[num].size;
2621                         num_wrb_rings--;
2622                 }
2623         }
2624         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2625                 wrb_mem_index = 0;
2626                 offset = 0;
2627                 size = 0;
2628
2629                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2630                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2631                                             &phwi_context->be_wrbq[i]);
2632                 if (status != 0) {
2633                         shost_printk(KERN_ERR, phba->shost,
2634                                      "wrbq create failed.\n");
                             kfree(pwrb_arr);
2635                         return status;
2636                 }
2637                 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
2638                                                                    id;
2639         }
2640         kfree(pwrb_arr);
2641         return 0;
2642 }
2643
2644 static void free_wrb_handles(struct beiscsi_hba *phba)
2645 {
2646         unsigned int index;
2647         struct hwi_controller *phwi_ctrlr;
2648         struct hwi_wrb_context *pwrb_context;
2649
2650         phwi_ctrlr = phba->phwi_ctrlr;
2651         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2652                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2653                 kfree(pwrb_context->pwrb_handle_base);
2654                 kfree(pwrb_context->pwrb_handle_basestd);
2655         }
2656 }
2657
2658 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2659 {
2660         struct be_queue_info *q;
2661         struct be_ctrl_info *ctrl = &phba->ctrl;
2662
2663         q = &phba->ctrl.mcc_obj.q;
2664         if (q->created)
2665                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2666         be_queue_free(phba, q);
2667
2668         q = &phba->ctrl.mcc_obj.cq;
2669         if (q->created)
2670                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2671         be_queue_free(phba, q);
2672 }
2673
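/*
 * Undo what hwi_init_port() set up, roughly in reverse order:
 * per-connection WRB queues and handles, the default PDU header/data
 * rings, the posted SGLs, the completion and event queues, and
 * finally the MCC queues.  The extra EQ torn down when MSI-X is
 * enabled is presumably the one dedicated to MCC completions.
 */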
2674 static void hwi_cleanup(struct beiscsi_hba *phba)
2675 {
2676         struct be_queue_info *q;
2677         struct be_ctrl_info *ctrl = &phba->ctrl;
2678         struct hwi_controller *phwi_ctrlr;
2679         struct hwi_context_memory *phwi_context;
2680         int i, eq_num;
2681
2682         phwi_ctrlr = phba->phwi_ctrlr;
2683         phwi_context = phwi_ctrlr->phwi_ctxt;
2684         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2685                 q = &phwi_context->be_wrbq[i];
2686                 if (q->created)
2687                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2688         }
2689         free_wrb_handles(phba);
2690
2691         q = &phwi_context->be_def_hdrq;
2692         if (q->created)
2693                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2694
2695         q = &phwi_context->be_def_dataq;
2696         if (q->created)
2697                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2698
2699         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2700
2701         for (i = 0; i < (phba->num_cpus); i++) {
2702                 q = &phwi_context->be_cq[i];
2703                 if (q->created)
2704                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2705         }
2706         if (phba->msix_enabled)
2707                 eq_num = 1;
2708         else
2709                 eq_num = 0;
2710         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2711                 q = &phwi_context->be_eq[i].q;
2712                 if (q->created)
2713                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2714         }
2715         be_mcc_queues_destroy(phba);
2716 }
2717
2718 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2719                                 struct hwi_context_memory *phwi_context)
2720 {
2721         struct be_queue_info *q, *cq;
2722         struct be_ctrl_info *ctrl = &phba->ctrl;
2723
2724         /* Alloc MCC compl queue */
2725         cq = &phba->ctrl.mcc_obj.cq;
2726         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2727                         sizeof(struct be_mcc_compl)))
2728                 goto err;
2729         /* Ask BE to create MCC compl queue; */
2730         if (phba->msix_enabled) {
2731                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2732                                          [phba->num_cpus].q, false, true, 0))
2733                         goto mcc_cq_free;
2734         } else {
2735                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2736                                           false, true, 0))
2737                         goto mcc_cq_free;
2738         }
2739
2740         /* Alloc MCC queue */
2741         q = &phba->ctrl.mcc_obj.q;
2742         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2743                 goto mcc_cq_destroy;
2744
2745         /* Ask BE to create MCC queue */
2746         if (beiscsi_cmd_mccq_create(phba, q, cq))
2747                 goto mcc_q_free;
2748
2749         return 0;
2750
2751 mcc_q_free:
2752         be_queue_free(phba, q);
2753 mcc_cq_destroy:
2754         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2755 mcc_cq_free:
2756         be_queue_free(phba, cq);
2757 err:
2758         return -1;
2759 }
2760
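/*
 * One event queue is created per CPU for I/O, plus one more for MCC
 * when MSI-X is enabled, which is presumably why the CPU count is
 * capped at MAX_CPUS - 1 here.
 */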
2761 static int find_num_cpus(void)
2762 {
2763         int  num_cpus = 0;
2764
2765         num_cpus = num_online_cpus();
2766         if (num_cpus >= MAX_CPUS)
2767                 num_cpus = MAX_CPUS - 1;
2768
2769         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2770         return num_cpus;
2771 }
2772
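/*
 * Bring the adapter queues up in dependency order: firmware init,
 * EQs, MCC queues, firmware version check, CQs, default PDU
 * header/data rings, SGL page posting and finally the per-connection
 * WRB rings.  Any failure unwinds through hwi_cleanup().
 */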
2773 static int hwi_init_port(struct beiscsi_hba *phba)
2774 {
2775         struct hwi_controller *phwi_ctrlr;
2776         struct hwi_context_memory *phwi_context;
2777         unsigned int def_pdu_ring_sz;
2778         struct be_ctrl_info *ctrl = &phba->ctrl;
2779         int status;
2780
2781         def_pdu_ring_sz =
2782                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2783         phwi_ctrlr = phba->phwi_ctrlr;
2784         phwi_context = phwi_ctrlr->phwi_ctxt;
2785         phwi_context->max_eqd = 0;
2786         phwi_context->min_eqd = 0;
2787         phwi_context->cur_eqd = 64;
2788         be_cmd_fw_initialize(&phba->ctrl);
2789
2790         status = beiscsi_create_eqs(phba, phwi_context);
2791         if (status != 0) {
2792                 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2793                 goto error;
2794         }
2795
2796         status = be_mcc_queues_create(phba, phwi_context);
2797         if (status != 0)
2798                 goto error;
2799
2800         status = mgmt_check_supported_fw(ctrl, phba);
2801         if (status != 0) {
2802                 shost_printk(KERN_ERR, phba->shost,
2803                              "Unsupported fw version \n");
2804                 goto error;
2805         }
2806
2807         status = beiscsi_create_cqs(phba, phwi_context);
2808         if (status != 0) {
2809                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2810                 goto error;
2811         }
2812
2813         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2814                                         def_pdu_ring_sz);
2815         if (status != 0) {
2816                 shost_printk(KERN_ERR, phba->shost,
2817                              "Default Header not created\n");
2818                 goto error;
2819         }
2820
2821         status = beiscsi_create_def_data(phba, phwi_context,
2822                                          phwi_ctrlr, def_pdu_ring_sz);
2823         if (status != 0) {
2824                 shost_printk(KERN_ERR, phba->shost,
2825                              "Default Data not created\n");
2826                 goto error;
2827         }
2828
2829         status = beiscsi_post_pages(phba);
2830         if (status != 0) {
2831                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2832                 goto error;
2833         }
2834
2835         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2836         if (status != 0) {
2837                 shost_printk(KERN_ERR, phba->shost,
2838                              "WRB Rings not created\n");
2839                 goto error;
2840         }
2841
2842         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2843         return 0;
2844
2845 error:
2846         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2847         hwi_cleanup(phba);
2848         return -ENOMEM;
2849 }
2850
2851 static int hwi_init_controller(struct beiscsi_hba *phba)
2852 {
2853         struct hwi_controller *phwi_ctrlr;
2854
2855         phwi_ctrlr = phba->phwi_ctrlr;
2856         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2857                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2858                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2859                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2860                          phwi_ctrlr->phwi_ctxt);
2861         } else {
2862                 shost_printk(KERN_ERR, phba->shost,
2863                              "HWI_MEM_ADDN_CONTEXT is more than one element. "
2864                              "Failing to load\n");
2865                 return -ENOMEM;
2866         }
2867
2868         iscsi_init_global_templates(phba);
2869         beiscsi_init_wrb_handle(phba);
2870         hwi_init_async_pdu_ctx(phba);
2871         if (hwi_init_port(phba) != 0) {
2872                 shost_printk(KERN_ERR, phba->shost,
2873                              "hwi_init_controller failed\n");
2874                 return -ENOMEM;
2875         }
2876         return 0;
2877 }
2878
2879 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2880 {
2881         struct be_mem_descriptor *mem_descr;
2882         int i, j;
2883
2884         mem_descr = phba->init_mem;
2885         i = 0;
2886         j = 0;
2887         for (i = 0; i < SE_MEM_MAX; i++) {
2888                 for (j = mem_descr->num_elements; j > 0; j--) {
2889                         pci_free_consistent(phba->pcidev,
2890                           mem_descr->mem_array[j - 1].size,
2891                           mem_descr->mem_array[j - 1].virtual_address,
2892                           mem_descr->mem_array[j - 1].bus_address.
2893                                 u.a64.address);
2894                 }
2895                 kfree(mem_descr->mem_array);
2896                 mem_descr++;
2897         }
2898         kfree(phba->init_mem);
2899         kfree(phba->phwi_ctrlr);
2900 }
2901
2902 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2903 {
2904         int ret = -ENOMEM;
2905
2906         ret = beiscsi_get_memory(phba);
2907         if (ret < 0) {
2908                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
2909                              "Failed in beiscsi_get_memory\n");
2910                 return ret;
2911         }
2912
2913         ret = hwi_init_controller(phba);
2914         if (ret)
2915                 goto free_init;
2916         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2917         return 0;
2918
2919 free_init:
2920         beiscsi_free_mem(phba);
2921         return -ENOMEM;
2922 }
2923
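/*
 * Split the SGL handles from HWI_MEM_SGLH into two pools: the first
 * ios_per_ctrl handles serve SCSI I/O, the rest form the "eh" pool
 * that management/login tasks appear to draw from.  Every handle is
 * then pointed at its SGE fragment area in HWI_MEM_SGE and given an
 * absolute sgl_index starting at the firmware's iscsi_icd_start.
 */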
2924 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2925 {
2926         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2927         struct sgl_handle *psgl_handle;
2928         struct iscsi_sge *pfrag;
2929         unsigned int arr_index, i, idx;
2930
2931         phba->io_sgl_hndl_avbl = 0;
2932         phba->eh_sgl_hndl_avbl = 0;
2933
2934         mem_descr_sglh = phba->init_mem;
2935         mem_descr_sglh += HWI_MEM_SGLH;
2936         if (1 == mem_descr_sglh->num_elements) {
2937                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2938                                                  phba->params.ios_per_ctrl,
2939                                                  GFP_KERNEL);
2940                 if (!phba->io_sgl_hndl_base) {
2941                         shost_printk(KERN_ERR, phba->shost,
2942                                      "Mem Alloc Failed. Failing to load\n");
2943                         return -ENOMEM;
2944                 }
2945                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2946                                                  (phba->params.icds_per_ctrl -
2947                                                  phba->params.ios_per_ctrl),
2948                                                  GFP_KERNEL);
2949                 if (!phba->eh_sgl_hndl_base) {
2950                         kfree(phba->io_sgl_hndl_base);
2951                         shost_printk(KERN_ERR, phba->shost,
2952                                      "Mem Alloc Failed. Failing to load\n");
2953                         return -ENOMEM;
2954                 }
2955         } else {
2956                 shost_printk(KERN_ERR, phba->shost,
2957                              "HWI_MEM_SGLH is more than one element. "
2958                              "Failing to load\n");
2959                 return -ENOMEM;
2960         }
2961
2962         arr_index = 0;
2963         idx = 0;
2964         while (idx < mem_descr_sglh->num_elements) {
2965                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2966
2967                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2968                       sizeof(struct sgl_handle)); i++) {
2969                         if (arr_index < phba->params.ios_per_ctrl) {
2970                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2971                                 phba->io_sgl_hndl_avbl++;
2972                                 arr_index++;
2973                         } else {
2974                                 phba->eh_sgl_hndl_base[arr_index -
2975                                         phba->params.ios_per_ctrl] =
2976                                                                 psgl_handle;
2977                                 arr_index++;
2978                                 phba->eh_sgl_hndl_avbl++;
2979                         }
2980                         psgl_handle++;
2981                 }
2982                 idx++;
2983         }
2984         SE_DEBUG(DBG_LVL_8,
2985                  "phba->io_sgl_hndl_avbl=%d "
2986                  "phba->eh_sgl_hndl_avbl=%d\n",
2987                  phba->io_sgl_hndl_avbl,
2988                  phba->eh_sgl_hndl_avbl);
2989         mem_descr_sg = phba->init_mem;
2990         mem_descr_sg += HWI_MEM_SGE;
2991         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
2992                  mem_descr_sg->num_elements);
2993         arr_index = 0;
2994         idx = 0;
2995         while (idx < mem_descr_sg->num_elements) {
2996                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
2997
2998                 for (i = 0;
2999                      i < (mem_descr_sg->mem_array[idx].size) /
3000                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3001                      i++) {
3002                         if (arr_index < phba->params.ios_per_ctrl)
3003                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3004                         else
3005                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3006                                                 phba->params.ios_per_ctrl];
3007                         psgl_handle->pfrag = pfrag;
3008                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3009                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3010                         pfrag += phba->params.num_sge_per_io;
3011                         psgl_handle->sgl_index =
3012                                 phba->fw_config.iscsi_icd_start + arr_index++;
3013                 }
3014                 idx++;
3015         }
3016         phba->io_sgl_free_index = 0;
3017         phba->io_sgl_alloc_index = 0;
3018         phba->eh_sgl_free_index = 0;
3019         phba->eh_sgl_alloc_index = 0;
3020         return 0;
3021 }
3022
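/*
 * Build the connection-ID free array and the endpoint lookup table.
 * CIDs start at the firmware's iscsi_cid_start and are handed out in
 * steps of two, presumably because each connection consumes a CID
 * pair (ep_array is sized for cxns_per_ctrl * 2 entries).
 */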
3023 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3024 {
3025         int i, new_cid;
3026
3027         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3028                                   GFP_KERNEL);
3029         if (!phba->cid_array) {
3030                 shost_printk(KERN_ERR, phba->shost,
3031                              "Failed to allocate memory in "
3032                              "hba_setup_cid_tbls\n");
3033                 return -ENOMEM;
3034         }
3035         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3036                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3037         if (!phba->ep_array) {
3038                 shost_printk(KERN_ERR, phba->shost,
3039                              "Failed to allocate memory in "
3040                              "hba_setup_cid_tbls \n");
3041                 kfree(phba->cid_array);
3042                 return -ENOMEM;
3043         }
3044         new_cid = phba->fw_config.iscsi_cid_start;
3045         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3046                 phba->cid_array[i] = new_cid;
3047                 new_cid += 2;
3048         }
3049         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3050         return 0;
3051 }
3052
3053 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3054 {
3055         struct be_ctrl_info *ctrl = &phba->ctrl;
3056         struct hwi_controller *phwi_ctrlr;
3057         struct hwi_context_memory *phwi_context;
3058         struct be_queue_info *eq;
3059         u8 __iomem *addr;
3060         u32 reg, i;
3061         u32 enabled;
3062
3063         phwi_ctrlr = phba->phwi_ctrlr;
3064         phwi_context = phwi_ctrlr->phwi_ctxt;
3065
3066         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3067                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3068         reg = ioread32(addr);
3069         SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
3070
3071         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3072         if (!enabled) {
3073                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3074                 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3075                 iowrite32(reg, addr);
3076                 for (i = 0; i <= phba->num_cpus; i++) {
3077                         eq = &phwi_context->be_eq[i].q;
3078                         SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3079                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3080                 }
3081         } else
3082                 shost_printk(KERN_WARNING, phba->shost,
3083                              "In hwi_enable_intr, Not Enabled \n");
3084         return true;
3085 }
3086
3087 static void hwi_disable_intr(struct beiscsi_hba *phba)
3088 {
3089         struct be_ctrl_info *ctrl = &phba->ctrl;
3090
3091         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3092         u32 reg = ioread32(addr);
3093
3094         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3095         if (enabled) {
3096                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3097                 iowrite32(reg, addr);
3098         } else
3099                 shost_printk(KERN_WARNING, phba->shost,
3100                              "In hwi_disable_intr, Already Disabled \n");
3101 }
3102
3103 static int beiscsi_init_port(struct beiscsi_hba *phba)
3104 {
3105         int ret;
3106
3107         ret = beiscsi_init_controller(phba);
3108         if (ret < 0) {
3109                 shost_printk(KERN_ERR, phba->shost,
3110                              "beiscsi_dev_probe - Failed in "
3111                              "beiscsi_init_controller\n");
3112                 return ret;
3113         }
3114         ret = beiscsi_init_sgl_handle(phba);
3115         if (ret < 0) {
3116                 shost_printk(KERN_ERR, phba->shost,
3117                              "beiscsi_dev_probe - Failed in "
3118                              "beiscsi_init_sgl_handle\n");
3119                 goto do_cleanup_ctrlr;
3120         }
3121
3122         if (hba_setup_cid_tbls(phba)) {
3123                 shost_printk(KERN_ERR, phba->shost,
3124                              "Failed in hba_setup_cid_tbls\n");
3125                 kfree(phba->io_sgl_hndl_base);
3126                 kfree(phba->eh_sgl_hndl_base);
3127                 goto do_cleanup_ctrlr;
3128         }
3129
3130         return ret;
3131
3132 do_cleanup_ctrlr:
3133         hwi_cleanup(phba);
3134         return ret;
3135 }
3136
3137 static void hwi_purge_eq(struct beiscsi_hba *phba)
3138 {
3139         struct hwi_controller *phwi_ctrlr;
3140         struct hwi_context_memory *phwi_context;
3141         struct be_queue_info *eq;
3142         struct be_eq_entry *eqe = NULL;
3143         int i, eq_msix;
3144         unsigned int num_processed;
3145
3146         phwi_ctrlr = phba->phwi_ctrlr;
3147         phwi_context = phwi_ctrlr->phwi_ctxt;
3148         if (phba->msix_enabled)
3149                 eq_msix = 1;
3150         else
3151                 eq_msix = 0;
3152
3153         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3154                 eq = &phwi_context->be_eq[i].q;
3155                 eqe = queue_tail_node(eq);
3156                 num_processed = 0;
3157                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3158                                         & EQE_VALID_MASK) {
3159                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3160                         queue_tail_inc(eq);
3161                         eqe = queue_tail_node(eq);
3162                         num_processed++;
3163                 }
3164
3165                 if (num_processed)
3166                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3167         }
3168 }
3169
3170 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3171 {
3172         unsigned char mgmt_status;
3173
3174         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3175         if (mgmt_status)
3176                 shost_printk(KERN_WARNING, phba->shost,
3177                              "mgmt_epfw_cleanup FAILED \n");
3178
3179         hwi_purge_eq(phba);
3180         hwi_cleanup(phba);
3181         kfree(phba->io_sgl_hndl_base);
3182         kfree(phba->eh_sgl_hndl_base);
3183         kfree(phba->cid_array);
3184         kfree(phba->ep_array);
3185 }
3186
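/*
 * Copy the negotiated session parameters (burst lengths, ERL, digest
 * and R2T settings, exp_statsn) from the offload params block into a
 * target context update WRB and post it through the TXULP doorbell,
 * effectively handing the connection over to the hardware.
 */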
3187 void
3188 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3189                            struct beiscsi_offload_params *params)
3190 {
3191         struct wrb_handle *pwrb_handle;
3192         struct iscsi_target_context_update_wrb *pwrb = NULL;
3193         struct be_mem_descriptor *mem_descr;
3194         struct beiscsi_hba *phba = beiscsi_conn->phba;
3195         u32 doorbell = 0;
3196
3197         /*
3198          * We can always use 0 here because it is reserved by libiscsi for
3199          * login/startup related tasks.
3200          */
3201         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3202                                        phba->fw_config.iscsi_cid_start));
3203         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3204         memset(pwrb, 0, sizeof(*pwrb));
3205         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3206                       max_burst_length, pwrb, params->dw[offsetof
3207                       (struct amap_beiscsi_offload_params,
3208                       max_burst_length) / 32]);
3209         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3210                       max_send_data_segment_length, pwrb,
3211                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3212                       max_send_data_segment_length) / 32]);
3213         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3214                       first_burst_length,
3215                       pwrb,
3216                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3217                       first_burst_length) / 32]);
3218
3219         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3220                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3221                       erl) / 32] & OFFLD_PARAMS_ERL));
3222         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3223                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3224                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3225         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3226                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3227                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3228         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3229                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3230                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3231         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3232                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3233                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3234         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3235                       pwrb,
3236                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3237                       exp_statsn) / 32] + 1));
3238         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3239                       0x7);
3240         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3241                       pwrb, pwrb_handle->wrb_index);
3242         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3243                       pwrb, pwrb_handle->nxt_wrb_index);
3244         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3245                         session_state, pwrb, 0);
3246         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3247                       pwrb, 1);
3248         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3249                       pwrb, 0);
3250         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3251                       0);
3252
3253         mem_descr = phba->init_mem;
3254         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3255
3256         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3257                         pad_buffer_addr_hi, pwrb,
3258                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3259         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3260                         pad_buffer_addr_lo, pwrb,
3261                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3262
3263         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3264
3265         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3266         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3267                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3268         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3269
3270         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3271 }
3272
3273 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3274                               int *index, int *age)
3275 {
3276         *index = (int)itt;
3277         if (age)
3278                 *age = conn->session->age;
3279 }
3280
3281 /**
3282  * beiscsi_alloc_pdu - allocates pdu and related resources
3283  * @task: libiscsi task
3284  * @opcode: opcode of pdu for task
3285  *
3286  * This is called with the session lock held. It will allocate
3287  * the wrb and sgl if needed for the command. And it will prep
3288  * the pdu's itt. beiscsi_parse_pdu will later translate
3289  * the pdu itt to the libiscsi task itt.
3290  */
3291 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3292 {
3293         struct beiscsi_io_task *io_task = task->dd_data;
3294         struct iscsi_conn *conn = task->conn;
3295         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3296         struct beiscsi_hba *phba = beiscsi_conn->phba;
3297         struct hwi_wrb_context *pwrb_context;
3298         struct hwi_controller *phwi_ctrlr;
3299         itt_t itt;
3300         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3301         dma_addr_t paddr;
3302
3303         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3304                                           GFP_KERNEL, &paddr);
3305         if (!io_task->cmd_bhs)
3306                 return -ENOMEM;
3307         io_task->bhs_pa.u.a64.address = paddr;
3308         io_task->libiscsi_itt = (itt_t)task->itt;
3309         io_task->pwrb_handle = alloc_wrb_handle(phba,
3310                                                 beiscsi_conn->beiscsi_conn_cid -
3311                                                 phba->fw_config.iscsi_cid_start
3312                                                 );
3313         io_task->conn = beiscsi_conn;
3314
3315         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3316         task->hdr_max = sizeof(struct be_cmd_bhs);
3317
3318         if (task->sc) {
3319                 spin_lock(&phba->io_sgl_lock);
3320                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3321                 spin_unlock(&phba->io_sgl_lock);
3322                 if (!io_task->psgl_handle)
3323                         goto free_hndls;
3324         } else {
3325                 io_task->scsi_cmnd = NULL;
3326                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3327                         if (!beiscsi_conn->login_in_progress) {
3328                                 spin_lock(&phba->mgmt_sgl_lock);
3329                                 io_task->psgl_handle = (struct sgl_handle *)
3330                                                 alloc_mgmt_sgl_handle(phba);
3331                                 spin_unlock(&phba->mgmt_sgl_lock);
3332                                 if (!io_task->psgl_handle)
3333                                         goto free_hndls;
3334
3335                                 beiscsi_conn->login_in_progress = 1;
3336                                 beiscsi_conn->plogin_sgl_handle =
3337                                                         io_task->psgl_handle;
3338                         } else {
3339                                 io_task->psgl_handle =
3340                                                 beiscsi_conn->plogin_sgl_handle;
3341                         }
3342                 } else {
3343                         spin_lock(&phba->mgmt_sgl_lock);
3344                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3345                         spin_unlock(&phba->mgmt_sgl_lock);
3346                         if (!io_task->psgl_handle)
3347                                 goto free_hndls;
3348                 }
3349         }
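        /*
         * The itt placed on the wire packs the WRB index into the
         * upper 16 bits and the SGL (ICD) index into the lower 16
         * bits, presumably so completions can be mapped back to both
         * handles without extra lookups.
         */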
3350         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3351                                  wrb_index << 16) | (unsigned int)
3352                                 (io_task->psgl_handle->sgl_index));
3353         io_task->pwrb_handle->pio_handle = task;
3354
3355         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3356         return 0;
3357
3358 free_hndls:
3359         phwi_ctrlr = phba->phwi_ctrlr;
3360         pwrb_context = &phwi_ctrlr->wrb_context[
3361                         beiscsi_conn->beiscsi_conn_cid -
3362                         phba->fw_config.iscsi_cid_start];
3363         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3364         io_task->pwrb_handle = NULL;
3365         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3366                       io_task->bhs_pa.u.a64.address);
3367         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3368         return -ENOMEM;
3369 }
3370
3371 static void beiscsi_cleanup_task(struct iscsi_task *task)
3372 {
3373         struct beiscsi_io_task *io_task = task->dd_data;
3374         struct iscsi_conn *conn = task->conn;
3375         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3376         struct beiscsi_hba *phba = beiscsi_conn->phba;
3377         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3378         struct hwi_wrb_context *pwrb_context;
3379         struct hwi_controller *phwi_ctrlr;
3380
3381         phwi_ctrlr = phba->phwi_ctrlr;
3382         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3383                         - phba->fw_config.iscsi_cid_start];
3384         if (io_task->pwrb_handle) {
3385                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3386                 io_task->pwrb_handle = NULL;
3387         }
3388
3389         if (io_task->cmd_bhs) {
3390                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3391                               io_task->bhs_pa.u.a64.address);
3392         }
3393
3394         if (task->sc) {
3395                 if (io_task->psgl_handle) {
3396                         spin_lock(&phba->io_sgl_lock);
3397                         free_io_sgl_handle(phba, io_task->psgl_handle);
3398                         spin_unlock(&phba->io_sgl_lock);
3399                         io_task->psgl_handle = NULL;
3400                 }
3401         } else {
3402                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3403                         return;
3404                 if (io_task->psgl_handle) {
3405                         spin_lock(&phba->mgmt_sgl_lock);
3406                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3407                         spin_unlock(&phba->mgmt_sgl_lock);
3408                         io_task->psgl_handle = NULL;
3409                 }
3410         }
3411 }
3412
3413 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3414                           unsigned int num_sg, unsigned int xferlen,
3415                           unsigned int writedir)
3416 {
3417
3418         struct beiscsi_io_task *io_task = task->dd_data;
3419         struct iscsi_conn *conn = task->conn;
3420         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3421         struct beiscsi_hba *phba = beiscsi_conn->phba;
3422         struct iscsi_wrb *pwrb = NULL;
3423         unsigned int doorbell = 0;
3424
3425         pwrb = io_task->pwrb_handle->pwrb;
3426         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3427         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3428
3429         if (writedir) {
3430                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3431                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3432                               &io_task->cmd_bhs->iscsi_data_pdu,
3433                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3434                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3435                               &io_task->cmd_bhs->iscsi_data_pdu,
3436                               ISCSI_OPCODE_SCSI_DATA_OUT);
3437                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3438                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3439                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3440                               INI_WR_CMD);
3441                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3442         } else {
3443                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3444                               INI_RD_CMD);
3445                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3446         }
3447         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3448                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3449                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3450
3451         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3452                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3453                                   lun[0]));
3454         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3455         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3456                       io_task->pwrb_handle->wrb_index);
3457         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3458                       be32_to_cpu(task->cmdsn));
3459         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3460                       io_task->psgl_handle->sgl_index);
3461
3462         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3463
3464         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3465                       io_task->pwrb_handle->nxt_wrb_index);
3466         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3467
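        /*
         * Doorbell layout (per the DB_* masks and shifts): connection
         * ID in the low bits, the WRB index in the DEF_PDU_WRB_INDEX
         * field, and a posted-WRB count of one.
         */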
3468         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3469         doorbell |= (io_task->pwrb_handle->wrb_index &
3470                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3471         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3472
3473         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3474         return 0;
3475 }
3476
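/*
 * Handle the non-data path: login, nop-out, text, logout and task
 * management PDUs.  For a TMF, the WRB handle of the task being
 * aborted is looked up from the referenced task tag and its ICDs are
 * invalidated over MCC before the TMF WRB itself is posted.
 */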
3477 static int beiscsi_mtask(struct iscsi_task *task)
3478 {
3479         struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3480         struct iscsi_conn *conn = task->conn;
3481         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3482         struct beiscsi_hba *phba = beiscsi_conn->phba;
3483         struct iscsi_session *session;
3484         struct iscsi_wrb *pwrb = NULL;
3485         struct hwi_controller *phwi_ctrlr;
3486         struct hwi_wrb_context *pwrb_context;
3487         struct wrb_handle *pwrb_handle;
3488         unsigned int doorbell = 0;
3489         unsigned int i, cid;
3490         struct iscsi_task *aborted_task;
3491         unsigned int tag;
3492
3493         cid = beiscsi_conn->beiscsi_conn_cid;
3494         pwrb = io_task->pwrb_handle->pwrb;
3495         memset(pwrb, 0, sizeof(*pwrb));
3496         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3497                       be32_to_cpu(task->cmdsn));
3498         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3499                       io_task->pwrb_handle->wrb_index);
3500         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3501                       io_task->psgl_handle->sgl_index);
3502         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3503         case ISCSI_OP_LOGIN:
3504                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3505                               TGT_DM_CMD);
3506                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3507                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3508                 hwi_write_buffer(pwrb, task);
3509                 break;
3510         case ISCSI_OP_NOOP_OUT:
3511                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3512                               INI_RD_CMD);
3513                 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3514                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3515                 else
3516                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3517                 hwi_write_buffer(pwrb, task);
3518                 break;
3519         case ISCSI_OP_TEXT:
3520                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3521                               TGT_DM_CMD);
3522                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3523                 hwi_write_buffer(pwrb, task);
3524                 break;
3525         case ISCSI_OP_SCSI_TMFUNC:
3526                 session = conn->session;
3527                 i = ((struct iscsi_tm *)task->hdr)->rtt;
3528                 phwi_ctrlr = phba->phwi_ctrlr;
3529                 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3530                                             phba->fw_config.iscsi_cid_start];
3531                 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3532                                                                 >> 16];
3533                 aborted_task = pwrb_handle->pio_handle;
3534                 if (!aborted_task)
3535                         return 0;
3536
3537                 aborted_io_task = aborted_task->dd_data;
3538                 if (!aborted_io_task->scsi_cmnd)
3539                         return 0;
3540
3541                 tag = mgmt_invalidate_icds(phba,
3542                                      aborted_io_task->psgl_handle->sgl_index,
3543                                      cid);
3544                 if (!tag) {
3545                         shost_printk(KERN_WARNING, phba->shost,
3546                                      "mgmt_invalidate_icds could not be"
3547                                      " submitted\n");
3548                 } else {
3549                         wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3550                                                  phba->ctrl.mcc_numtag[tag]);
3551                         free_mcc_tag(&phba->ctrl, tag);
3552                 }
3553                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3554                               INI_TMF_CMD);
3555                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3556                 hwi_write_buffer(pwrb, task);
3557                 break;
3558         case ISCSI_OP_LOGOUT:
3559                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3560                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3561                                 HWH_TYPE_LOGOUT);
3562                 hwi_write_buffer(pwrb, task);
3563                 break;
3564
3565         default:
3566                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3567                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3568                 return -EINVAL;
3569         }
3570
3571         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3572                       task->data_count);
3573         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3574                       io_task->pwrb_handle->nxt_wrb_index);
3575         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3576
3577         doorbell |= cid & DB_WRB_POST_CID_MASK;
3578         doorbell |= (io_task->pwrb_handle->wrb_index &
3579                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3580         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3581         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3582         return 0;
3583 }
3584
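/*
 * libiscsi transmit hook: management PDUs are routed to
 * beiscsi_mtask(), SCSI commands are DMA-mapped and issued through
 * beiscsi_iotask() with the transfer length and data direction.
 */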
3585 static int beiscsi_task_xmit(struct iscsi_task *task)
3586 {
3587         struct iscsi_conn *conn = task->conn;
3588         struct beiscsi_io_task *io_task = task->dd_data;
3589         struct scsi_cmnd *sc = task->sc;
3590         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3591         struct scatterlist *sg;
3592         int num_sg;
3593         unsigned int  writedir = 0, xferlen = 0;
3594
3595         SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3596                  "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3597                  task, conn, beiscsi_conn);
3598         if (!sc)
3599                 return beiscsi_mtask(task);
3600
3601         io_task->scsi_cmnd = sc;
3602         num_sg = scsi_dma_map(sc);
3603         if (num_sg < 0) {
3604                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3605                 return num_sg;
3606         }
3607         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3608                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3609         xferlen = scsi_bufflen(sc);
3610         sg = scsi_sglist(sc);
3611         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3612                 writedir = 1;
3613                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3614                          task->imm_count);
3615         } else
3616                 writedir = 0;
3617         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3618 }
3619
3620 static void beiscsi_remove(struct pci_dev *pcidev)
3621 {
3622         struct beiscsi_hba *phba = NULL;
3623         struct hwi_controller *phwi_ctrlr;
3624         struct hwi_context_memory *phwi_context;
3625         struct be_eq_obj *pbe_eq;
3626         unsigned int i, msix_vec;
3627
3628         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3629         if (!phba) {
3630                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3631                 return;
3632         }
3633
3634         phwi_ctrlr = phba->phwi_ctrlr;
3635         phwi_context = phwi_ctrlr->phwi_ctxt;
3636         hwi_disable_intr(phba);
3637         if (phba->msix_enabled) {
3638                 for (i = 0; i <= phba->num_cpus; i++) {
3639                         msix_vec = phba->msix_entries[i].vector;
3640                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3641                 }
3642         } else
3643                 if (phba->pcidev->irq)
3644                         free_irq(phba->pcidev->irq, phba);
3645         pci_disable_msix(phba->pcidev);
3646         destroy_workqueue(phba->wq);
3647         if (blk_iopoll_enabled)
3648                 for (i = 0; i < phba->num_cpus; i++) {
3649                         pbe_eq = &phwi_context->be_eq[i];
3650                         blk_iopoll_disable(&pbe_eq->iopoll);
3651                 }
3652
3653         beiscsi_clean_port(phba);
3654         beiscsi_free_mem(phba);
3655         beiscsi_unmap_pci_function(phba);
3656         pci_free_consistent(phba->pcidev,
3657                             phba->ctrl.mbox_mem_alloced.size,
3658                             phba->ctrl.mbox_mem_alloced.va,
3659                             phba->ctrl.mbox_mem_alloced.dma);
3660         iscsi_host_remove(phba->shost);
3661         pci_dev_put(phba->pcidev);
3662         iscsi_host_free(phba->shost);
3663 }
3664
3665 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3666 {
3667         int i, status;
3668
3669         for (i = 0; i <= phba->num_cpus; i++)
3670                 phba->msix_entries[i].entry = i;
3671
3672         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3673                                  (phba->num_cpus + 1));
3674         if (!status)
3675                 phba->msix_enabled = true;
3676
3677         return;
3678 }
3679
3680 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3681                                 const struct pci_device_id *id)
3682 {
3683         struct beiscsi_hba *phba = NULL;
3684         struct hwi_controller *phwi_ctrlr;
3685         struct hwi_context_memory *phwi_context;
3686         struct be_eq_obj *pbe_eq;
3687         int ret, msix_vec, num_cpus, i;
3688
3689         ret = beiscsi_enable_pci(pcidev);
3690         if (ret < 0) {
3691                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3692                         "Failed to enable pci device\n");
3693                 return ret;
3694         }
3695
3696         phba = beiscsi_hba_alloc(pcidev);
3697         if (!phba) {
3698                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3699                         " Failed in beiscsi_hba_alloc \n");
3700                 goto disable_pci;
3701         }
3702         SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3703
3704         switch (pcidev->device) {
3705         case BE_DEVICE_ID1:
3706         case OC_DEVICE_ID1:
3707         case OC_DEVICE_ID2:
3708                 phba->generation = BE_GEN2;
3709                 break;
3710         case BE_DEVICE_ID2:
3711         case OC_DEVICE_ID3:
3712                 phba->generation = BE_GEN3;
3713                 break;
3714         default:
3715                 phba->generation = 0;
3716         }
3717
3718         if (enable_msix)
3719                 num_cpus = find_num_cpus();
3720         else
3721                 num_cpus = 1;
3722         phba->num_cpus = num_cpus;
3723         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3724
3725         if (enable_msix)
3726                 beiscsi_msix_enable(phba);
3727         ret = be_ctrl_init(phba, pcidev);
3728         if (ret) {
3729                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3730                                 "Failed in be_ctrl_init\n");
3731                 goto hba_free;
3732         }
3733
3734         spin_lock_init(&phba->io_sgl_lock);
3735         spin_lock_init(&phba->mgmt_sgl_lock);
3736         spin_lock_init(&phba->isr_lock);
3737         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3738         if (ret != 0) {
3739                 shost_printk(KERN_ERR, phba->shost,
3740                              "Error getting fw config\n");
3741                 goto free_port;
3742         }
3743         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3744         beiscsi_get_params(phba);
3745         phba->shost->can_queue = phba->params.ios_per_ctrl;
3746         ret = beiscsi_init_port(phba);
3747         if (ret < 0) {
3748                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3749                              "Failed in beiscsi_init_port\n");
3750                 goto free_port;
3751         }
3752
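        /*
         * Initialize the MCC tag pool: tags run from 1 to MAX_MCC_CMD,
         * each with its own wait queue, and all are marked available.
         */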
3753         for (i = 0; i < MAX_MCC_CMD; i++) {
3754                 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3755                 phba->ctrl.mcc_tag[i] = i + 1;
3756                 phba->ctrl.mcc_numtag[i + 1] = 0;
3757                 phba->ctrl.mcc_tag_available++;
3758         }
3759
3760         phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3761
3762         snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3763                  phba->shost->host_no);
3764         phba->wq = create_workqueue(phba->wq_name);
3765         if (!phba->wq) {
3766                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3767                                 "Failed to allocate work queue\n");
3768                 goto free_twq;
3769         }
3770
3771         INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3772
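        /*
         * With blk-iopoll available, give every event queue its own
         * iopoll handle so completions are polled with the configured
         * budget instead of being handled entirely in hard-irq context.
         */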
3773         phwi_ctrlr = phba->phwi_ctrlr;
3774         phwi_context = phwi_ctrlr->phwi_ctxt;
3775         if (blk_iopoll_enabled) {
3776                 for (i = 0; i < phba->num_cpus; i++) {
3777                         pbe_eq = &phwi_context->be_eq[i];
3778                         blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3779                                         be_iopoll);
3780                         blk_iopoll_enable(&pbe_eq->iopoll);
3781                 }
3782         }
3783         ret = beiscsi_init_irqs(phba);
3784         if (ret < 0) {
3785                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3786                              "Failed in beiscsi_init_irqs\n");
3787                 goto free_blkenbld;
3788         }
3789         ret = hwi_enable_intr(phba);
3790         if (ret < 0) {
3791                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3792                              "Failed in hwi_enable_intr\n");
3793                 goto free_ctrlr;
3794         }
3795         SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3796         return 0;
3797
3798 free_ctrlr:
3799         if (phba->msix_enabled) {
3800                 for (i = 0; i <= phba->num_cpus; i++) {
3801                         msix_vec = phba->msix_entries[i].vector;
3802                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3803                 }
3804         } else
3805                 if (phba->pcidev->irq)
3806                         free_irq(phba->pcidev->irq, phba);
3807         pci_disable_msix(phba->pcidev);
3808 free_blkenbld:
3809         destroy_workqueue(phba->wq);
3810         if (blk_iopoll_enabled)
3811                 for (i = 0; i < phba->num_cpus; i++) {
3812                         pbe_eq = &phwi_context->be_eq[i];
3813                         blk_iopoll_disable(&pbe_eq->iopoll);
3814                 }
3815 free_twq:
3816         beiscsi_clean_port(phba);
3817         beiscsi_free_mem(phba);
3818 free_port:
3819         pci_free_consistent(phba->pcidev,
3820                             phba->ctrl.mbox_mem_alloced.size,
3821                             phba->ctrl.mbox_mem_alloced.va,
3822                             phba->ctrl.mbox_mem_alloced.dma);
3823         beiscsi_unmap_pci_function(phba);
3824 hba_free:
3825         iscsi_host_remove(phba->shost);
3826         pci_dev_put(phba->pcidev);
3827         iscsi_host_free(phba->shost);
3828 disable_pci:
3829         pci_disable_device(pcidev);
3830         return ret;
3831 }
3832
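/*
 * iSCSI transport template registered with the open-iscsi layer.
 * Connection, session and I/O path callbacks point at this driver's
 * handlers, with generic pieces (PDU send, connection teardown,
 * recovery timeout) delegated to libiscsi.
 */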
3833 struct iscsi_transport beiscsi_iscsi_transport = {
3834         .owner = THIS_MODULE,
3835         .name = DRV_NAME,
3836         .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
3837                 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3838         .param_mask = ISCSI_MAX_RECV_DLENGTH |
3839                 ISCSI_MAX_XMIT_DLENGTH |
3840                 ISCSI_HDRDGST_EN |
3841                 ISCSI_DATADGST_EN |
3842                 ISCSI_INITIAL_R2T_EN |
3843                 ISCSI_MAX_R2T |
3844                 ISCSI_IMM_DATA_EN |
3845                 ISCSI_FIRST_BURST |
3846                 ISCSI_MAX_BURST |
3847                 ISCSI_PDU_INORDER_EN |
3848                 ISCSI_DATASEQ_INORDER_EN |
3849                 ISCSI_ERL |
3850                 ISCSI_CONN_PORT |
3851                 ISCSI_CONN_ADDRESS |
3852                 ISCSI_EXP_STATSN |
3853                 ISCSI_PERSISTENT_PORT |
3854                 ISCSI_PERSISTENT_ADDRESS |
3855                 ISCSI_TARGET_NAME | ISCSI_TPGT |
3856                 ISCSI_USERNAME | ISCSI_PASSWORD |
3857                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3858                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3859                 ISCSI_LU_RESET_TMO |
3860                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3861                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3862         .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3863                                 ISCSI_HOST_INITIATOR_NAME,
3864         .create_session = beiscsi_session_create,
3865         .destroy_session = beiscsi_session_destroy,
3866         .create_conn = beiscsi_conn_create,
3867         .bind_conn = beiscsi_conn_bind,
3868         .destroy_conn = iscsi_conn_teardown,
3869         .set_param = beiscsi_set_param,
3870         .get_conn_param = beiscsi_conn_get_param,
3871         .get_session_param = iscsi_session_get_param,
3872         .get_host_param = beiscsi_get_host_param,
3873         .start_conn = beiscsi_conn_start,
3874         .stop_conn = beiscsi_conn_stop,
3875         .send_pdu = iscsi_conn_send_pdu,
3876         .xmit_task = beiscsi_task_xmit,
3877         .cleanup_task = beiscsi_cleanup_task,
3878         .alloc_pdu = beiscsi_alloc_pdu,
3879         .parse_pdu_itt = beiscsi_parse_pdu,
3880         .get_stats = beiscsi_conn_get_stats,
3881         .ep_connect = beiscsi_ep_connect,
3882         .ep_poll = beiscsi_ep_poll,
3883         .ep_disconnect = beiscsi_ep_disconnect,
3884         .session_recovery_timedout = iscsi_session_recovery_timedout,
3885 };
3886
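/* PCI driver glue tying the probe/remove handlers to beiscsi_pci_id_table. */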
3887 static struct pci_driver beiscsi_pci_driver = {
3888         .name = DRV_NAME,
3889         .probe = beiscsi_dev_probe,
3890         .remove = beiscsi_remove,
3891         .id_table = beiscsi_pci_id_table
3892 };
3893
3894
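/*
 * Module init: register the iSCSI transport first, then the PCI driver;
 * if the latter fails, the transport registration is rolled back.
 */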
3895 static int __init beiscsi_module_init(void)
3896 {
3897         int ret;
3898
3899         beiscsi_scsi_transport =
3900                         iscsi_register_transport(&beiscsi_iscsi_transport);
3901         if (!beiscsi_scsi_transport) {
3902                 SE_DEBUG(DBG_LVL_1,
3903                          "beiscsi_module_init - Unable to register beiscsi "
3904                          "transport.\n");
3905                 return -ENOMEM;
3906         }
3907         SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3908                  &beiscsi_iscsi_transport);
3909
3910         ret = pci_register_driver(&beiscsi_pci_driver);
3911         if (ret) {
3912                 SE_DEBUG(DBG_LVL_1,
3913                          "beiscsi_module_init - Unable to register "
3914                          "beiscsi pci driver.\n");
3915                 goto unregister_iscsi_transport;
3916         }
3917         return 0;
3918
3919 unregister_iscsi_transport:
3920         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3921         return ret;
3922 }
3923
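/* Module exit: unregister in the reverse order of beiscsi_module_init. */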
3924 static void __exit beiscsi_module_exit(void)
3925 {
3926         pci_unregister_driver(&beiscsi_pci_driver);
3927         iscsi_unregister_transport(&beiscsi_iscsi_transport);
3928 }
3929
3930 module_init(beiscsi_module_init);
3931 module_exit(beiscsi_module_exit);