/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}
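
/*
 * bnx2fc_cmd_timeout - Delayed-work handler that runs when a command timer
 * armed by bnx2fc_cmd_timer_set() fires. Depending on the command type and
 * request flags it sends an RRQ, escalates to ABTS, or explicitly logs out
 * the target, and finally drops the reference taken when the timer was set.
 */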
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						   "logo - tgt flags = 0x%lx\n",
						   tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
						      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * abts initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						   "logo - tgt flags = 0x%lx\n",
						   tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
						      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicitly logo"
						      "(els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}
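
/*
 * bnx2fc_scsi_done - Error back a SCSI command to the midlayer with the
 * given error code. Only BNX2FC_SCSI_CMD requests are completed here;
 * other command types that may sit on the queues are ignored.
 */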
static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
				    "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}
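
/**
 * bnx2fc_cmd_mgr_alloc - Allocate the per-hba command manager
 *
 * @hba:	adapter structure the command manager belongs to
 * @min_xid:	lowest exchange id handled by this manager
 * @max_xid:	highest exchange id handled by this manager
 *
 * Pre-allocates one bnx2fc_cmd per xid, distributed across per-CPU free
 * lists plus one reserved free list for slow path (ELS/TM) requests, and
 * a DMA-mapped BD table for each command.
 */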
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
					    u16 min_xid, u16 max_xid)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = BNX2FC_MAX_XID;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}
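
/**
 * bnx2fc_elstm_alloc - Allocate a command for ELS/TM/ABTS/cleanup requests
 *
 * @tgt:	rport the request is directed to
 * @type:	command type (BNX2FC_TASK_MGMT_CMD, BNX2FC_ELS, etc.)
 *
 * Slow path commands are carved out of the reserved free list at index
 * RESERVE_FREE_LIST_INDEX rather than the per-CPU SCSI command lists.
 */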
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
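
/**
 * bnx2fc_cmd_alloc - Allocate a bnx2fc_cmd for a fast path SCSI command
 *
 * @tgt:	rport the command is directed to
 *
 * Uses the free list of the current CPU to reduce lock contention on the
 * allocation fast path.
 */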
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
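
/**
 * bnx2fc_cmd_release - kref release handler that returns the command to
 *			its free list
 *
 * @ref:	embedded refcount of the bnx2fc_cmd being released
 */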
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}
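
/*
 * bnx2fc_initiate_tmf - Send a task management request (LUN or target
 * reset) to the rport addressed by @sc_cmd and wait for it to complete.
 * Returns SUCCESS or FAILED to the SCSI error handler.
 */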
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	rport = starget_to_rport(scsi_target(sc_cmd->device));
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}
	rp = rport->dd_data;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->tm_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}
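
/**
 * bnx2fc_initiate_abts - Send an ABTS for the exchange used by @io_req
 *
 * @io_req:	command whose exchange is to be aborted
 *
 * Called with tgt_lock held. The ABTS itself is sent on a newly allocated
 * slow path command; the timer is restarted on the original request
 * because the CQE is posted against the original exchange.
 */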
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}
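
/**
 * bnx2fc_initiate_seq_cleanup - Send a sequence cleanup task for @orig_io_req
 *
 * @orig_io_req:	command whose sequence is to be cleaned up
 * @offset:		data offset from which the sequence is recovered
 * @r_ctl:		R_CTL value for the subsequent SRR
 *
 * Used during sequence level error recovery; on completion an SRR is sent
 * for the original request.
 */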
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}
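
/**
 * bnx2fc_initiate_cleanup - Issue a firmware cleanup task for @io_req
 *
 * @io_req:	command whose task context is to be cleaned up
 *
 * ASSUMPTION: called with tgt_lock held.
 */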
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}

/**
 * bnx2fc_eh_target_reset: Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
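
/*
 * bnx2fc_expl_logo - Clean up @io_req and explicitly log out the target,
 * then wait for the session to be re-established. Called with tgt_lock
 * held; the lock is dropped across the blocking waits and re-acquired
 * before returning.
 */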
int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport_priv *rdata = tgt->rdata;
	int logo_issued;
	int rc = SUCCESS;
	int wait_cnt = 0;

	BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
		      tgt->flags);
	logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
				       &tgt->flags);
	io_req->wait_for_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	io_req->wait_for_comp = 0;
	/*
	 * release the reference taken in eh_abort to allow the
	 * target to re-login after flushing IOs
	 */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);

	if (!logo_issued) {
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		mutex_lock(&lport->disc.disc_mutex);
		lport->tt.rport_logoff(rdata);
		mutex_unlock(&lport->disc.disc_mutex);
		do {
			msleep(BNX2FC_RELOGIN_WAIT_TIME);
			if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
				rc = FAILED;
				break;
			}
		} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
	}
	spin_lock_bh(&tgt->tgt_lock);
	return rc;
}

/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return FAILED;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. this is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);

	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		rc = bnx2fc_expl_logo(lport, io_req);
		goto out;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->tm_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			       &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = bnx2fc_expl_logo(lport, io_req);
		goto out;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
out:
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}
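
/*
 * bnx2fc_process_seq_cleanup_compl - Handle the completion of a sequence
 * cleanup task. On success an SRR is sent to recover the sequence of the
 * original IO request; otherwise the IO will be aborted.
 */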
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
		      " cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
		       " IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}

void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp)
		complete(&io_req->tm_done);
}
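
/*
 * bnx2fc_process_abts_compl - Handle the ABTS completion CQE. On BA_ACC
 * an RRQ is scheduled after R_A_TOV; in either case the OX_ID is retired
 * via the command timer before the exchange can be reused.
 */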
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
		      " refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleanedup */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int tm_lun = sc_cmd->device->lun;
	int rc = 0;
	int lun;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}

static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT each outstanding
	 * IO on the target that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {
		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}

static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}

static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc) {
		scsi_dma_unmap(sc);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	char tag[2];

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;

	if (scsi_populate_tag_msg(sc_cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
			break;
		default:
			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->fc_pri_ta = 0;
	}
}

static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq > 1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
		 * a single 256-byte rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
			       io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}

/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
exit_qcmd:
	return rc;
}

void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
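
/**
 * bnx2fc_post_io_req - Map and post a SCSI command to the firmware
 *
 * @tgt:	rport the command is directed to
 * @io_req:	command to be posted
 *
 * Builds the BD list, initializes the task context, and places the
 * request on the SQ before ringing the doorbell. Returns -EAGAIN if the
 * session is not ready or a flush is in progress, so the caller can
 * report the host as busy.
 */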
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fc_stats *stats;
	int task_idx, index;
	u16 xid;

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */
	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);
	return 0;
}