2 * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
3 * This file contains helper routines that handle ELS requests
6 * Copyright (c) 2008 - 2013 Broadcom Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
12 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
17 static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
19 static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
21 static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
22 void *data, u32 data_len,
23 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
24 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
/*
 * bnx2fc_rrq_compl() - completion callback for a Reinstate Recovery
 * Qualifier (RRQ) ELS request.
 * @cb_arg: carries the RRQ command (io_req) and the original aborted
 *          IO request (aborted_io_req) the RRQ was issued for.
 *
 * Drops the reference held on the original IO request (balancing the
 * hold taken when the RRQ was sent).  If the RRQ itself timed out, it
 * is unlinked from the active queue and a firmware cleanup is issued
 * so that any late completion is dropped.
 *
 * NOTE(review): this excerpt is missing lines from the original file
 * (e.g. the declaration of 'rc' and closing braces); comments were
 * added without modifying any code.
 */
26 static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
28 struct bnx2fc_cmd *orig_io_req;
29 struct bnx2fc_cmd *rrq_req;
33 rrq_req = cb_arg->io_req;
34 orig_io_req = cb_arg->aborted_io_req;
36 BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
37 orig_io_req->xid, rrq_req->xid);
/* Drop the hold on the original IO now that the RRQ has completed */
39 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
41 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
43 * els req is timed out. cleanup the IO with FW and
44 * drop the completion. Remove from active_cmd_queue.
46 BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
49 if (rrq_req->on_active_queue) {
/* Unlink first so the normal completion path cannot also process it */
50 list_del_init(&rrq_req->link);
51 rrq_req->on_active_queue = 0;
52 rc = bnx2fc_initiate_cleanup(rrq_req);
/*
 * bnx2fc_send_rrq() - send an RRQ ELS for an aborted exchange.
 * @aborted_io_req: IO request whose exchange qualifier is being
 *                  reinstated after an abort.
 *
 * Builds a struct fc_els_rrq identifying the aborted exchange by
 * S_ID/OX_ID/RX_ID and submits it through bnx2fc_initiate_els() with
 * bnx2fc_rrq_compl() as the completion callback.  If sending keeps
 * failing past ~10 seconds, the reference held on @aborted_io_req is
 * dropped under the tgt lock.  Retry loop body is partially outside
 * this excerpt.
 */
58 int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
61 struct fc_els_rrq rrq;
62 struct bnx2fc_rport *tgt = aborted_io_req->tgt;
63 struct fc_lport *lport = tgt->rdata->local_port;
64 struct bnx2fc_els_cb_arg *cb_arg = NULL;
66 u32 r_a_tov = lport->r_a_tov;
67 unsigned long start = jiffies;
70 BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
72 memset(&rrq, 0, sizeof(rrq));
/* GFP_NOIO: presumably called from IO error-recovery context — confirm */
74 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
76 printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
81 cb_arg->aborted_io_req = aborted_io_req;
83 rrq.rrq_cmd = ELS_RRQ;
/* Name the aborted exchange: source ID plus its OX_ID/RX_ID pair */
84 hton24(rrq.rrq_s_id, sid);
85 rrq.rrq_ox_id = htons(aborted_io_req->xid);
86 rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
89 rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
90 bnx2fc_rrq_compl, cb_arg,
/* Give up retrying after roughly 10 seconds from the first attempt */
93 if (time_after(jiffies, start + (10 * HZ))) {
94 BNX2FC_ELS_DBG("rrq Failed\n");
103 BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
104 aborted_io_req->xid);
/* RRQ will never complete: drop the hold on the original IO here */
106 spin_lock_bh(&tgt->tgt_lock);
107 kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
108 spin_unlock_bh(&tgt->tgt_lock);
/*
 * bnx2fc_l2_els_compl() - completion callback for L2 (libfc-originated)
 * ELS requests such as ADISC/LOGO/RLS.
 * @cb_arg: carries the ELS command (io_req) and the original libfc
 *          exchange id (l2_oxid) the response must be matched to.
 *
 * On timeout, the request is removed from the active queue and cleaned
 * up with the firmware; libfc handles the ELS timeout itself.
 * Otherwise the response FC header and payload are copied into one
 * flat buffer and handed to bnx2fc_process_l2_frame_compl() along with
 * the original OX_ID so libfc sees a normal frame completion.
 */
113 static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
115 struct bnx2fc_cmd *els_req;
116 struct bnx2fc_rport *tgt;
117 struct bnx2fc_mp_req *mp_req;
118 struct fc_frame_header *fc_hdr;
121 u32 resp_len, hdr_len;
126 l2_oxid = cb_arg->l2_oxid;
127 BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
129 els_req = cb_arg->io_req;
130 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
132 * els req is timed out. cleanup the IO with FW and
133 * drop the completion. libfc will handle the els timeout
135 if (els_req->on_active_queue) {
136 list_del_init(&els_req->link);
137 els_req->on_active_queue = 0;
138 rc = bnx2fc_initiate_cleanup(els_req);
/* Normal completion: pull the response out of the MP request buffers */
145 mp_req = &(els_req->mp_req);
146 fc_hdr = &(mp_req->resp_fc_hdr);
147 resp_len = mp_req->resp_len;
148 resp_buf = mp_req->resp_buf;
/* GFP_ATOMIC: completion context, sleeping not allowed */
150 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
152 printk(KERN_ERR PFX "Unable to alloc mp buf\n");
155 hdr_len = sizeof(*fc_hdr);
/* Guard the flat copy below against overflowing the single page */
156 if (hdr_len + resp_len > PAGE_SIZE) {
157 printk(KERN_ERR PFX "l2_els_compl: resp len is "
158 "beyond page size\n");
/* Header followed immediately by payload, as libfc expects a frame */
161 memcpy(buf, fc_hdr, hdr_len);
162 memcpy(buf + hdr_len, resp_buf, resp_len);
163 frame_len = hdr_len + resp_len;
165 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
/*
 * bnx2fc_send_adisc() - send an offloaded ADISC ELS on behalf of libfc.
 * @tgt: offloaded remote port session.
 * @fp:  libfc frame whose payload already holds the initialized ADISC.
 *
 * Records libfc's OX_ID in the callback argument so the response can be
 * routed back via bnx2fc_l2_els_compl(), then submits the ADISC with a
 * timeout of 2 * R_A_TOV.
 */
173 int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
175 struct fc_els_adisc *adisc;
176 struct fc_frame_header *fh;
177 struct bnx2fc_els_cb_arg *cb_arg;
178 struct fc_lport *lport = tgt->rdata->local_port;
179 u32 r_a_tov = lport->r_a_tov;
182 fh = fc_frame_header_get(fp);
183 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
185 printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
/* Preserve libfc's exchange id so the response completes its exchange */
189 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
191 BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
192 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
193 /* adisc is initialized by libfc */
194 rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
195 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
/*
 * bnx2fc_send_logo() - send an offloaded LOGO ELS on behalf of libfc.
 * @tgt: offloaded remote port session.
 * @fp:  libfc frame whose payload already holds the initialized LOGO.
 *
 * Mirrors bnx2fc_send_adisc(): stashes libfc's OX_ID for response
 * routing through bnx2fc_l2_els_compl() and submits the LOGO with a
 * timeout of 2 * R_A_TOV.
 */
201 int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
203 struct fc_els_logo *logo;
204 struct fc_frame_header *fh;
205 struct bnx2fc_els_cb_arg *cb_arg;
206 struct fc_lport *lport = tgt->rdata->local_port;
207 u32 r_a_tov = lport->r_a_tov;
210 fh = fc_frame_header_get(fp);
211 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
213 printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
/* Preserve libfc's exchange id so the response completes its exchange */
217 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
219 BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
220 logo = fc_frame_payload_get(fp, sizeof(*logo));
221 /* logo is initialized by libfc */
222 rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
223 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
/*
 * bnx2fc_send_rls() - send an offloaded RLS ELS on behalf of libfc.
 * @tgt: offloaded remote port session.
 * @fp:  libfc frame whose payload already holds the initialized RLS.
 *
 * Same pattern as bnx2fc_send_adisc()/bnx2fc_send_logo(): stash
 * libfc's OX_ID for response routing through bnx2fc_l2_els_compl()
 * and submit with a timeout of 2 * R_A_TOV.
 *
 * Fix: the allocation-failure message said "LOGO" (copy-paste from
 * bnx2fc_send_logo()); it now correctly names RLS.
 */
229 int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
231 struct fc_els_rls *rls;
232 struct fc_frame_header *fh;
233 struct bnx2fc_els_cb_arg *cb_arg;
234 struct fc_lport *lport = tgt->rdata->local_port;
235 u32 r_a_tov = lport->r_a_tov;
238 fh = fc_frame_header_get(fp);
239 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
241 printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
/* Preserve libfc's exchange id so the response completes its exchange */
245 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
247 rls = fc_frame_payload_get(fp, sizeof(*rls));
248 /* rls is initialized by libfc */
249 rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
250 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
/*
 * bnx2fc_srr_compl() - completion callback for a Sequence Retransmission
 * Request (SRR) ELS.
 * @cb_arg: carries the SRR command (io_req) and the original IO request
 *          (aborted_io_req) the SRR was sent to recover.
 *
 * Timeout path: aborts the SRR itself (cleanup on ABTS failure) and,
 * unless the original IO already completed or is being aborted, resends
 * the SRR up to SRR_RETRY_COUNT times before falling back to aborting
 * the original IO.  Response path: reassembles the response into an
 * fc_frame; LS_ACC means success, LS_RJT triggers an abort of the
 * original IO.  Finally the hold on the original IO is dropped.
 *
 * NOTE(review): locking — the tgt_lock is dropped around
 * bnx2fc_send_srr(); the surrounding lock acquisition is outside this
 * excerpt. Verify against the full file.
 */
256 void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
258 struct bnx2fc_mp_req *mp_req;
259 struct fc_frame_header *fc_hdr, *fh;
260 struct bnx2fc_cmd *srr_req;
261 struct bnx2fc_cmd *orig_io_req;
265 u32 resp_len, hdr_len;
269 orig_io_req = cb_arg->aborted_io_req;
270 srr_req = cb_arg->io_req;
/* --- SRR timed out: recover the SRR exchange, then retry or abort --- */
271 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
273 BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
276 rc = bnx2fc_initiate_abts(srr_req);
278 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
279 "failed. issue cleanup\n");
280 bnx2fc_initiate_cleanup(srr_req);
/* Original IO already finished or being aborted: nothing left to retry */
282 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
283 test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
284 BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
285 orig_io_req->xid, orig_io_req->req_flags);
288 orig_io_req->srr_retry++;
289 if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
290 struct bnx2fc_rport *tgt = orig_io_req->tgt;
/* Drop the lock: bnx2fc_send_srr() re-enters paths that take it */
291 spin_unlock_bh(&tgt->tgt_lock);
292 rc = bnx2fc_send_srr(orig_io_req,
293 orig_io_req->srr_offset,
294 orig_io_req->srr_rctl);
295 spin_lock_bh(&tgt->tgt_lock);
/* Retries exhausted: abort the original IO instead */
300 rc = bnx2fc_initiate_abts(orig_io_req);
302 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
303 "failed xid = 0x%x. issue cleanup\n",
305 bnx2fc_initiate_cleanup(orig_io_req);
/* --- Normal completion path: decode the SRR response --- */
309 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
310 test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
311 BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
312 orig_io_req->xid, orig_io_req->req_flags);
315 mp_req = &(srr_req->mp_req);
316 fc_hdr = &(mp_req->resp_fc_hdr);
317 resp_len = mp_req->resp_len;
318 resp_buf = mp_req->resp_buf;
320 hdr_len = sizeof(*fc_hdr);
321 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
323 printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
/* Flatten header + payload so it can be parsed as one FC frame */
326 memcpy(buf, fc_hdr, hdr_len);
327 memcpy(buf + hdr_len, resp_buf, resp_len);
329 fp = fc_frame_alloc(NULL, resp_len);
331 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
335 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
336 /* Copy FC Frame header and payload into the frame */
337 memcpy(fh, buf, hdr_len + resp_len);
339 opcode = fc_frame_payload_op(fp);
342 BNX2FC_IO_DBG(srr_req, "SRR success\n");
/* LS_RJT: target refused retransmission — abort the original IO */
345 BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
346 rc = bnx2fc_initiate_abts(orig_io_req);
348 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
349 "failed xid = 0x%x. issue cleanup\n",
351 bnx2fc_initiate_cleanup(orig_io_req);
355 BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
/* Drop the hold on the original IO taken when the SRR was sent */
363 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
/*
 * bnx2fc_rec_compl() - completion callback for a Read Exchange Concise
 * (REC) ELS sent to recover a stalled IO exchange.
 * @cb_arg: carries the REC command (io_req) and the original IO request
 *          (aborted_io_req) being recovered.
 *
 * Timeout path: aborts the REC, then resends it up to REC_RETRY_COUNT
 * times before aborting the original IO.  Response path:
 *  - LS_RJT with logical-error/OXID-RXID explanation => the target
 *    never saw the command ("CMD LOST"): clean up the original IO with
 *    the firmware and repost it as a fresh IO with the same scsi_cmnd;
 *  - LS_ACC => inspect e_stat/offset to decide whether the response,
 *    data, or XFER_RDY was lost, then either issue a firmware sequence
 *    cleanup or send an SRR at the computed offset/R_CTL;
 *  - anything else falls through to abort handling.
 * The hold on the original IO is dropped at the end.
 *
 * NOTE(review): this excerpt omits several original lines (lock
 * acquisition, some else-branches and closing braces); comments were
 * added without altering any code.
 */
366 void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
368 struct bnx2fc_cmd *orig_io_req, *new_io_req;
369 struct bnx2fc_cmd *rec_req;
370 struct bnx2fc_mp_req *mp_req;
371 struct fc_frame_header *fc_hdr, *fh;
372 struct fc_els_ls_rjt *rjt;
373 struct fc_els_rec_acc *acc;
374 struct bnx2fc_rport *tgt;
375 struct fcoe_err_report_entry *err_entry;
376 struct scsi_cmnd *sc_cmd;
384 u32 resp_len, hdr_len;
386 bool send_seq_clnp = false;
387 bool abort_io = false;
389 BNX2FC_MISC_DBG("Entered rec_compl callback\n");
390 rec_req = cb_arg->io_req;
391 orig_io_req = cb_arg->aborted_io_req;
392 BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
393 tgt = orig_io_req->tgt;
395 /* Handle REC timeout case */
396 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
397 BNX2FC_IO_DBG(rec_req, "timed out, abort "
400 /* els req is timed out. send abts for els */
401 rc = bnx2fc_initiate_abts(rec_req);
403 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
404 "failed. issue cleanup\n");
405 bnx2fc_initiate_cleanup(rec_req);
407 orig_io_req->rec_retry++;
408 /* REC timedout. send ABTS to the orig IO req */
409 if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
/* Drop the lock around the resend: send path re-enters locked code */
410 spin_unlock_bh(&tgt->tgt_lock);
411 rc = bnx2fc_send_rec(orig_io_req);
412 spin_lock_bh(&tgt->tgt_lock);
/* REC retries exhausted: abort the original IO */
416 rc = bnx2fc_initiate_abts(orig_io_req);
418 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
419 "failed xid = 0x%x. issue cleanup\n",
421 bnx2fc_initiate_cleanup(orig_io_req);
/* Original IO already finished — nothing to recover */
426 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
427 BNX2FC_IO_DBG(rec_req, "completed"
/* Original IO already being aborted — don't race with ABTS handling */
432 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
433 BNX2FC_IO_DBG(rec_req, "abts in prog "
/* --- Decode the REC response --- */
439 mp_req = &(rec_req->mp_req);
440 fc_hdr = &(mp_req->resp_fc_hdr);
441 resp_len = mp_req->resp_len;
442 acc = resp_buf = mp_req->resp_buf;
444 hdr_len = sizeof(*fc_hdr);
446 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
448 printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
/* Flatten header + payload so it can be parsed as one FC frame */
451 memcpy(buf, fc_hdr, hdr_len);
452 memcpy(buf + hdr_len, resp_buf, resp_len);
454 fp = fc_frame_alloc(NULL, resp_len);
456 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
460 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
461 /* Copy FC Frame header and payload into the frame */
462 memcpy(fh, buf, hdr_len + resp_len);
464 opcode = fc_frame_payload_op(fp);
465 if (opcode == ELS_LS_RJT) {
466 BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
467 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
/* Logical error / unable + OXID-RXID explanation => command lost */
468 if ((rjt->er_reason == ELS_RJT_LOGIC ||
469 rjt->er_reason == ELS_RJT_UNAB) &&
470 rjt->er_explan == ELS_EXPL_OXID_RXID) {
471 BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
472 new_io_req = bnx2fc_cmd_alloc(tgt);
475 new_io_req->sc_cmd = orig_io_req->sc_cmd;
476 /* cleanup orig_io_req that is with the FW */
477 set_bit(BNX2FC_FLAG_CMD_LOST,
478 &orig_io_req->req_flags);
479 bnx2fc_initiate_cleanup(orig_io_req);
480 /* Post a new IO req with the same sc_cmd */
481 BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
482 spin_unlock_bh(&tgt->tgt_lock);
483 rc = bnx2fc_post_io_req(tgt, new_io_req);
484 spin_lock_bh(&tgt->tgt_lock);
487 BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
/* Non-CMD-LOST rejection: abort the original IO */
490 rc = bnx2fc_initiate_abts(orig_io_req);
492 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
493 "failed. issue cleanup\n");
494 bnx2fc_initiate_cleanup(orig_io_req);
496 } else if (opcode == ELS_LS_ACC) {
497 /* REVISIT: Check if the exchange is already aborted */
498 offset = ntohl(acc->reca_fc4value);
499 e_stat = ntohl(acc->reca_e_stat);
500 if (e_stat & ESB_ST_SEQ_INIT) {
501 BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
504 BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
506 /* Seq initiative is with us */
507 err_entry = (struct fcoe_err_report_entry *)
508 &orig_io_req->err_entry;
509 sc_cmd = orig_io_req->sc_cmd;
510 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
511 /* SCSI WRITE command */
/* All data was transferred => only the FCP_RSP was lost */
512 if (offset == orig_io_req->data_xfer_len) {
513 BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
515 r_ctl = FC_RCTL_DD_CMD_STATUS;
518 /* start transmitting from offset */
519 BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
520 send_seq_clnp = true;
521 r_ctl = FC_RCTL_DD_DATA_DESC;
522 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
528 /* SCSI READ command */
/* All data received => only the FCP_RSP was lost */
529 if (err_entry->data.rx_buf_off ==
530 orig_io_req->data_xfer_len) {
532 BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
533 r_ctl = FC_RCTL_DD_CMD_STATUS;
536 /* request retransmission from this offset */
537 send_seq_clnp = true;
538 offset = err_entry->data.rx_buf_off;
539 BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
541 r_ctl = FC_RCTL_DD_SOL_DATA;
542 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
/* abort_io set above: give up on recovery, abort the original IO */
548 rc = bnx2fc_initiate_abts(orig_io_req);
550 BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
551 " failed. issue cleanup\n");
552 bnx2fc_initiate_cleanup(orig_io_req);
/* No sequence cleanup pending: request retransmission via SRR */
554 } else if (!send_seq_clnp) {
555 BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
556 spin_unlock_bh(&tgt->tgt_lock);
557 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
558 spin_lock_bh(&tgt->tgt_lock);
561 BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
/* Drop the hold on the original IO taken when the REC was sent */
571 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
/*
 * bnx2fc_send_rec() - send a Read Exchange Concise (REC) ELS for a
 * stalled IO.
 * @orig_io_req: IO request whose exchange state is being queried.
 *
 * Takes an extra reference on @orig_io_req for the lifetime of the REC
 * (dropped in bnx2fc_rec_compl() or on the failure path below), builds
 * a struct fc_els_rec naming the exchange by S_ID/OX_ID/RX_ID, and
 * submits it with bnx2fc_rec_compl() as the completion callback.
 */
575 int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
577 struct fc_els_rec rec;
578 struct bnx2fc_rport *tgt = orig_io_req->tgt;
579 struct fc_lport *lport = tgt->rdata->local_port;
580 struct bnx2fc_els_cb_arg *cb_arg = NULL;
582 u32 r_a_tov = lport->r_a_tov;
585 BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
586 memset(&rec, 0, sizeof(rec));
588 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
590 printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
/* Hold the original IO until the REC completes (put in rec_compl) */
594 kref_get(&orig_io_req->refcount);
596 cb_arg->aborted_io_req = orig_io_req;
598 rec.rec_cmd = ELS_REC;
599 hton24(rec.rec_s_id, sid);
600 rec.rec_ox_id = htons(orig_io_req->xid);
601 rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
603 rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
604 bnx2fc_rec_compl, cb_arg,
/* Send failed: release the hold taken above under the tgt lock */
608 BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
609 spin_lock_bh(&tgt->tgt_lock);
610 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
611 spin_unlock_bh(&tgt->tgt_lock);
/*
 * bnx2fc_send_srr() - send a Sequence Retransmission Request (SRR).
 * @orig_io_req: IO request whose sequence is to be retransmitted.
 * @offset:      relative offset at which retransmission should resume.
 * @r_ctl:       R_CTL of the information unit being requested
 *               (FCP_RSP, data descriptor, or solicited data).
 *
 * Takes an extra reference on @orig_io_req for the lifetime of the SRR
 * (dropped in bnx2fc_srr_compl() or on the failure path below).  The
 * offset and r_ctl are remembered on the IO so bnx2fc_srr_compl() can
 * retry the SRR with identical parameters.  On success the
 * BNX2FC_FLAG_SRR_SENT flag is set.
 *
 * NOTE(review): the declaration of the local 'srr' (an fcp SRR
 * structure) is outside this excerpt.
 */
617 int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
620 struct bnx2fc_rport *tgt = orig_io_req->tgt;
621 struct fc_lport *lport = tgt->rdata->local_port;
622 struct bnx2fc_els_cb_arg *cb_arg = NULL;
623 u32 r_a_tov = lport->r_a_tov;
626 BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
627 memset(&srr, 0, sizeof(srr));
629 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
631 printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
/* Hold the original IO until the SRR completes (put in srr_compl) */
635 kref_get(&orig_io_req->refcount);
637 cb_arg->aborted_io_req = orig_io_req;
639 srr.srr_op = ELS_SRR;
640 srr.srr_ox_id = htons(orig_io_req->xid);
641 srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
642 srr.srr_rel_off = htonl(offset);
643 srr.srr_r_ctl = r_ctl;
/* Remember parameters so srr_compl() can retry with the same values */
644 orig_io_req->srr_offset = offset;
645 orig_io_req->srr_rctl = r_ctl;
647 rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
648 bnx2fc_srr_compl, cb_arg,
/* Send failed: release the hold taken above under the tgt lock */
652 BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
653 spin_lock_bh(&tgt->tgt_lock);
654 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
655 spin_unlock_bh(&tgt->tgt_lock);
658 set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
/*
 * bnx2fc_initiate_els() - common path to build and fire an offloaded
 * ELS request through the firmware.
 * @tgt:        offloaded remote port session to send on.
 * @op:         ELS opcode (ELS_RRQ, ELS_ADISC, ELS_LOGO, ...).
 * @data:       ELS payload, copied into the middle-path request buffer.
 * @data_len:   payload length in bytes.
 * @cb_func:    completion callback invoked from the ELS completion path.
 * @cb_arg:     argument handed to @cb_func; io_req is filled in here.
 * @timer_msec: request timeout in milliseconds (0 presumably means no
 *              timer — confirm against bnx2fc_cmd_timer_set()).
 *
 * Validates rport/lport/session readiness, allocates a BNX2FC_ELS
 * command with its middle-path buffers, fills the FC header (FCP type
 * for SRR since it is an FC-4 ELS, plain ELS type otherwise),
 * initializes the task context, and queues the request to the
 * send queue under the tgt lock before ringing the doorbell.
 */
663 static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
664 void *data, u32 data_len,
665 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
666 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
668 struct fcoe_port *port = tgt->port;
669 struct bnx2fc_interface *interface = port->priv;
670 struct fc_rport *rport = tgt->rport;
671 struct fc_lport *lport = port->lport;
672 struct bnx2fc_cmd *els_req;
673 struct bnx2fc_mp_req *mp_req;
674 struct fc_frame_header *fc_hdr;
675 struct fcoe_task_ctx_entry *task;
676 struct fcoe_task_ctx_entry *task_page;
/* --- Precondition checks: remote port, link and session must be up --- */
682 rc = fc_remote_port_chkready(rport);
684 printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
688 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
689 printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
693 if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
694 (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
695 printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
699 els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
705 els_req->sc_cmd = NULL;
706 els_req->port = port;
/* Wire up the completion callback before the request can complete */
708 els_req->cb_func = cb_func;
709 cb_arg->io_req = els_req;
710 els_req->cb_arg = cb_arg;
712 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
713 rc = bnx2fc_init_mp_req(els_req);
715 printk(KERN_ERR PFX "ELS MP request init failed\n");
716 spin_lock_bh(&tgt->tgt_lock);
717 kref_put(&els_req->refcount, bnx2fc_cmd_release);
718 spin_unlock_bh(&tgt->tgt_lock);
726 /* Set the data_xfer_len to the size of ELS payload */
727 mp_req->req_len = data_len;
728 els_req->data_xfer_len = mp_req->req_len;
730 /* Fill ELS Payload */
/* Accept only opcodes within the valid ELS command range */
731 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
732 memcpy(mp_req->req_buf, data, data_len);
734 printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
/* Clear callbacks so release cannot invoke them for a dead request */
735 els_req->cb_func = NULL;
736 els_req->cb_arg = NULL;
737 spin_lock_bh(&tgt->tgt_lock);
738 kref_put(&els_req->refcount, bnx2fc_cmd_release);
739 spin_unlock_bh(&tgt->tgt_lock);
747 fc_hdr = &(mp_req->req_fc_hdr);
749 did = tgt->rport->port_id;
/* SRR is an FC-4 link service carried with FCP type; others use ELS type */
753 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
754 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
755 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
757 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
758 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
759 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
761 /* Obtain exchange id */
763 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
764 index = xid % BNX2FC_TASKS_PER_PAGE;
766 /* Initialize task context for this IO request */
767 task_page = (struct fcoe_task_ctx_entry *)
768 interface->hba->task_ctx[task_idx];
769 task = &(task_page[index]);
770 bnx2fc_init_mp_task(els_req, task);
772 spin_lock_bh(&tgt->tgt_lock);
/* Re-check under the lock: session may have dropped since the check above */
774 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
775 printk(KERN_ERR PFX "initiate_els.. session not ready\n");
776 els_req->cb_func = NULL;
777 els_req->cb_arg = NULL;
778 kref_put(&els_req->refcount, bnx2fc_cmd_release);
779 spin_unlock_bh(&tgt->tgt_lock);
784 bnx2fc_cmd_timer_set(els_req, timer_msec);
785 bnx2fc_add_2_sq(tgt, xid);
787 els_req->on_active_queue = 1;
788 list_add_tail(&els_req->link, &tgt->els_queue);
/* Doorbell tells the firmware to process the newly queued work item */
791 bnx2fc_ring_doorbell(tgt);
792 spin_unlock_bh(&tgt->tgt_lock);
/*
 * bnx2fc_process_els_compl() - process an ELS completion from the
 * firmware (CQE path).
 * @els_req: the ELS command that completed.
 * @task:    firmware task context entry holding the response.
 * @num_rq:  number of RQ entries associated with the completion.
 *
 * Races with the timeout handler via BNX2FC_FLAG_ELS_DONE: whichever
 * side sets the flag first owns completion processing.  Copies the
 * response FC header out of the task context (byte-swapping 64-bit
 * words), records the payload length, invokes the registered callback
 * once, and drops the command reference.
 */
798 void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
799 struct fcoe_task_ctx_entry *task, u8 num_rq)
801 struct bnx2fc_mp_req *mp_req;
802 struct fc_frame_header *fc_hdr;
806 BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
807 "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
/* Timeout path already claimed this request: just drop our reference */
809 if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
810 &els_req->req_flags)) {
811 BNX2FC_ELS_DBG("Timer context finished processing this "
812 "els - 0x%x\n", els_req->xid);
813 /* This IO doesn't receive cleanup completion */
814 kref_put(&els_req->refcount, bnx2fc_cmd_release);
818 /* Cancel the timeout_work, as we received the response */
819 if (cancel_delayed_work(&els_req->timeout_work))
820 kref_put(&els_req->refcount,
821 bnx2fc_cmd_release); /* drop timer hold */
823 if (els_req->on_active_queue) {
824 list_del_init(&els_req->link);
825 els_req->on_active_queue = 0;
828 mp_req = &(els_req->mp_req);
829 fc_hdr = &(mp_req->resp_fc_hdr);
/* Copy the response header out of the task context, 64 bits at a time */
833 &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
834 hdr[0] = cpu_to_be64(temp_hdr[0]);
835 hdr[1] = cpu_to_be64(temp_hdr[1]);
836 hdr[2] = cpu_to_be64(temp_hdr[2]);
839 task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
841 /* Parse ELS response */
842 if ((els_req->cb_func) && (els_req->cb_arg)) {
843 els_req->cb_func(els_req->cb_arg);
/* Clear cb_arg so the callback cannot be invoked a second time */
844 els_req->cb_arg = NULL;
847 kref_put(&els_req->refcount, bnx2fc_cmd_release);
/*
 * bnx2fc_flogi_resp() - FLOGI response interception for FIP.
 * @seq: exchange sequence the response arrived on.
 * @fp:  response frame (may be an error pointer — the guard is outside
 *       this excerpt).
 * @arg: the fcoe_ctlr (FIP controller) for this interface.
 *
 * Lets the FIP controller learn the granted MAC address from the FLOGI
 * response before handing the frame on to libfc's standard
 * fc_lport_flogi_resp().  A zero granted MAC combined with an LS_RJT
 * terminates the vport (NPIV FDISC rejection path).
 */
850 static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
853 struct fcoe_ctlr *fip = arg;
854 struct fc_exch *exch = fc_seq_exch(seq);
855 struct fc_lport *lport = exch->lp;
/* granted_mac is filled by the FCoE receive path when FLOGI succeeds */
862 mac = fr_cb(fp)->granted_mac;
863 if (is_zero_ether_addr(mac)) {
864 op = fc_frame_payload_op(fp);
866 if (op == ELS_LS_RJT) {
867 printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
868 fc_vport_terminate(lport->vport);
/* No granted MAC yet: let the FIP controller parse the FLOGI response */
873 fcoe_ctlr_recv_flogi(fip, lport, fp);
875 if (!is_zero_ether_addr(mac))
876 fip->update_mac(lport, mac);
/* Continue with libfc's normal FLOGI response handling */
878 fc_lport_flogi_resp(seq, fp, lport);
/*
 * bnx2fc_logo_resp() - fabric LOGO response interception for FIP.
 * @seq: exchange sequence the response arrived on.
 * @fp:  response frame.
 * @arg: the fcoe_ctlr (FIP controller) for this interface.
 *
 * Clears the FIP-assigned MAC (we are logging out of the fabric) and
 * then defers to libfc's standard fc_lport_logo_resp().
 */
881 static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
884 struct fcoe_ctlr *fip = arg;
885 struct fc_exch *exch = fc_seq_exch(seq);
886 struct fc_lport *lport = exch->lp;
887 static u8 zero_mac[ETH_ALEN] = { 0 };
/* Reset the port MAC: the FIP-granted address is invalid after LOGO */
890 fip->update_mac(lport, zero_mac);
891 fc_lport_logo_resp(seq, fp, lport);
/*
 * bnx2fc_elsct_send() - libfc elsct_send hook that intercepts FLOGI and
 * fabric LOGO so FIP-specific response handling can run.
 * @lport:   local port sending the request.
 * @did:     destination FC ID.
 * @fp:      request frame.
 * @op:      ELS/CT opcode.
 * @resp:    caller's response handler (used for non-intercepted ops).
 * @arg:     caller's response argument.
 * @timeout: request timeout passed through to fc_elsct_send().
 *
 * FLOGI responses are redirected to bnx2fc_flogi_resp(); LOGOs
 * addressed to the fabric controller (FC_FID_FLOGI) are redirected to
 * bnx2fc_logo_resp().  Everything else goes straight through with the
 * caller's handler.  The switch/case structure is partially outside
 * this excerpt.
 */
894 struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
895 struct fc_frame *fp, unsigned int op,
896 void (*resp)(struct fc_seq *,
899 void *arg, u32 timeout)
901 struct fcoe_port *port = lport_priv(lport);
902 struct bnx2fc_interface *interface = port->priv;
903 struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
904 struct fc_frame_header *fh = fc_frame_header_get(fp);
/* FLOGI: response must pass through the FIP controller first */
909 return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
912 /* only hook onto fabric logouts, not port logouts */
913 if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
915 return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
/* Default: pass the caller's own handler straight to libfc */
918 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);