/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe);
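
/*
 * Note: each of the bnx2fc_send_* routines below builds one or more kernel
 * work queue entries (KWQEs) and hands them to the cnic driver through
 * submit_kwqes(); the corresponding completions arrive later as KCQEs and
 * are dispatched by bnx2fc_indicate_kcqe().
 */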
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba:	adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiates the initial handshake
 *	with the f/w.
 *
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	fcoe_init3.perf_config = 1;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}
/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port:		port structure pointer
 * @tgt:		bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
		(u32)((u64) tgt->rq_dma >> 32);
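	/*
	 * Seed the RQ producer at 0x8000: the bit above the 15-bit index
	 * appears to act as a wrap indicator, and bnx2fc_return_rqe()
	 * preserves it when the producer wraps past BNX2FC_RQ_WQES_MAX.
	 */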
	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				   FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				   FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
							/* fcf mac */
	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port:		port structure pointer
 * @tgt:		bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port:		port structure pointer
 * @tgt:		bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba:		adapter structure pointer
 * @tgt:		bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;

}
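
/*
 * Unsolicited ELS frames are handed to libfc from worker context; the
 * lport is re-validated against hba->vports first, since it may have
 * been destroyed while the work item was queued.
 */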
static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) ||	(op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = interface->hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}
static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int task_idx, index;
	int rc = 0;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					     GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			goto ret_err_rqe;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
					hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_err_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_err_rqe;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			goto ret_err_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
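		/*
		 * The bitmap carries one bit per error code; the lowest
		 * set bit is taken as the error to act upon.
		 */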
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & (u64)((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
			goto ret_err_rqe;
		}
		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
		if (tgt->dev_type != TYPE_TAPE)
			goto skip_rec;
		switch (err_warn) {
		case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
		case FCOE_ERROR_CODE_DATA_OOO_RO:
		case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
		case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
		case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
		case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
				   xid);
			memcpy(&io_req->err_entry, err_entry,
			       sizeof(struct fcoe_err_report_entry));
			if (!test_bit(BNX2FC_FLAG_SRR_SENT,
				      &io_req->req_flags)) {
				spin_unlock_bh(&tgt->tgt_lock);
				rc = bnx2fc_send_rec(io_req);
				spin_lock_bh(&tgt->tgt_lock);

				if (rc)
					goto skip_rec;
			} else
				printk(KERN_ERR PFX "SRR in progress\n");
			goto ret_err_rqe;
			break;
		default:
			break;
		}

skip_rec:
		set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
		/*
		 * Cancel the timeout_work, as we received IO
		 * completion with FW error.
		 */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		rc = bnx2fc_initiate_abts(io_req);
		if (rc != SUCCESS) {
			printk(KERN_ERR PFX "err_warn: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				io_req->xid);
			bnx2fc_initiate_cleanup(io_req);
		}
ret_err_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
			goto ret_warn_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & (u64) (1 << i)) {
				err_warn = i;
				break;
			}
		}
		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[task_idx];
		task = &(task_page[index]);
		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_warn_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_warn_rqe;
		}

		memcpy(&io_req->err_entry, err_entry,
		       sizeof(struct fcoe_err_report_entry));

		if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
			/* REC_TOV is not a warning code */
			BUG_ON(1);
		else
			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ERR PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}

		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
			bnx2fc_process_els_compl(io_req, task, num_rq);
		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state = %d\n",
				rx_state);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_SEQ_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
			      io_req->xid);
		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}
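
/*
 * Re-arm the CQ by writing the current consumer index, tagged with the
 * current toggle bit, to the connection's rx doorbell.
 */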
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
	u32 msg;

	wmb();
	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
			FCOE_CQE_TOGGLE_BIT_SHIFT);
	msg = *((u32 *)rx_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();

}
struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}
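
/*
 * CQE ownership is tracked with a toggle bit: an entry belongs to the
 * driver only while its toggle bit matches tgt->cq_curr_toggle_bit, and
 * the expected value flips each time the consumer index wraps.
 */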
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u32 num_free_sqes = 0;
	u32 num_cqes = 0;
	u16 wqe;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit <<
	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

		/* new entry on the cq */
		if (wqe & FCOE_CQE_CQE_TYPE) {
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			/* Pending work request completion */
			struct bnx2fc_work *work = NULL;
			struct bnx2fc_percpu_s *fps = NULL;
			unsigned int cpu = wqe % num_possible_cpus();

			fps = &per_cpu(bnx2fc_percpu, cpu);
			spin_lock_bh(&fps->fp_work_lock);
			if (unlikely(!fps->iothread))
				goto unlock;

			work = bnx2fc_alloc_work(tgt, wqe);
			if (work)
				list_add_tail(&work->list,
					      &fps->work_list);
unlock:
			spin_unlock_bh(&fps->fp_work_lock);

			/* Pending work request completion */
			if (fps->iothread && work)
				wake_up_process(fps->iothread);
			else
				bnx2fc_process_cq_compl(tgt, wqe);
			num_free_sqes++;
		}
		cqe++;
		tgt->cq_cons_idx++;
		num_cqes++;

		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
			tgt->cq_cons_idx = 0;
			cqe = cq;
			tgt->cq_curr_toggle_bit =
				1 - tgt->cq_curr_toggle_bit;
		}
	}
	if (num_cqes) {
		/* Arm CQ only if doorbell is mapped */
		if (tgt->ctx_base)
			bnx2fc_arm_cq(tgt);
		atomic_add(num_free_sqes, &tgt->free_sqes);
	}
	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}
/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:		adapter structure pointer
 * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}
/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:		adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport		*tgt;
	struct fcoe_port		*port;
	struct bnx2fc_interface		*interface;
	u32 conn_id;
	u32 context_id;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
	} else {
		/* FW offload request successfully completed */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	}
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}
/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:		adapter structure pointer
 * @ofld_kcqe:	connection enable kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport		*tgt;
	struct bnx2fc_interface		*interface;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ERR PFX "context id mis-match\n");
		return;
	}
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (!ofld_kcqe->completion_status)
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}
static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{

	struct bnx2fc_rport		*tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport		*tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
		printk(KERN_ERR PFX "init failure due to compl status err\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}
/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:	adapter structure pointer
 * @kcq:	kcqe pointer
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {

				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ERR PFX "unknown opcode 0x%x\n",
								kcqe->op_code);
		}
	}
}
void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}
void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	u32 msg;

	wmb();
	sq_db->prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	msg = *((u32 *)sq_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();

}
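
/*
 * Each offloaded connection gets its own doorbell page in the 5771x
 * doorbell BAR; the page is located by context_id, and only a 4-byte
 * window is mapped for the SQ/CQ doorbell writes above.
 */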
int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}
char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}
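
/*
 * The RQ producer keeps a 15-bit index; bit 15 is carried across wraps
 * as a sequence indicator (it is seeded as 0x8000 in the offload
 * request), so the wrap below adjusts the index without losing that bit.
 */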
void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
				  struct fcoe_task_ctx_entry *task,
				  struct bnx2fc_cmd *orig_io_req,
				  u32 offset)
{
	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
	struct fcoe_task_ctx_entry *orig_task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
	u8 orig_task_type;
	u16 orig_xid = orig_io_req->xid;
	u32 context_id = tgt->context_id;
	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
	u32 orig_offset = offset;
	int bd_count;
	int orig_task_idx, index;
	int i;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		orig_task_type = FCOE_TASK_TYPE_WRITE;
	else
		orig_task_type = FCOE_TASK_TYPE_READ;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;

	bd_count = orig_io_req->bd_tbl->bd_valid;

	/* obtain the appropriate bd entry from relative offset */
	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}
	phys_addr += (i * sizeof(struct fcoe_bd_ctx));

	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)phys_addr;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)phys_addr >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
				bd_count;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
				offset; /* adjusted offset */
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
	} else {
		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
		index = orig_xid % BNX2FC_TASKS_PER_PAGE;

		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[orig_task_idx];
		orig_task = &(task_page[index]);

		/* Multiple SGEs were used for this IO */
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
		sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
		sgl->mul_sgl.cur_sge_idx = i;

		memset(&task->rxwr_only.rx_seq_ctx, 0,
		       sizeof(struct fcoe_rx_seq_ctx));
		task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
		task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
	}
}
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Read Tx Write */
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;


	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
	}

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	/* rx flags */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		sgl->mul_sgl.sgl_size = 1;
	}
}
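
/*
 * For disk I/O with a small BD count, bnx2fc_init_task() describes the
 * buffer inline via the cached SGE in the task context (one BD for
 * writes, up to two for reads) instead of pointing the firmware at an
 * external SGL.
 */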
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fcoe_cached_sge_ctx *cached_sge;
	struct fcoe_ext_mul_sges_ctx *sgl;
	int dev_type = tgt->dev_type;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	bd_count = bd_tbl->bd_valid;
	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
					cached_sge->cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
					cached_sge->cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
					cached_sge->cur_buf_rem =
					fcoe_bd_tbl->buf_len;

			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
					bd_tbl->bd_valid;
		}
	}

	/*Tx Write Rx Read */
	/* Init state to NORMAL */
	task->txwr_rxrd.const_ctx.init_flags |= task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (dev_type == TYPE_TAPE) {
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
		io_req->rec_retry = 0;
	} else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Set initial seq counter */
	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)
		    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	/* rx flags */
	/* Set state to "waiting for the first packet" */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	task->rxwr_txrd.var_ctx.rx_id = 0xffff;

	/* Rx Only */
	if (task_type != FCOE_TASK_TYPE_READ)
		return;

	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	bd_count = bd_tbl->bd_valid;

	if (dev_type == TYPE_DISK) {
		if (bd_count == 1) {

			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else if (bd_count == 2) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;

			fcoe_bd_tbl++;
			cached_sge->second_buf_addr.lo =
						fcoe_bd_tbl->buf_addr_lo;
			cached_sge->second_buf_addr.hi =
						fcoe_bd_tbl->buf_addr_hi;
			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
			sgl->mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			sgl->mul_sgl.sgl_size = bd_count;
		}
	} else {
		sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
	}
}
/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * allocate memory for task context, and associated BD table to be used
 * by firmware
 *
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -1;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				 GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -1;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -1;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -1;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {

			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				hba->task_ctx[i], hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}
void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->task_ctx_bd_tbl,
				    hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						    hba->task_ctx[i],
						    hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}
static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	int hash_table_size;
	u32 *pbl;

	segment_count = hba->hash_tbl_segment_count;
	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		dma_addr_t dma_address;

		dma_address = le32_to_cpu(*pbl);
		++pbl;
		dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
		++pbl;
		dma_free_coherent(&hba->pcidev->dev,
				  BNX2FC_HASH_TBL_CHUNK_SIZE,
				  hba->hash_tbl_segments[i],
				  dma_address);
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->hash_tbl_pbl,
				    hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}
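
/*
 * The connection hash table is too large for a single coherent
 * allocation, so it is carved into BNX2FC_HASH_TBL_CHUNK_SIZE segments
 * whose DMA addresses are published to the firmware through a page-sized
 * PBL of lo/hi 32-bit address pairs.
 */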
static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			while (--i >= 0) {
				dma_free_coherent(&hba->pcidev->dev,
						    BNX2FC_HASH_TBL_CHUNK_SIZE,
						    hba->hash_tbl_segments[i],
						    dma_segment_array[i]);
				hba->hash_tbl_segments[i] = NULL;
			}
			kfree(dma_segment_array);
			return -ENOMEM;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		kfree(dma_segment_array);
		return -ENOMEM;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	pbl = hba->hash_tbl_pbl;
	i = 0;
	while (*pbl && *(pbl + 1)) {
		u32 lo;
		u32 hi;
		lo = *pbl;
		++pbl;
		hi = *pbl;
		++pbl;
		++i;
	}
	kfree(dma_segment_array);
	return 0;
}
/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba:	Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
		sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
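	/*
	 * Chain each T2 hash table entry to the next one by physical
	 * address so the firmware can walk the table as a linked free list.
	 */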
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (unsigned long) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}
void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				    hba->t2_hash_tbl_ptr,
				    hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			    sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}