/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"
#define DRV_MODULE_NAME		"cxgb4i"
#define DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define DRV_MODULE_VERSION	"0.9.4"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};
static struct scsi_host_template cxgb4i_host_template = {
	.module = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.proc_name = DRV_MODULE_NAME,
	.can_queue = CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xFFFF,
	.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc = iscsi_target_alloc,
	.use_clustering = DISABLE_CLUSTERING,
	.this_id = -1,
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
		CAP_DATADGST | CAP_DIGEST_OFFLOAD |
		CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param = cxgbi_get_host_param,
	.set_host_param = cxgbi_set_host_param,
	/* session management */
	.create_session = cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn = cxgbi_create_conn,
	.bind_conn = cxgbi_bind_conn,
	.destroy_conn = iscsi_tcp_conn_teardown,
	.start_conn = iscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.get_conn_param = iscsi_conn_get_param,
	.set_param = cxgbi_set_conn_param,
	.get_stats = cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu = iscsi_conn_send_pdu,
	/* task */
	.init_task = iscsi_tcp_task_init,
	.xmit_task = iscsi_tcp_task_xmit,
	.cleanup_task = cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu = cxgbi_conn_alloc_pdu,
	.init_pdu = cxgbi_conn_init_pdu,
	.xmit_pdu = cxgbi_conn_xmit_pdu,
	.parse_pdu_itt = cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param = cxgbi_get_ep_param,
	.ep_connect = cxgbi_ep_connect,
	.ep_poll = cxgbi_ep_poll,
	.ep_disconnect = cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;
/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128
static inline void set_queue(struct sk_buff *skb, unsigned int queue,
				const struct cxgbi_sock *csk)
{
	skb->queue_mapping = queue;
}

static int push_tx_frames(struct cxgbi_sock *, int);
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= (MAX_IMM_TX_PKT_LEN -
			sizeof(struct fw_ofld_tx_data_wr));
}
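
/*
 * send_act_open_req - send an active open request for an IPv4 connection.
 * Builds a CPL_ACT_OPEN_REQ (T4) or CPL_T5_ACT_OPEN_REQ (T5) in the
 * pre-allocated skb, encodes the mss/window/channel parameters in
 * opt0/opt2 and the atid plus rss queue in the request tid, then hands
 * the skb to the L2T layer for transmission.
 */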
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
				struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE(1) |
		WND_SCALE(wscale) |
		MSS_IDX(csk->mss_idx) |
		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN(csk->tx_chan) |
		SMAC_SEL(csk->smac_idx) |
		ULP_MODE(ULP_MODE_ISCSI) |
		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
	opt2 = RX_CHANNEL(0) |
		RSS_QUEUE_VALID |
		(1 << 20) |
		RSS_QUEUE(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= 1 << 22;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			csk, &req->local_ip, ntohs(req->local_port),
			&req->peer_ip, ntohs(req->peer_port),
			csk->atid, csk->rss_qid);
	} else {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(V_FILTER_TUPLE(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		opt2 |= 1 << 31;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			csk, &req->local_ip, ntohs(req->local_port),
			&req->peer_ip, ntohs(req->peer_port),
			csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE(1) |
		WND_SCALE(wscale) |
		MSS_IDX(csk->mss_idx) |
		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN(csk->tx_chan) |
		SMAC_SEL(csk->smac_idx) |
		ULP_MODE(ULP_MODE_ISCSI) |
		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);

	opt2 = RX_CHANNEL(0) |
		RSS_QUEUE_VALID |
		RX_FC_DISABLE |
		RSS_QUEUE(csk->rss_qid);

	if (t4) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
								    8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
								    8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else {
		struct cpl_t5_act_open_req6 *req =
			(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif
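
/*
 * send_close_req - queue a CPL_CLOSE_CON_REQ for the connection.
 * The request is entailed on the write queue so it goes out behind any
 * pending tx data, and is pushed immediately once the connection is
 * established.
 */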
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u.\n",
		csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}
/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
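
/*
 * A flit is an 8-byte unit on the T4 tx path; e.g. for n = 3 entries the
 * computation above yields (3 * 2) / 2 + (2 & 1) + 2 = 5 flits.
 */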
/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
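
/*
 * send_tx_flowc_wr - send a FW_FLOWC_WR to set up the tx flow context.
 * Issued once per connection by push_tx_frames(), before the first
 * FW_OFLD_TX_DATA_WR on the tid, supplying pfn/vf, channel, port,
 * ingress queue id, initial sequence numbers, send buffer size and mss.
 */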
static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int flowclen, i;

	flowclen = 80;
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
				FW_WR_FLOWID(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}
	set_queue(skb, CPL_PRIORITY_DATA, csk);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
		csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
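
/*
 * make_tx_data_wr - prepend a FW_OFLD_TX_DATA_WR to an outgoing skb.
 * Payloads small enough for is_ofld_imm() travel as immediate data inside
 * the work request; otherwise only the WR header is built here and the
 * payload is mapped by the lower-level tx path. The ULP submode bits
 * carry the iSCSI header/data digest settings.
 */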
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (is_ofld_imm(skb)) {
		req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL(1) |
					FW_WR_IMMDLEN(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
						FW_WR_LEN16(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL(1) |
					FW_WR_IMMDLEN(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID(csk->tid) |
					FW_WR_LEN16(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
				FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
		FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
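
/*
 * push_tx_frames - flush the write queue to the hardware.
 * Walks csk->write_queue, converting each skb into 16-byte work request
 * credits, and stops when wr_cred is exhausted. The first send on a
 * connection also emits the FLOWC WR (5 credits). Returns the total
 * truesize of the skbs sent.
 */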
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen +
					sizeof(struct fw_ofld_tx_data_wr), 16);
		else
			credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
					+ sizeof(struct fw_ofld_tx_data_wr),
					16);

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb %u/%u, wr %d < %u.\n",
				csk, skb->len, skb->data_len,
				credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_queue(skb, CPL_PRIORITY_DATA, csk);
		skb->csum = credits_needed;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			csk, skb->len, skb->data_len, credits_needed,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
				send_tx_flowc_wr(csk);
				skb->csum += 5;
				csk->wr_cred -= 5;
				csk->wr_una_cred += 5;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}
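
/*
 * Active open completed: the hardware has assigned a real tid. Move the
 * connection from the atid to the tid table, seed the receive sequence
 * numbers from the CPL, and push any tx data queued while the handshake
 * was in flight.
 */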
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
			atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(THIS_MODULE);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
	if (GET_TCPOPT_TSTAMP(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, mss_idx %u, advmss %u.\n",
		csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);

	cxgbi_sock_put(csk);
}
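
/*
 * Active open reply. CPL_ERR_CONN_EXIST is retried from a half-second
 * timer (csk_act_open_retry_timer); any other failure is mapped to an
 * errno and the open is failed.
 */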
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (status == CPL_ERR_RTX_NEG_ADVICE)
		goto rel_skb;

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
								int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}
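
/*
 * Peer abort. Negative advice is ignored; otherwise acknowledge with a
 * CPL_ABORT_RPL and tear the connection down, unless our own abort is
 * already outstanding (CTPF_ABORT_RPL_PENDING).
 */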
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	if (csk)
		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
			       (&csk->saddr), (&csk->daddr), csk,
			       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}
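
/*
 * Rx path: each iSCSI PDU arrives as a CPL_ISCSI_HDR for the BHS,
 * optionally a CPL_ISCSI_DATA for the payload, then a CPL_RX_DATA_DDP
 * carrying the DDP/digest status. The header skb is parked in
 * csk->skb_ulp_lhdr until the status message completes the PDU.
 */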
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (csk->dcrc_len)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			csk, skb, *bhs, hlen, dlen,
			ntohl(*((unsigned int *)(bhs + 16))),
			ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
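
/*
 * do_fw4_ack - tx completion. Returns work request credits for this tid
 * and advances snd_una, allowing completed skbs on the wr queue to be
 * freed.
 */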
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u.\n",
			csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
					rpl->seq_vld);
	}
	__kfree_skb(skb);
}
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk)
		pr_err("can't find conn. for tid %u.\n", tid);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
			csk, tid, rpl->status);

	__kfree_skb(skb);
}
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
					0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}
static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
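
/*
 * init_act_open - start an active open: resolve the neighbour for the
 * destination address, allocate an atid and an L2T entry, pick the tx
 * channel and queues for the egress port, then send the IPv4 or IPv6
 * act_open request.
 */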
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int size, size6;
	int t4 = is_t4(lldi->adapter_type);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_get(csk);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	try_module_get(THIS_MODULE);
	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
};
int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
				struct ulp_mem_io *req,
				unsigned int wr_len, unsigned int dlen,
				unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	if (is_t4(lldi->adapter_type))
		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
					(ULP_MEMIO_ORDER(1)));
	else
		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
					(V_T5_ULP_MEMIO_IMM(1)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
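
/*
 * ddp_ppod_write_idata - write up to ULPMEM_IDATA_MAX_NPPODS pagepods
 * into adapter memory, carrying the pods as immediate data in a single
 * ULP_TX_MEM_WRITE work request.
 */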
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
			cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
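
/*
 * cxgb4i_ddp_init - set up the DDP pagepod region described by the LLD's
 * iscsi memory window and hook up the per-connection ddp callbacks.
 */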
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		"%u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			"t4 0x%p, rxq id #%d: %u.\n",
			cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb4i_snd_win;
	cdev->rcv_win = cxgb4i_rcv_win;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
				&cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
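
/*
 * t4_uld_rx_handler - dispatch an ingress CPL message to its handler.
 * Messages delivered without a packet-gather list are copied out of the
 * response descriptor; others are converted to an skb first. The CPL
 * opcode indexes cxgb4i_cplhandlers[].
 */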
#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}
static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}
static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);