/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
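/*
 * Human-readable names for the endpoint states, used by the debug
 * output below.  Note: this table is assumed to mirror the ordering
 * of enum iwch_ep_state (declared in iwch_cm.h) entry for entry.
 */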
static char *states[] = {
        "idle",
        "listen",
        "connecting",
        "mpa_wait_req",
        "mpa_req_sent",
        "mpa_req_rcvd",
        "mpa_rep_sent",
        "fpdu_mode",
        "aborting",
        "closing",
        "moribund",
        "dead",
        NULL,
};

int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
                                  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
                          "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
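/*
 * CPL message handling is split in two stages: every t3c_handlers[]
 * entry registered in iwch_cm_init() points at sched(), which only
 * stashes the context in skb->cb and queues the skb on rxq; the
 * worker thread then runs the real handler from work_handlers[] in
 * process context, where it is safe to sleep.
 */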
static void start_ep_timer(struct iwch_ep *ep)
{
        PDBG("%s ep %p\n", __func__, ep);
        if (timer_pending(&ep->timer)) {
                PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
                del_timer_sync(&ep->timer);
        } else
                get_ep(&ep->com);
        ep->timer.expires = jiffies + ep_timeout_secs * HZ;
        ep->timer.data = (unsigned long)ep;
        ep->timer.function = ep_timeout;
        add_timer(&ep->timer);
}
static void stop_ep_timer(struct iwch_ep *ep)
{
        PDBG("%s ep %p\n", __func__, ep);
        if (!timer_pending(&ep->timer)) {
                printk(KERN_ERR "%s timer stopped when it's not running!  ep %p state %u\n",
                       __func__, ep, ep->com.state);
                WARN_ON(1);
        }
        del_timer_sync(&ep->timer);
        put_ep(&ep->com);
}
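/*
 * Thin wrappers around l2t_send()/cxgb3_ofld_send().  If the rnic is
 * in a fatal error state, the skb is freed and -EIO returned so that
 * callers never queue work to a dead device.
 */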
int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
        int error = 0;
        struct cxio_rdev *rdev;

        rdev = (struct cxio_rdev *)tdev->ulp;
        if (cxio_fatal_error(rdev)) {
                kfree_skb(skb);
                return -EIO;
        }
        error = l2t_send(tdev, skb, l2e);
        if (error < 0)
                kfree_skb(skb);
        return error;
}

int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
        int error = 0;
        struct cxio_rdev *rdev;

        rdev = (struct cxio_rdev *)tdev->ulp;
        if (cxio_fatal_error(rdev)) {
                kfree_skb(skb);
                return -EIO;
        }
        error = cxgb3_ofld_send(tdev, skb);
        if (error < 0)
                kfree_skb(skb);
        return error;
}
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
        struct cpl_tid_release *req;

        skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
        if (!skb)
                return;
        req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
        skb->priority = CPL_PRIORITY_SETUP;
        iwch_cxgb3_ofld_send(tdev, skb);
}
int iwch_quiesce_tid(struct iwch_ep *ep)
{
        struct cpl_set_tcb_field *req;
        struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;
        req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
        req->reply = 0;
        req->cpu_idx = 0;
        req->word = htons(W_TCB_RX_QUIESCE);
        req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
        req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

        skb->priority = CPL_PRIORITY_DATA;
        return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
{
        struct cpl_set_tcb_field *req;
        struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;
        req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
        req->reply = 0;
        req->cpu_idx = 0;
        req->word = htons(W_TCB_RX_QUIESCE);
        req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
        req->val = 0;

        skb->priority = CPL_PRIORITY_DATA;
        return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
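/*
 * Derive the effective MSS from the TCP options echoed in the CPL:
 * take the negotiated entry from the hardware MTU table and subtract
 * 40 bytes of IPv4+TCP headers, plus 12 more if timestamps were
 * negotiated.
 */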
static void set_emss(struct iwch_ep *ep, u16 opt)
{
        PDBG("%s ep %p opt %u\n", __func__, ep, opt);
        ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
        if (G_TCPOPT_TSTAMP(opt))
                ep->emss -= 12;
        if (ep->emss < 128)
                ep->emss = 128;
        PDBG("emss=%d\n", ep->emss);
}
static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
        unsigned long flags;
        enum iwch_ep_state state;

        spin_lock_irqsave(&epc->lock, flags);
        state = epc->state;
        spin_unlock_irqrestore(&epc->lock, flags);
        return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
        epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
        unsigned long flags;

        spin_lock_irqsave(&epc->lock, flags);
        PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
        __state_set(epc, new);
        spin_unlock_irqrestore(&epc->lock, flags);
}
static void *alloc_ep(int size, gfp_t gfp)
{
        struct iwch_ep_common *epc;

        epc = kzalloc(size, gfp);
        if (epc) {
                kref_init(&epc->kref);
                spin_lock_init(&epc->lock);
                init_waitqueue_head(&epc->waitq);
        }
        PDBG("%s alloc ep %p\n", __func__, epc);
        return epc;
}
void __free_ep(struct kref *kref)
{
        struct iwch_ep_common *epc;
        epc = container_of(kref, struct iwch_ep_common, kref);
        PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
        kfree(epc);
}
static void release_ep_resources(struct iwch_ep *ep)
{
        PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
        cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
        dst_release(ep->dst);
        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
        put_ep(&ep->com);
}
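/*
 * Worker thread body: drain rxq, dispatch each skb to the handler
 * selected by the CPL opcode that cxgb3 stashed in skb->csum, and
 * drop the endpoint reference taken in sched().
 */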
static void process_work(struct work_struct *work)
{
        struct sk_buff *skb = NULL;
        void *ep;
        struct t3cdev *tdev;
        int ret;

        while ((skb = skb_dequeue(&rxq))) {
                ep = *((void **) (skb->cb));
                tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
                ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
                if (ret & CPL_RET_BUF_DONE)
                        kfree_skb(skb);

                /*
                 * ep was referenced in sched(), and is freed here.
                 */
                put_ep((struct iwch_ep_common *)ep);
        }
}
static int status2errno(int status)
{
        switch (status) {
        case CPL_ERR_NONE:
                return 0;
        case CPL_ERR_CONN_RESET:
                return -ECONNRESET;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}
/*
 * Try to reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
        if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
                skb_trim(skb, 0);
                skb_get(skb);
        } else {
                skb = alloc_skb(len, gfp);
        }
        return skb;
}
static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
                                 __be32 peer_ip, __be16 local_port,
                                 __be16 peer_port, u8 tos)
{
        struct rtable *rt;
        struct flowi fl = {
                .oif = 0,
                .nl_u = {
                         .ip4_u = {
                                   .daddr = peer_ip,
                                   .saddr = local_ip,
                                   .tos = tos}
                },
                .proto = IPPROTO_TCP,
                .uli_u = {
                          .ports = {
                                    .sport = local_port,
                                    .dport = peer_port}
                }
        };

        if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
                return NULL;
        return rt;
}
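/*
 * The adapter only supports a fixed, sorted table of MTUs; return the
 * index of the largest entry that does not exceed the path MTU.
 */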
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
        int i = 0;

        while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
                ++i;
        return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
        PDBG("%s t3cdev %p\n", __func__, dev);
        kfree_skb(skb);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
        printk(KERN_ERR MOD "ARP failure during connect\n");
        kfree_skb(skb);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req *req = cplhdr(skb);

        PDBG("%s t3cdev %p\n", __func__, dev);
        req->cmd = CPL_ABORT_NO_RST;
        iwch_cxgb3_ofld_send(dev, skb);
}
static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
        struct cpl_close_con_req *req;
        struct sk_buff *skb;

        PDBG("%s ep %p\n", __func__, ep);
        skb = get_skb(NULL, sizeof(*req), gfp);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
                return -ENOMEM;
        }
        skb->priority = CPL_PRIORITY_DATA;
        set_arp_failure_handler(skb, arp_failure_discard);
        req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
        return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
        struct cpl_abort_req *req;

        PDBG("%s ep %p\n", __func__, ep);
        skb = get_skb(skb, sizeof(*req), gfp);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
                       __func__);
                return -ENOMEM;
        }
        skb->priority = CPL_PRIORITY_DATA;
        set_arp_failure_handler(skb, abort_arp_failure);
        req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
        req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
        req->cmd = CPL_ABORT_SEND_RST;
        return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
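/*
 * Build and send the CPL_ACT_OPEN_REQ for an active connect.  opt0h
 * carries the TCP window scale, MSS index and L2T entry; opt0l packs
 * the TOS bits and the receive buffer size in 1KB units (rcv_win>>10);
 * opt2 selects the congestion control flavor.
 */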
static int send_connect(struct iwch_ep *ep)
{
        struct cpl_act_open_req *req;
        struct sk_buff *skb;
        u32 opt0h, opt0l, opt2;
        unsigned int mtu_idx;
        int wscale;

        PDBG("%s ep %p\n", __func__, ep);

        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
                       __func__);
                return -ENOMEM;
        }
        mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
        wscale = compute_wscale(rcv_win);
        opt0h = V_NAGLE(0) |
            V_NO_CONG(nocong) |
            V_KEEP_ALIVE(1) |
            F_TCAM_BYPASS |
            V_WND_SCALE(wscale) |
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
        opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
        skb->priority = CPL_PRIORITY_SETUP;
        set_arp_failure_handler(skb, act_open_req_arp_failure);

        req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
        req->local_port = ep->com.local_addr.sin_port;
        req->peer_port = ep->com.remote_addr.sin_port;
        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
        req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
        req->opt0h = htonl(opt0h);
        req->opt0l = htonl(opt0l);
        req->params = 0;
        req->opt2 = htonl(opt2);
        return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
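/*
 * Transmit the MPA start request on the TCP stream.  The MPA header
 * (key, CRC/marker flags, revision) plus any private data is built in
 * the skb, which is then wrapped in a TX_DATA work request.  The skb
 * is referenced again so the payload stays in memory until the
 * hardware acks it in tx_ack().
 */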
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
        int mpalen;
        struct tx_data_wr *req;
        struct mpa_message *mpa;
        int len;

        PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

        BUG_ON(skb_cloned(skb));

        mpalen = sizeof(*mpa) + ep->plen;
        if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
                kfree_skb(skb);
                skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
                if (!skb) {
                        connect_reply_upcall(ep, -ENOMEM);
                        return;
                }
        }
        skb_trim(skb, 0);
        skb_reserve(skb, sizeof(*req));
        skb_put(skb, mpalen);
        skb->priority = CPL_PRIORITY_DATA;
        mpa = (struct mpa_message *) skb->data;
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
        mpa->flags = (crc_enabled ? MPA_CRC : 0) |
                     (markers_enabled ? MPA_MARKERS : 0);
        mpa->private_data_size = htons(ep->plen);
        mpa->revision = mpa_rev;

        if (ep->plen)
                memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

        /*
         * Reference the mpa skb.  This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function tx_ack() will deref it.
         */
        skb_get(skb);
        set_arp_failure_handler(skb, arp_failure_discard);
        skb_reset_transport_header(skb);
        len = skb->len;
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
        req->len = htonl(len);
        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
                           V_TX_SNDBUF(snd_win>>15));
        req->flags = htonl(F_TX_INIT);
        req->sndseq = htonl(ep->snd_seq);
        ep->mpa_skb = skb;
        iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
        start_ep_timer(ep);
        state_set(&ep->com, MPA_REQ_SENT);
}
static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
        int mpalen;
        struct tx_data_wr *req;
        struct mpa_message *mpa;
        struct sk_buff *skb;

        PDBG("%s ep %p plen %d\n", __func__, ep, plen);

        mpalen = sizeof(*mpa) + plen;

        skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
                return -ENOMEM;
        }
        skb_reserve(skb, sizeof(*req));
        mpa = (struct mpa_message *) skb_put(skb, mpalen);
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
        mpa->flags = MPA_REJECT;
        mpa->revision = mpa_rev;
        mpa->private_data_size = htons(plen);
        if (plen)
                memcpy(mpa->private_data, pdata, plen);

        /*
         * Reference the mpa skb again.  This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function tx_ack() will deref it.
         */
        skb_get(skb);
        skb->priority = CPL_PRIORITY_DATA;
        set_arp_failure_handler(skb, arp_failure_discard);
        skb_reset_transport_header(skb);
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
        req->len = htonl(mpalen);
        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
                           V_TX_SNDBUF(snd_win>>15));
        req->flags = htonl(F_TX_INIT);
        req->sndseq = htonl(ep->snd_seq);
        ep->mpa_skb = skb;
        return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
        int mpalen;
        struct tx_data_wr *req;
        struct mpa_message *mpa;
        int len;
        struct sk_buff *skb;

        PDBG("%s ep %p plen %d\n", __func__, ep, plen);

        mpalen = sizeof(*mpa) + plen;

        skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
                return -ENOMEM;
        }
        skb->priority = CPL_PRIORITY_DATA;
        skb_reserve(skb, sizeof(*req));
        mpa = (struct mpa_message *) skb_put(skb, mpalen);
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
        mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
                     (markers_enabled ? MPA_MARKERS : 0);
        mpa->revision = mpa_rev;
        mpa->private_data_size = htons(plen);
        if (plen)
                memcpy(mpa->private_data, pdata, plen);

        /*
         * Reference the mpa skb.  This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function tx_ack() will deref it.
         */
        skb_get(skb);
        set_arp_failure_handler(skb, arp_failure_discard);
        skb_reset_transport_header(skb);
        len = skb->len;
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
        req->len = htonl(len);
        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
                           V_TX_SNDBUF(snd_win>>15));
        req->flags = htonl(F_TX_INIT);
        req->sndseq = htonl(ep->snd_seq);
        ep->mpa_skb = skb;
        state_set(&ep->com, MPA_REP_SENT);
        return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
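/*
 * Active open established: record the initial send/receive sequence
 * numbers, trade the atid used for the ACT_OPEN for the real hardware
 * tid, and kick off MPA negotiation on the new stream.
 */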
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int tid = GET_TID(req);

        PDBG("%s ep %p tid %d\n", __func__, ep, tid);

        dst_confirm(ep->dst);

        /* setup the hwtid for this connection */
        ep->hwtid = tid;
        cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

        ep->snd_seq = ntohl(req->snd_isn);
        ep->rcv_seq = ntohl(req->rcv_isn);

        set_emss(ep, ntohs(req->tcp_opt));

        /* dealloc the atid */
        cxgb3_free_atid(ep->com.tdev, ep->atid);

        /* start MPA negotiation */
        send_mpa_req(ep, skb);

        return 0;
}
static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
        PDBG("%s ep %p\n", __func__, ep);
        state_set(&ep->com, ABORTING);
        send_abort(ep, skb, gfp);
}
static void close_complete_upcall(struct iwch_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p\n", __func__, ep);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CLOSE;
        if (ep->com.cm_id) {
                PDBG("close complete delivered ep %p cm_id %p tid %d\n",
                     ep, ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                ep->com.qp = NULL;
        }
}
static void peer_close_upcall(struct iwch_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p\n", __func__, ep);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_DISCONNECT;
        if (ep->com.cm_id) {
                PDBG("peer close delivered ep %p cm_id %p tid %d\n",
                     ep, ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        }
}
static void peer_abort_upcall(struct iwch_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p\n", __func__, ep);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CLOSE;
        event.status = -ECONNRESET;
        if (ep->com.cm_id) {
                PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
                     ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                ep->com.qp = NULL;
        }
}
static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
        struct iw_cm_event event;

        PDBG("%s ep %p status %d\n", __func__, ep, status);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REPLY;
        event.status = status;
        event.local_addr = ep->com.local_addr;
        event.remote_addr = ep->com.remote_addr;

        if ((status == 0) || (status == -ECONNREFUSED)) {
                event.private_data_len = ep->plen;
                event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        }
        if (ep->com.cm_id) {
                PDBG("%s ep %p tid %d status %d\n", __func__, ep,
                     ep->hwtid, status);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        }
        if (status < 0) {
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                ep->com.qp = NULL;
        }
}
static void connect_request_upcall(struct iwch_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REQUEST;
        event.local_addr = ep->com.local_addr;
        event.remote_addr = ep->com.remote_addr;
        event.private_data_len = ep->plen;
        event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        event.provider_data = ep;
        if (state_read(&ep->parent_ep->com) != DEAD) {
                get_ep(&ep->com);
                ep->parent_ep->com.cm_id->event_handler(
                                                ep->parent_ep->com.cm_id,
                                                &event);
        }
        put_ep(&ep->parent_ep->com);
        ep->parent_ep = NULL;
}
static void established_upcall(struct iwch_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p\n", __func__, ep);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_ESTABLISHED;
        if (ep->com.cm_id) {
                PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        }
}
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
        struct cpl_rx_data_ack *req;
        struct sk_buff *skb;

        PDBG("%s ep %p credits %u\n", __func__, ep, credits);
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
                return 0;
        }

        req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
        req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
        skb->priority = CPL_PRIORITY_ACK;
        iwch_cxgb3_ofld_send(ep->com.tdev, skb);
        return credits;
}
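/*
 * MPA messages can arrive in multiple TCP segments, so both
 * process_mpa_reply() and process_mpa_request() accumulate the bytes
 * in ep->mpa_pkt and bail out until the full header plus private data
 * is present.  Once a reply validates, the QP is bound to the stream
 * and moved to RTS via iwch_modify_qp().
 */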
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
        struct mpa_message *mpa;
        u16 plen;
        struct iwch_qp_attributes attrs;
        enum iwch_qp_attr_mask mask;
        int err;

        PDBG("%s ep %p\n", __func__, ep);

        /*
         * Stop mpa timer.  If it expired, then the state has
         * changed and we bail since ep_timeout already aborted
         * the connection.
         */
        stop_ep_timer(ep);
        if (state_read(&ep->com) != MPA_REQ_SENT)
                return;

        /*
         * If we get more than the supported amount of private data
         * then we must fail this connection.
         */
        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
                err = -EINVAL;
                goto err;
        }

        /*
         * copy the new data into our accumulation buffer.
         */
        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
                                  skb->len);
        ep->mpa_pkt_len += skb->len;

        /*
         * if we don't even have the mpa message, then bail.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
                return;
        mpa = (struct mpa_message *) ep->mpa_pkt;

        /* Validate MPA header. */
        if (mpa->revision != mpa_rev) {
                err = -EPROTO;
                goto err;
        }
        if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
                err = -EPROTO;
                goto err;
        }

        plen = ntohs(mpa->private_data_size);

        /*
         * Fail if there's too much private data.
         */
        if (plen > MPA_MAX_PRIVATE_DATA) {
                err = -EPROTO;
                goto err;
        }

        /*
         * If plen does not account for pkt size
         */
        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
                err = -EPROTO;
                goto err;
        }

        ep->plen = (u8) plen;

        /*
         * If we don't have all the pdata yet, then bail.
         * We'll continue process when more data arrives.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
                return;

        if (mpa->flags & MPA_REJECT) {
                err = -ECONNREFUSED;
                goto err;
        }

        /*
         * If we get here we have accumulated the entire mpa
         * start reply message including private data. And
         * the MPA header is valid.
         */
        state_set(&ep->com, FPDU_MODE);
        ep->mpa_attr.initiator = 1;
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
        ep->mpa_attr.version = mpa_rev;
        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
             "xmit_marker_enabled=%d, version=%d\n", __func__,
             ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
        attrs.max_ord = ep->ord;
        attrs.llp_stream_handle = ep;
        attrs.next_state = IWCH_QP_STATE_RTS;

        mask = IWCH_QP_ATTR_NEXT_STATE |
               IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
               IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

        /* bind QP and TID with INIT_WR */
        err = iwch_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
                goto err;

        /* if needed, post a zero-byte read to kick the peer */
        if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
                iwch_post_zb_read(ep->com.qp);
        }

        goto out;
err:
        abort_connection(ep, skb, GFP_KERNEL);
out:
        connect_reply_upcall(ep, err);
}
static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
        struct mpa_message *mpa;
        u16 plen;

        PDBG("%s ep %p\n", __func__, ep);

        /*
         * Stop mpa timer.  If it expired, then the state has
         * changed and we bail since ep_timeout already aborted
         * the connection.
         */
        stop_ep_timer(ep);
        if (state_read(&ep->com) != MPA_REQ_WAIT)
                return;

        /*
         * If we get more than the supported amount of private data
         * then we must fail this connection.
         */
        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

        /*
         * Copy the new data into our accumulation buffer.
         */
        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
                                  skb->len);
        ep->mpa_pkt_len += skb->len;

        /*
         * If we don't even have the mpa message, then bail.
         * We'll continue process when more data arrives.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
                return;
        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
        mpa = (struct mpa_message *) ep->mpa_pkt;

        /*
         * Validate MPA Header.
         */
        if (mpa->revision != mpa_rev) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        plen = ntohs(mpa->private_data_size);

        /*
         * Fail if there's too much private data.
         */
        if (plen > MPA_MAX_PRIVATE_DATA) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        /*
         * If plen does not account for pkt size
         */
        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }
        ep->plen = (u8) plen;

        /*
         * If we don't have all the pdata yet, then bail.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
                return;

        /*
         * If we get here we have accumulated the entire mpa
         * start request message including private data.
         */
        ep->mpa_attr.initiator = 0;
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
        ep->mpa_attr.version = mpa_rev;
        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
             "xmit_marker_enabled=%d, version=%d\n", __func__,
             ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

        state_set(&ep->com, MPA_REQ_RCVD);

        /* drive upcall */
        connect_request_upcall(ep);
}
static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_rx_data *hdr = cplhdr(skb);
        unsigned int dlen = ntohs(hdr->len);

        PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

        skb_pull(skb, sizeof(*hdr));
        skb_trim(skb, dlen);

        ep->rcv_seq += dlen;
        BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

        switch (state_read(&ep->com)) {
        case MPA_REQ_SENT:
                process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
                process_mpa_request(ep, skb);
                break;
        case MPA_REP_SENT:
                break;
        default:
                printk(KERN_ERR MOD "%s Unexpected streaming data."
                       " ep %p state %d tid %d\n",
                       __func__, ep, state_read(&ep->com), ep->hwtid);

                /*
                 * The ep will timeout and inform the ULP of the failure.
                 * See ep_timeout().
                 */
                break;
        }

        /* update RX credits */
        update_rx_credits(ep, dlen);

        return CPL_RET_BUF_DONE;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_wr_ack *hdr = cplhdr(skb);
        unsigned int credits = ntohs(hdr->credits);

        PDBG("%s ep %p credits %u\n", __func__, ep, credits);

        if (credits == 0) {
                PDBG("%s 0 credit ack ep %p state %u\n",
                     __func__, ep, state_read(&ep->com));
                return CPL_RET_BUF_DONE;
        }

        BUG_ON(credits != 1);
        dst_confirm(ep->dst);
        if (!ep->mpa_skb) {
                PDBG("%s rdma_init wr_ack ep %p state %u\n",
                     __func__, ep, state_read(&ep->com));
                if (ep->mpa_attr.initiator) {
                        PDBG("%s initiator ep %p state %u\n",
                             __func__, ep, state_read(&ep->com));
                        if (peer2peer)
                                iwch_post_zb_read(ep->com.qp);
                } else {
                        PDBG("%s responder ep %p state %u\n",
                             __func__, ep, state_read(&ep->com));
                        ep->com.rpl_done = 1;
                        wake_up(&ep->com.waitq);
                }
        } else {
                PDBG("%s lsm ack ep %p state %u freeing skb\n",
                     __func__, ep, state_read(&ep->com));
                kfree_skb(ep->mpa_skb);
                ep->mpa_skb = NULL;
        }
        return CPL_RET_BUF_DONE;
}
static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        unsigned long flags;
        int release = 0;

        PDBG("%s ep %p\n", __func__, ep);
        BUG_ON(!ep);

        /*
         * We get 2 abort replies from the HW.  The first one must
         * be ignored except for scribbling that we need one more.
         */
        if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
                ep->flags |= ABORT_REQ_IN_PROGRESS;
                return CPL_RET_BUF_DONE;
        }

        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case ABORTING:
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                break;
        default:
                printk(KERN_ERR "%s ep %p state %d\n",
                       __func__, ep, ep->com.state);
                break;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);

        if (release)
                release_ep_resources(ep);
        return CPL_RET_BUF_DONE;
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
        return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
               status != CPL_ERR_ARP_MISS;
}
static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);

        PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
             status2errno(rpl->status));
        connect_reply_upcall(ep, status2errno(rpl->status));
        state_set(&ep->com, DEAD);
        if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
                release_tid(ep->com.tdev, GET_TID(rpl), NULL);
        cxgb3_free_atid(ep->com.tdev, ep->atid);
        dst_release(ep->dst);
        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
        put_ep(&ep->com);
        return CPL_RET_BUF_DONE;
}
static int listen_start(struct iwch_listen_ep *ep)
{
        struct sk_buff *skb;
        struct cpl_pass_open_req *req;

        PDBG("%s ep %p\n", __func__, ep);
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
                return -ENOMEM;
        }

        req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
        req->local_port = ep->com.local_addr.sin_port;
        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
        req->peer_port = 0;
        req->peer_ip = 0;
        req->peer_netmask = 0;
        req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
        req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
        req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

        skb->priority = CPL_PRIORITY_SETUP;
        return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_listen_ep *ep = ctx;
        struct cpl_pass_open_rpl *rpl = cplhdr(skb);

        PDBG("%s ep %p status %d error %d\n", __func__, ep,
             rpl->status, status2errno(rpl->status));
        ep->com.rpl_err = status2errno(rpl->status);
        ep->com.rpl_done = 1;
        wake_up(&ep->com.waitq);

        return CPL_RET_BUF_DONE;
}
static int listen_stop(struct iwch_listen_ep *ep)
{
        struct sk_buff *skb;
        struct cpl_close_listserv_req *req;

        PDBG("%s ep %p\n", __func__, ep);
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
                return -ENOMEM;
        }
        req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        req->cpu_idx = 0;
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
        skb->priority = CPL_PRIORITY_SETUP;
        return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
                             void *ctx)
{
        struct iwch_listen_ep *ep = ctx;
        struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

        PDBG("%s ep %p\n", __func__, ep);
        ep->com.rpl_err = status2errno(rpl->status);
        ep->com.rpl_done = 1;
        wake_up(&ep->com.waitq);
        return CPL_RET_BUF_DONE;
}
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
        struct cpl_pass_accept_rpl *rpl;
        unsigned int mtu_idx;
        u32 opt0h, opt0l, opt2;
        int wscale;

        PDBG("%s ep %p\n", __func__, ep);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(*rpl));
        skb_get(skb);
        mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
        wscale = compute_wscale(rcv_win);
        opt0h = V_NAGLE(0) |
            V_NO_CONG(nocong) |
            V_KEEP_ALIVE(1) |
            F_TCAM_BYPASS |
            V_WND_SCALE(wscale) |
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
        opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

        rpl = cplhdr(skb);
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
        rpl->peer_ip = peer_ip;
        rpl->opt0h = htonl(opt0h);
        rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
        rpl->opt2 = htonl(opt2);
        rpl->rsvd = rpl->opt2;  /* workaround for HW bug */
        skb->priority = CPL_PRIORITY_SETUP;
        iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
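/*
 * Reject a connection request.  On T3B and later parts simply release
 * the hardware tid; T3A has no TID release mechanism (presumably a HW
 * limitation), so send an explicit PASS_ACCEPT_RPL carrying the
 * reject status instead.
 */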
static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
                      struct sk_buff *skb)
{
        PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
             peer_ip);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
        skb_get(skb);

        if (tdev->type != T3A)
                release_tid(tdev, hwtid, skb);
        else {
                struct cpl_pass_accept_rpl *rpl;

                rpl = cplhdr(skb);
                skb->priority = CPL_PRIORITY_SETUP;
                rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                      hwtid));
                rpl->peer_ip = peer_ip;
                rpl->opt0h = htonl(F_TCAM_BYPASS);
                rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
                rpl->opt2 = 0;
                rpl->rsvd = rpl->opt2;
                iwch_cxgb3_ofld_send(tdev, skb);
        }
}
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *child_ep, *parent_ep = ctx;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
        struct iff_mac tim;

        PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

        if (state_read(&parent_ep->com) != LISTEN) {
                printk(KERN_ERR "%s - listening ep not in LISTEN\n",
                       __func__);
                goto reject;
        }

        /*
         * Find the netdev for this connection request.
         */
        tim.mac_addr = req->dst_mac;
        tim.vlan_tag = ntohs(req->vlan_tag);
        if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
                printk(KERN_ERR MOD
                       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
                       __func__,
                       req->dst_mac[0],
                       req->dst_mac[1],
                       req->dst_mac[2],
                       req->dst_mac[3],
                       req->dst_mac[4],
                       req->dst_mac[5]);
                goto reject;
        }

        /* Find output route */
        rt = find_route(tdev,
                        req->local_ip,
                        req->peer_ip,
                        req->local_port,
                        req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
        if (!rt) {
                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }
        dst = &rt->u.dst;
        l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                goto reject;
        }
        child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
        if (!child_ep) {
                printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
                l2t_release(L2DATA(tdev), l2t);
                dst_release(dst);
                goto reject;
        }
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.tdev = tdev;
        child_ep->com.cm_id = NULL;
        child_ep->com.local_addr.sin_family = PF_INET;
        child_ep->com.local_addr.sin_port = req->local_port;
        child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
        child_ep->com.remote_addr.sin_family = PF_INET;
        child_ep->com.remote_addr.sin_port = req->peer_port;
        child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
        get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
        child_ep->l2t = l2t;
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
        init_timer(&child_ep->timer);
        cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
        accept_cr(child_ep, req->peer_ip, skb);
        goto out;
reject:
        reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
        return CPL_RET_BUF_DONE;
}
static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_pass_establish *req = cplhdr(skb);

        PDBG("%s ep %p\n", __func__, ep);
        ep->snd_seq = ntohl(req->snd_isn);
        ep->rcv_seq = ntohl(req->rcv_isn);

        set_emss(ep, ntohs(req->tcp_opt));

        dst_confirm(ep->dst);
        state_set(&ep->com, MPA_REQ_WAIT);
        start_ep_timer(ep);

        return CPL_RET_BUF_DONE;
}
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct iwch_qp_attributes attrs;
        unsigned long flags;
        int disconnect = 1;
        int release = 0;

        PDBG("%s ep %p\n", __func__, ep);
        dst_confirm(ep->dst);

        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
                __state_set(&ep->com, CLOSING);
                break;
        case MPA_REQ_SENT:
                __state_set(&ep->com, CLOSING);
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REQ_RCVD:

                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR.
                 */
                __state_set(&ep->com, CLOSING);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p\n", ep);
                wake_up(&ep->com.waitq);
                break;
        case FPDU_MODE:
                start_ep_timer(ep);
                __state_set(&ep->com, CLOSING);
                attrs.next_state = IWCH_QP_STATE_CLOSING;
                iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
                               IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
                peer_close_upcall(ep);
                break;
        case ABORTING:
                disconnect = 0;
                break;
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                disconnect = 0;
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = IWCH_QP_STATE_IDLE;
                        iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                disconnect = 0;
                break;
        case DEAD:
                disconnect = 0;
                break;
        default:
                BUG_ON(1);
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (disconnect)
                iwch_ep_disconnect(ep, 0, GFP_KERNEL);
        if (release)
                release_ep_resources(ep);
        return CPL_RET_BUF_DONE;
}
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
               status == CPL_ERR_PERSIST_NEG_ADVICE;
}
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct iwch_ep *ep = ctx;
        struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        struct iwch_qp_attributes attrs;
        int ret;
        int release = 0;
        unsigned long flags;

        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
                     ep->hwtid);
                t3_l2t_send_event(ep->com.tdev, ep->l2t);
                return CPL_RET_BUF_DONE;
        }

        /*
         * We get 2 peer aborts from the HW.  The first one must
         * be ignored except for scribbling that we need one more.
         */
        if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
                ep->flags |= PEER_ABORT_IN_PROGRESS;
                return CPL_RET_BUF_DONE;
        }

        spin_lock_irqsave(&ep->com.lock, flags);
        PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
        switch (ep->com.state) {
        case CONNECTING:
                break;
        case MPA_REQ_WAIT:
                stop_ep_timer(ep);
                break;
        case MPA_REQ_SENT:
                stop_ep_timer(ep);
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REP_SENT:
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p\n", ep);
                wake_up(&ep->com.waitq);
                break;
        case MPA_REQ_RCVD:

                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR.
                 */
                break;
        case MORIBUND:
        case CLOSING:
                stop_ep_timer(ep);
                /*FALLTHROUGH*/
        case FPDU_MODE:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = IWCH_QP_STATE_ERROR;
                        ret = iwch_modify_qp(ep->com.qp->rhp,
                                     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                        if (ret)
                                printk(KERN_ERR MOD
                                       "%s - qp <- error failed!\n",
                                       __func__);
                }
                peer_abort_upcall(ep);
                break;
        case ABORTING:
                break;
        case DEAD:
                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
                spin_unlock_irqrestore(&ep->com.lock, flags);
                return CPL_RET_BUF_DONE;
        default:
                BUG_ON(1);
                break;
        }
        dst_confirm(ep->dst);
        if (ep->com.state != ABORTING) {
                __state_set(&ep->com, DEAD);
                release = 1;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);

        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
        if (!rpl_skb) {
                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
                       __func__);
                release = 1;
                goto out;
        }
        rpl_skb->priority = CPL_PRIORITY_DATA;
        rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
        rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
        rpl->cmd = CPL_ABORT_NO_RST;
        iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
        if (release)
                release_ep_resources(ep);
        return CPL_RET_BUF_DONE;
}
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct iwch_qp_attributes attrs;
        unsigned long flags;
        int release = 0;

        PDBG("%s ep %p\n", __func__, ep);
        BUG_ON(!ep);

        /* The cm_id may be null if we failed to connect */
        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if ((ep->com.cm_id) && (ep->com.qp)) {
                        attrs.next_state = IWCH_QP_STATE_IDLE;
                        iwch_modify_qp(ep->com.qp->rhp,
                                       ep->com.qp,
                                       IWCH_QP_ATTR_NEXT_STATE,
                                       &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                break;
        case ABORTING:
        case DEAD:
                break;
        default:
                BUG_ON(1);
                break;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (release)
                release_ep_resources(ep);
        return CPL_RET_BUF_DONE;
}
/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for the ULP to consume later.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;

        if (state_read(&ep->com) != FPDU_MODE)
                return CPL_RET_BUF_DONE;

        PDBG("%s ep %p\n", __func__, ep);
        skb_pull(skb, sizeof(struct cpl_rdma_terminate));
        PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
        skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
                                  skb->len);
        ep->com.qp->attr.terminate_msg_len = skb->len;
        ep->com.qp->attr.is_terminate_local = 0;
        return CPL_RET_BUF_DONE;
}
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cpl_rdma_ec_status *rep = cplhdr(skb);
        struct iwch_ep *ep = ctx;

        PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
             rep->status);
        if (rep->status) {
                struct iwch_qp_attributes attrs;

                printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
                       __func__, ep->hwtid);
                stop_ep_timer(ep);
                attrs.next_state = IWCH_QP_STATE_ERROR;
                iwch_modify_qp(ep->com.qp->rhp,
                               ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
                               &attrs, 1);
                abort_connection(ep, NULL, GFP_KERNEL);
        }
        return CPL_RET_BUF_DONE;
}
static void ep_timeout(unsigned long arg)
{
        struct iwch_ep *ep = (struct iwch_ep *)arg;
        struct iwch_qp_attributes attrs;
        unsigned long flags;
        int abort = 1;

        spin_lock_irqsave(&ep->com.lock, flags);
        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
             ep->com.state);
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                __state_set(&ep->com, ABORTING);
                connect_reply_upcall(ep, -ETIMEDOUT);
                break;
        case MPA_REQ_WAIT:
                __state_set(&ep->com, ABORTING);
                break;
        case CLOSING:
        case MORIBUND:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = IWCH_QP_STATE_ERROR;
                        iwch_modify_qp(ep->com.qp->rhp,
                                       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
                                       &attrs, 1);
                }
                __state_set(&ep->com, ABORTING);
                break;
        default:
                printk(KERN_ERR "%s unexpected state ep %p state %u\n",
                       __func__, ep, ep->com.state);
                WARN_ON(1);
                abort = 0;
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (abort)
                abort_connection(ep, NULL, GFP_ATOMIC);
        put_ep(&ep->com);
}
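/*
 * The functions below are the entry points called by the iw_cm core
 * on behalf of the ULP: reject/accept a connection request, initiate
 * a connection, create/destroy a listening endpoint, and disconnect.
 */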
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
        int err;
        struct iwch_ep *ep = to_ep(cm_id);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        if (state_read(&ep->com) == DEAD) {
                put_ep(&ep->com);
                return -ECONNRESET;
        }
        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        if (mpa_rev == 0)
                abort_connection(ep, NULL, GFP_KERNEL);
        else {
                err = send_mpa_reject(ep, pdata, pdata_len);
                err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
        }
        put_ep(&ep->com);
        return 0;
}
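/*
 * Accept a connection request: validate the requested IRD/ORD against
 * the device limits, bind the QP to the stream and move it to RTS,
 * wait for the zero-byte-read wr_ack if receive queue entries are
 * already posted, then send the MPA reply and enter FPDU_MODE.
 */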
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        int err;
        struct iwch_qp_attributes attrs;
        enum iwch_qp_attr_mask mask;
        struct iwch_ep *ep = to_ep(cm_id);
        struct iwch_dev *h = to_iwch_dev(cm_id->device);
        struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        if (state_read(&ep->com) == DEAD)
                return -ECONNRESET;

        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        BUG_ON(!qp);

        if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
            (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
                abort_connection(ep, NULL, GFP_KERNEL);
                return -EINVAL;
        }

        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.qp = qp;

        ep->com.rpl_done = 0;
        ep->com.rpl_err = 0;
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;
        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

        /* bind QP to EP and move to RTS */
        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
        attrs.max_ord = ep->ord;
        attrs.llp_stream_handle = ep;
        attrs.next_state = IWCH_QP_STATE_RTS;

        /* bind QP and TID with INIT_WR */
        mask = IWCH_QP_ATTR_NEXT_STATE |
               IWCH_QP_ATTR_LLP_STREAM_HANDLE |
               IWCH_QP_ATTR_MPA_ATTR |
               IWCH_QP_ATTR_MAX_IRD |
               IWCH_QP_ATTR_MAX_ORD;

        err = iwch_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
                goto err;

        /* if needed, wait for wr_ack */
        if (iwch_rqes_posted(qp)) {
                wait_event(ep->com.waitq, ep->com.rpl_done);
                err = ep->com.rpl_err;
                if (err)
                        goto err;
        }

        err = send_mpa_reply(ep, conn_param->private_data,
                             conn_param->private_data_len);
        if (err)
                goto err;

        state_set(&ep->com, FPDU_MODE);
        established_upcall(ep);
        put_ep(&ep->com);
        return 0;
err:
        ep->com.cm_id = NULL;
        ep->com.qp = NULL;
        cm_id->rem_ref(cm_id);
        put_ep(&ep->com);
        return err;
}
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
        struct net_device *dev;

        dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
        if (!dev)
                return 0;
        dev_put(dev);
        return 1;
}
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        int err = 0;
        struct iwch_dev *h = to_iwch_dev(cm_id->device);
        struct iwch_ep *ep;
        struct rtable *rt;

        if (is_loopback_dst(cm_id)) {
                err = -ENOSYS;
                goto out;
        }

        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto out;
        }
        init_timer(&ep->timer);
        ep->plen = conn_param->private_data_len;
        if (ep->plen)
                memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
                       conn_param->private_data, ep->plen);
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;
        ep->com.tdev = h->rdev.t3cdev_p;

        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.qp = get_qhp(h, conn_param->qpn);
        BUG_ON(!ep->com.qp);
        PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
             ep->com.qp, cm_id);

        /*
         * Allocate an active TID to initiate a TCP connection.
         */
        ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
        if (ep->atid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

        /* find a route */
        rt = find_route(h->rdev.t3cdev_p,
                        cm_id->local_addr.sin_addr.s_addr,
                        cm_id->remote_addr.sin_addr.s_addr,
                        cm_id->local_addr.sin_port,
                        cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
        if (!rt) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
                goto fail3;
        }
        ep->dst = &rt->u.dst;

        /* get a l2t entry */
        ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
                             ep->dst->neighbour->dev);
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
                goto fail4;
        }

        state_set(&ep->com, CONNECTING);
        ep->tos = IPTOS_LOWDELAY;
        ep->com.local_addr = cm_id->local_addr;
        ep->com.remote_addr = cm_id->remote_addr;

        /* send connect request to rnic */
        err = send_connect(ep);
        if (!err)
                goto out;

        l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
        dst_release(ep->dst);
fail3:
        cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
        cm_id->rem_ref(cm_id);
        put_ep(&ep->com);
out:
        return err;
}
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
        int err = 0;
        struct iwch_dev *h = to_iwch_dev(cm_id->device);
        struct iwch_listen_ep *ep;

        might_sleep();

        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto fail1;
        }
        PDBG("%s ep %p\n", __func__, ep);
        ep->com.tdev = h->rdev.t3cdev_p;
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->backlog = backlog;
        ep->com.local_addr = cm_id->local_addr;

        /*
         * Allocate a server TID.
         */
        ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
        if (ep->stid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

        state_set(&ep->com, LISTEN);
        err = listen_start(ep);
        if (err)
                goto fail3;

        /* wait for pass_open_rpl */
        wait_event(ep->com.waitq, ep->com.rpl_done);
        err = ep->com.rpl_err;
        if (!err) {
                cm_id->provider_data = ep;
                goto out;
        }
fail3:
        cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
        cm_id->rem_ref(cm_id);
        put_ep(&ep->com);
fail1:
out:
        return err;
}
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
        int err;
        struct iwch_listen_ep *ep = to_listen_ep(cm_id);

        PDBG("%s ep %p\n", __func__, ep);

        might_sleep();
        state_set(&ep->com, DEAD);
        ep->com.rpl_done = 0;
        ep->com.rpl_err = 0;
        err = listen_stop(ep);
        if (err)
                goto done;
        wait_event(ep->com.waitq, ep->com.rpl_done);
        cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
        err = ep->com.rpl_err;
        cm_id->rem_ref(cm_id);
        put_ep(&ep->com);
        return err;
}
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
        int ret = 0;
        unsigned long flags;
        int close = 0;
        int fatal = 0;
        struct t3cdev *tdev;
        struct cxio_rdev *rdev;

        spin_lock_irqsave(&ep->com.lock, flags);

        PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
             states[ep->com.state], abrupt);

        tdev = (struct t3cdev *)ep->com.tdev;
        rdev = (struct cxio_rdev *)tdev->ulp;
        if (cxio_fatal_error(rdev)) {
                fatal = 1;
                close_complete_upcall(ep);
                ep->com.state = DEAD;
        }
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
        case MPA_REQ_SENT:
        case MPA_REQ_RCVD:
        case MPA_REP_SENT:
        case FPDU_MODE:
                close = 1;
                if (abrupt)
                        ep->com.state = ABORTING;
                else {
                        ep->com.state = CLOSING;
                        start_ep_timer(ep);
                }
                break;
        case CLOSING:
                close = 1;
                if (abrupt) {
                        stop_ep_timer(ep);
                        ep->com.state = ABORTING;
                } else
                        ep->com.state = MORIBUND;
                break;
        case MORIBUND:
        case ABORTING:
        case DEAD:
                PDBG("%s ignoring disconnect ep %p state %u\n",
                     __func__, ep, ep->com.state);
                break;
        default:
                BUG();
                break;
        }

        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (close) {
                if (abrupt)
                        ret = send_abort(ep, NULL, gfp);
                else
                        ret = send_halfclose(ep, gfp);
                if (ret)
                        fatal = 1;
        }
        if (fatal)
                release_ep_resources(ep);
        return ret;
}
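/*
 * Called when the core detects that our destination route or L2T
 * entry changed; swap in the new dst/l2t pair if this endpoint is
 * affected.  Returns 1 if the endpoint was updated.
 */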
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t)
{
        struct iwch_ep *ep = ctx;

        if (ep->dst != old)
                return 0;

        PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
             l2t);
        dst_hold(new);
        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
        ep->l2t = l2t;
        dst_release(old);
        ep->dst = new;
        return 1;
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep_common *epc = ctx;

        get_ep(epc);

        /*
         * Save ctx and tdev in the skb->cb area.
         */
        *((void **) skb->cb) = ctx;
        *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

        /*
         * Queue the skb and schedule the worker thread.
         */
        skb_queue_tail(&rxq, skb);
        queue_work(workq, &skb_work);
        return 0;
}
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE) {
                printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
                       "for tid %u\n", rpl->status, GET_TID(rpl));
        }
        return CPL_RET_BUF_DONE;
}
int __init iwch_cm_init(void)
{
        skb_queue_head_init(&rxq);

        workq = create_singlethread_workqueue("iw_cxgb3");
        if (!workq)
                return -ENOMEM;

        /*
         * All upcalls from the T3 Core go to sched() to
         * schedule the processing on a work queue.
         */
        t3c_handlers[CPL_ACT_ESTABLISH] = sched;
        t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
        t3c_handlers[CPL_RX_DATA] = sched;
        t3c_handlers[CPL_TX_DMA_ACK] = sched;
        t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
        t3c_handlers[CPL_ABORT_RPL] = sched;
        t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
        t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
        t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
        t3c_handlers[CPL_PASS_ESTABLISH] = sched;
        t3c_handlers[CPL_PEER_CLOSE] = sched;
        t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
        t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
        t3c_handlers[CPL_RDMA_TERMINATE] = sched;
        t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
        t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

        /*
         * These are the real handlers that are called from a
         * work queue.
         */
        work_handlers[CPL_ACT_ESTABLISH] = act_establish;
        work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
        work_handlers[CPL_RX_DATA] = rx_data;
        work_handlers[CPL_TX_DMA_ACK] = tx_ack;
        work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
        work_handlers[CPL_ABORT_RPL] = abort_rpl;
        work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
        work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
        work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
        work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
        work_handlers[CPL_PEER_CLOSE] = peer_close;
        work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
        work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
        work_handlers[CPL_RDMA_TERMINATE] = terminate;
        work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
        return 0;
}
void __exit iwch_cm_term(void)
{
        flush_workqueue(workq);
        destroy_workqueue(workq);
}