/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

#include <net/tcp.h>
#include <net/inet_common.h>
int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);
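
/* A worked example of the reaper arithmetic above (illustrative note; the
 * values assumed are the stock definitions TCP_TIMEWAIT_LEN = 60 * HZ and
 * INET_TWDR_TWKILL_SLOTS = 8):
 *
 *	period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS
 *	       = (60 * HZ) / 8			(7.5 seconds' worth of jiffies)
 *
 * so tw_timer sweeps one of the eight kill slots every 7.5 s, a full
 * rotation covering the 60 s TIME-WAIT period, while twcal_timer handles
 * the much shorter tw_recycle timeouts.
 */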
/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
static bool tcp_remember_stamp(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_peer *peer;

	peer = icsk->icsk_af_ops->get_peer(sk);
	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		return true;
	}
	return false;
}
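
/* A worked example of the update rule above (illustrative): the cached
 * peer timestamp is refreshed either when our ts_recent is not older than
 * peer->tcp_ts (the wrap-safe (s32) subtraction), or when the cached stamp
 * is more than TCP_PAWS_MSL (60) seconds old and does not postdate our own.
 * E.g. a peer entry stamped 120 s ago loses to a ts_recent_stamp taken
 * 30 s ago via the second clause, so the fresher timestamp wins.
 */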
static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	const struct tcp_timewait_sock *tcptw;
	struct sock *sk = (struct sock *) tw;
	struct inet_peer *peer;

	tcptw = tcp_twsk(sk);
	peer = tcptw->tw_peer;
	if (peer) {
		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		return true;
	}
	return false;
}
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
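
/* Illustration of the wrap-safe comparisons used above: after() and
 * before() subtract in u32 and test the sign of the (s32) result, so
 * tcp_in_window() also works across sequence-number wraparound.  E.g.
 * with s_win = 0xfffffff0 and e_win = 0x00000010 (a window spanning the
 * wrap), a segment with seq = 0xfffffff8 and end_seq = 0x00000008 is
 * accepted: (s32)(0x00000008 - 0xfffffff0) > 0 makes after() true, and
 * (s32)(0xfffffff8 - 0x00000010) < 0 makes before() true.
 */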
/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which results in the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow for the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * A second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
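
/* A worked example for the RTO-based bound mentioned above (illustrative;
 * the expression is the one tcp_time_wait() uses further down):
 *
 *	const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 *
 * i.e. 4*RTO - RTO/2 = 3.5*RTO, long enough to ride out a retransmitted
 * FIN from the peer plus the loss of our ACKs before the bucket is reaped.
 */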
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works only at rates below 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to our
	   SYN-ACK), we must return the socket to time-wait state. It is not
	   good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		/* Pick an ISN above anything the old incarnation used
		 * (RFC 1122 requirement (1) quoted above): old snd_nxt
		 * plus a full 64K window, plus two.
		 */
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
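
/* Condensed sketch of how a caller dispatches on the result (modeled on
 * tcp_v4_rcv()'s do_time_wait handling; error paths and the IPv6 variant
 * are elided):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:
 *		// Acceptable new SYN: look up a listener and reopen,
 *		// descheduling this bucket; otherwise fall through.
 *	case TCP_TW_ACK:
 *		tcp_v4_timewait_ack(sk, skb);
 *		break;
 *	case TCP_TW_RST:
 *		goto no_tcp_socket;
 *	case TCP_TW_SUCCESS:
 *		break;		// segment fully consumed
 *	}
 */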
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	bool recycle_ok = false;
	bool recycle_on = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) {
		recycle_ok = tcp_remember_stamp(sk);
		recycle_on = true;
	}

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);
	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);	/* 3.5 * RTO */
		struct inet_sock *inet = inet_sk(sk);
		struct inet_peer *peer = NULL;

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			tw6->tw_v6_daddr = np->daddr;
			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
		/* Cache the peer for the recycle path only; the reference
		 * taken here is dropped in tcp_twsk_destructor().
		 */
		if (recycle_on)
			peer = icsk->icsk_af_ops->get_peer(sk);
		tcptw->tw_peer = peer;
		if (peer)
			atomic_inc(&peer->refcnt);
#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
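
/* Typical call site (sketch, cf. tcp_fin()): once our FIN is ACKed and the
 * peer's FIN arrives in FIN-WAIT-2, the socket is handed to the timewait
 * machinery:
 *
 *	case TCP_FIN_WAIT2:
 *		// Received a FIN -- send ACK and enter TIME-WAIT.
 *		tcp_send_ack(sk);
 *		tcp_time_wait(sk, TCP_TIME_WAIT, 0);
 *		break;
 */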
void tcp_twsk_destructor(struct sock *sk)
{
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_peer)
		inet_putpeer(twsk->tw_peer);
#ifdef CONFIG_TCP_MD5SIG
	if (twsk->tw_md5_key) {
		tcp_free_md5sig_pool();
		kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

		newsk->sk_rx_dst = dst_clone(skb_dst(skb));
		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection.  There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}
		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;
		if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
		    !try_module_get(newicsk->icsk_ca_ops->owner))
			newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;
		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;
		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not required,
			 * it can be estimated (approximately) from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it does not work only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;
	/* Also, it would be not so bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;
	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
	else if (req->retrans) /* don't take RTT sample if retrans && no TS */
		tcp_rsk(req)->snt_synack = 0;
	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;
listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
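
/* Condensed sketch of the listener-side caller (modeled on
 * tcp_v4_hnd_req(); lookup details are elided):
 *
 *	struct request_sock **prev;
 *	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
 *						       iph->saddr, iph->daddr);
 *	if (req)
 *		return tcp_check_req(sk, skb, req, prev);
 *
 * A NULL result means the segment was consumed (dropped, ACKed, or reset);
 * a non-NULL result is either the listener itself or the new child socket.
 */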
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
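
/* Usage sketch (modeled on tcp_v4_do_rcv()'s TCP_LISTEN path; reset
 * handling abbreviated): a nonzero return asks the caller to send a reset
 * on the child socket.
 *
 *	if (sk->sk_state == TCP_LISTEN) {
 *		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
 *
 *		if (!nsk)
 *			goto discard;
 *		if (nsk != sk) {
 *			if (tcp_child_process(sk, nsk, skb))
 *				goto reset;
 *			return 0;
 *		}
 *	}
 */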