 *	INET		An implementation of the TCP/IP protocol suite for the LINUX
 *			operating system.  INET is implemented using the BSD Socket
 *			interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

#include <net/inet_common.h>
#define SYNC_INIT 0 /* let the user enable it */

int sysctl_tcp_tw_recycle;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;
int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;

static void tcp_tw_schedule(struct inet_timewait_sock *tw, int timeo);
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
	if (after(end_seq, s_win) && before(seq, e_win))
	return (seq == e_win && seq == end_seq);
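/* Illustration (hypothetical values, not taken from the code above): with
 * rcv_nxt = 1000 and rcv_wnd = 500 the receive window is roughly [1000, 1500).
 * A segment with seq = 900, end_seq = 1100 overlaps the window and is
 * accepted; one with seq = 1600, end_seq = 1700 lies at or beyond e_win and
 * is rejected, as is an old segment that ends before s_win.
 */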
/* New-style handling of TIME_WAIT sockets. */

 * * The main purpose of the TIME-WAIT state is to close a connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data), and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct.  This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow one (or more)
 *   segments sent by the peer, and our ACKs, to be lost.  This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill the TIME-WAIT socket too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  It means that, strictly speaking, we must
 * spinlock it, which I do not want to do.  Well, the probability of
 * misbehaviour is ridiculously low and, it seems, we could use some mb()
 * tricks to avoid misreading sequence numbers, states etc.  --ANK
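/* A note on the verdicts returned below (a sketch of the caller contract):
 * TCP_TW_SUCCESS means the segment was consumed with nothing to transmit,
 * while the other tcp_tw_status values tell the receive path to answer with
 * an ACK, answer with a RST, or treat the segment as a SYN opening a fresh
 * connection, respectively.
 */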
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
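			/* Note: tcp_paws_check() applies the PAWS test of
			 * RFC 1323 here.  Roughly, it reports a reject when
			 * the segment's timestamp value is older than
			 * ts_recent (compared with 32-bit serial arithmetic)
			 * while ts_recent is still fresh enough to be
			 * trusted; a rejected segment is treated below as an
			 * old duplicate rather than as new data.
			 */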
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))

		if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			return TCP_TW_SUCCESS;

		/* New data or FIN. If new data arrive after half-duplex close,
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
			tcp_tw_deschedule(tw);

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;

		/* I am ashamed, but failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6.  Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    sysctl_tcp_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			tcp_tw_schedule(tw, tw->tw_timeout);

			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
	 *	Now the real TIME-WAIT state.
	 *
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	     connection to be larger than the largest sequence
	 *	     number it used on the previous connection incarnation,
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	     to be an old duplicate".

	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment: it may only be a reset or a bare ack. */

			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			if (sysctl_tcp_rfc1337 == 0) {
				tcp_tw_deschedule(tw);
				return TCP_TW_SUCCESS;

		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;

		return TCP_TW_SUCCESS;
	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement here: if this SYN
	   later turns out to be an old duplicate (i.e. we receive a RST in
	   reply to our SYN-ACK), we must return the socket to the time-wait
	   state. It is not good,
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
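		/* Note on the constant above: the new initial sequence number
		 * is deliberately placed beyond anything the old incarnation
		 * can have used (tw_snd_nxt, plus the largest unscaled window
		 * of 65535, plus a little slack), which satisfies requirement
		 * (1) of the RFC 1122 rule quoted earlier in this function.
		 */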
		TCP_SKB_CB(skb)->when = isn;

		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	/* In this case we must reset the TIMEWAIT timer.
	 * If it is an ACKless SYN it may be both an old duplicate
	 * and a new good SYN with a random sequence number < rcv_nxt.
	 * Do not reschedule in the latter case.
	if (paws_reject || th->ack)
		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

	/* Send ACK. Note that we do not put the bucket;
	 * it will be released by the caller.

	return TCP_TW_SUCCESS;
 * Move a socket to time-wait or dead fin-wait-2 state.
void tcp_time_wait(struct sock *sk, int state, int timeo)
	struct inet_timewait_sock *tw = NULL;
	const struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
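		/* Note: (rto << 2) - (rto >> 1) is 4*RTO minus RTO/2, i.e.
		 * 3.5 * RTO, matching the "timeout := RTO * 3.5" rationale
		 * documented above tcp_tw_schedule() below.
		 */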
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);

			ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
			tw->tw_timeout = rto;

			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;

		tcp_tw_schedule(tw, timeo);

		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		printk(KERN_INFO "TCP: time wait bucket table overflow\n");

	tcp_update_metrics(sk);
/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot;

static void tcp_twkill(unsigned long);

/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

#define TCP_TWKILL_QUOTA	100
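/* Worked example (illustrative): TCP_TIMEWAIT_LEN is 60 seconds, so with
 * 8 slots each slot covers TCP_TWKILL_PERIOD = 7.5 seconds.  A single run of
 * the reaper handles at most TCP_TWKILL_QUOTA (100) sockets from the current
 * slot; anything beyond the quota is deferred to the work queue below so the
 * timer handler itself stays short.
 */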
static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
static DEFINE_SPINLOCK(tw_death_lock);
static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
static void twkill_work(void *);
static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
static u32 twkill_thread_slots;

/* Returns non-zero if quota exceeded. */
static int tcp_do_twkill_work(int slot, unsigned int quota)
	struct inet_timewait_sock *tw;
	struct hlist_node *node;

	/* NOTE: compare this to the previous version, where the lock was
	 * released after detaching the chain.  That was racy, because tw
	 * buckets are scheduled in a non-serialized context in 2.3 (with
	 * netfilter), and with softnet it is common, because soft irqs
	 * are not sequenced.
	inet_twsk_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&tw_death_lock);
		__inet_twsk_kill(tw, &tcp_hashinfo);

		spin_lock(&tw_death_lock);
		if (killed > quota) {

		/* While we dropped tw_death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.

	tcp_tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
static void tcp_twkill(unsigned long dummy)
	spin_lock(&tw_death_lock);

	if (tcp_tw_count == 0)

	ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
		twkill_thread_slots |= (1 << tcp_tw_death_row_slot);

		schedule_work(&tcp_twkill_work);

		/* We purged the entire slot, anything left? */

	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));

	mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);

	spin_unlock(&tw_death_lock);

extern void twkill_slots_invalid(void);

static void twkill_work(void *dummy)
	if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
		twkill_slots_invalid();

	while (twkill_thread_slots) {
		spin_lock_bh(&tw_death_lock);
		for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
			if (!(twkill_thread_slots & (1 << i)))

			while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&tw_death_lock);
					spin_lock_bh(&tw_death_lock);

			twkill_thread_slots &= ~(1 << i);

		spin_unlock_bh(&tw_death_lock);
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct inet_timewait_sock *tw)
	spin_lock(&tw_death_lock);
	if (inet_twsk_del_dead_node(tw)) {

		if (--tcp_tw_count == 0)
			del_timer(&tcp_tw_timer);

	spin_unlock(&tw_death_lock);
	__inet_twsk_kill(tw, &tcp_hashinfo);

/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer =
		TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
static void tcp_tw_schedule(struct inet_timewait_sock *tw, const int timeo)
	struct hlist_head *list;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff). The normal timewait length is calculated
	 * so that we have waited at least for one retransmitted FIN (the
	 * maximal RTO is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement, waiting
	 *   only for 60 sec; we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 *
	 * This interval is not reduced, to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to the bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with help
	slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
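	/* Note: the expression above is a ceiling division, i.e.
	 * slot = ceil(timeo / 2^TCP_TW_RECYCLE_TICK), so a timeout that is
	 * not an exact multiple of the recycle tick still lands in a slot
	 * that fires no earlier than the requested time.
	 */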
	spin_lock(&tw_death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
	atomic_inc(&tw->tw_refcnt);

	if (slot >= TCP_TW_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = TCP_TWKILL_SLOTS-1;
			slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
			if (slot >= TCP_TWKILL_SLOTS)
				slot = TCP_TWKILL_SLOTS-1;

		tw->tw_ttd = jiffies + timeo;
		slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
		list = &tcp_tw_death_row[slot];

		tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

		if (tcp_twcal_hand < 0) {
			tcp_twcal_jiffie = jiffies;
			tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
			add_timer(&tcp_twcal_timer);

			if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
				mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
			slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);

		list = &tcp_twcal_row[slot];

	hlist_add_head(&tw->tw_death_node, list);

	if (tcp_tw_count++ == 0)
		mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
	spin_unlock(&tw_death_lock);
void tcp_twcal_tick(unsigned long dummy)
	unsigned long now = jiffies;

	spin_lock(&tw_death_lock);
	if (tcp_twcal_hand < 0)

	slot = tcp_twcal_hand;
	j = tcp_twcal_jiffie;

	for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &tcp_twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, &tcp_hashinfo);

			tcp_twcal_jiffie = j;
			tcp_twcal_hand = slot;

			if (!hlist_empty(&tcp_twcal_row[slot])) {
				mod_timer(&tcp_twcal_timer, j);

		j += (1<<TCP_TW_RECYCLE_TICK);
		slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);

	if ((tcp_tw_count -= killed) == 0)
		del_timer(&tcp_tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&tw_death_lock);
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 * Actually, we could save lots of memory writes here.  The tp of the
 * listening socket contains all the necessary default parameters.
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
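		/* Note: the "+ 1" in the next assignments accounts for the
		 * SYN itself, which consumes one sequence number; the first
		 * byte of data therefore starts at rcv_isn + 1 (receive
		 * direction) and snt_isn + 1 (send direction).
		 */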
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		newtp->snd_cwnd_cnt = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newtp->ca_ops = &tcp_reno;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = treq->rcv_isn + 1;
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = treq->rcv_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->rx_opt.num_sacks = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
				newtp->rx_opt.sack_ok |= 2;

		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;

			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);

		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
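		/* Note: TCPOLEN_TSTAMP_ALIGNED is 12 bytes (the 10-byte
		 * timestamp option padded to a multiple of 4 with NOPs), so a
		 * timestamp-enabled connection carries a 32-byte TCP header
		 * on every segment instead of the bare 20 bytes set just
		 * above.
		 */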
		if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		if (newtp->ecn_flags&TCP_ECN_OK)
			sock_set_flag(newsk, SOCK_NO_LARGESEND);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
 *	Process an incoming packet for SYN_RECV sockets represented

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
	struct tcphdr *th = skb->h.th;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);

	struct tcp_options_received tmp_opt;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true timestamp, but that is not
			 * required; it can be estimated (approximately)
			tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&

		 * RFC793 draws (incorrectly! it was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)

		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state.  All of that description
		 * is wrong; we cannot trust it and should rely only
		 * on common sense and implementation

		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
	/* Further reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken, however: it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we consider

	   If tp->defer_accept, we silently drop this bare ACK.  Otherwise,
	   we create an established connection.  Both ends (listening sockets)
	   accept the new incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol.  All the checks must be made
	   before the attempt to create a socket.
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...

	 *	Invalid ACK: reset will be sent by listening socket
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);

			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	if (!(flg & TCP_FLAG_ACK))

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
	/* OK, ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);

	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;

	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with

int tcp_child_process(struct sock *parent, struct sock *child,
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);

		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		sk_add_backlog(child, skb);

	bh_unlock_sock(child);
EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(tcp_tw_deschedule);
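/*
 * A minimal illustrative sketch of how a receive path is expected to combine
 * tcp_check_req() and tcp_child_process(); the local names and labels below
 * are assumptions for illustration only (locking and refcounting elided):
 *
 *	struct sock *child = tcp_check_req(sk, skb, req, prev);
 *
 *	if (child == NULL)
 *		goto discard;	// dropped, pure retransmit, or embryonic reset
 *	if (tcp_child_process(sk, child, skb))
 *		goto reset;	// the child asked for a RST to be sent
 *	return 0;		// segment consumed by the new socket
 */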