2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/sysctl.h>
27 #include <linux/workqueue.h>
29 #include <net/inet_common.h>
33 #define SYNC_INIT 0 /* let the user enable it */
38 int sysctl_tcp_tw_recycle;
39 int sysctl_tcp_max_tw_buckets = NR_FILE*2;
41 int sysctl_tcp_syncookies = SYNC_INIT;
42 int sysctl_tcp_abort_on_overflow;
44 static void tcp_tw_schedule(struct inet_timewait_sock *tw, int timeo);
46 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
50 if (after(end_seq, s_win) && before(seq, e_win))
52 return (seq == e_win && seq == end_seq);
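/* Roughly: tcp_in_window() reports whether the segment [seq, end_seq)
 * overlaps the receive window [s_win, e_win).  Illustrative values (not from
 * the original source): a window of [1000, 1500) accepts
 * tcp_in_window(1000, 1001, 1000, 1500), rejects
 * tcp_in_window(2000, 2001, 1000, 1500), and the final test above accepts a
 * zero-length segment sitting exactly on the right edge, e.g.
 * tcp_in_window(1500, 1500, 1000, 1500).
 */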
55 /* New-style handling of TIME_WAIT sockets. */
60 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
61 * when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
62 * (and, probably, a tail of data) and one or more of our ACKs are lost.
63 * * What is the TIME-WAIT timeout? It is associated with maximal packet
64 * lifetime in the internet, which leads to the wrong conclusion that
65 * it is set to catch "old duplicate segments" wandering out of their path.
66 * It is not quite correct. This timeout is calculated so that it exceeds
67 * the maximal retransmission timeout by enough to allow the loss of one (or more)
68 * segments sent by the peer and of our ACKs. This time may be calculated from the RTO.
69 * * When a TIME-WAIT socket receives RST, it means that the other end
70 * finally closed and we are allowed to kill TIME-WAIT too.
71 * * Second purpose of TIME-WAIT is catching old duplicate segments.
72 * Well, certainly it is pure paranoia, but if we load TIME-WAIT
73 * with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
74 * * If we invented some more clever way to catch duplicates
75 * (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
77 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
78 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
79 * from the very beginning.
81 * NOTE. With recycling (and later with fin-wait-2) TW bucket
82 * is _not_ stateless. It means that, strictly speaking, we must
83 * spinlock it. I do not want to! Well, the probability of misbehaviour
84 * is ridiculously low and, it seems, we could use some mb() tricks
85 * to avoid misreading sequence numbers, states etc. --ANK
88 tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
89 const struct tcphdr *th)
91 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
92 struct tcp_options_received tmp_opt;
95 tmp_opt.saw_tstamp = 0;
96 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
97 tcp_parse_options(skb, &tmp_opt, 0);
99 if (tmp_opt.saw_tstamp) {
100 tmp_opt.ts_recent = tcptw->tw_ts_recent;
101 tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
102 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
106 if (tw->tw_substate == TCP_FIN_WAIT2) {
107 /* Just repeat all the checks of tcp_rcv_state_process() */
109 /* Out of window, send ACK */
111 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
113 tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
119 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
123 if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
124 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
126 return TCP_TW_SUCCESS;
129 /* New data or FIN. If new data arrive after half-duplex close,
133 TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
135 tcp_tw_deschedule(tw);
140 /* FIN arrived, enter true time-wait state. */
141 tw->tw_substate = TCP_TIME_WAIT;
142 tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
143 if (tmp_opt.saw_tstamp) {
144 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
145 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
148 /* I am shamed, but failed to make it more elegant.
149 * Yes, it is a direct reference to IP, which is impossible
150 * to generalize to IPv6. Taking into account that IPv6
151 * does not understand recycling in any case, it is not
152 * a big problem in practice. --ANK */
153 if (tw->tw_family == AF_INET &&
154 sysctl_tcp_tw_recycle && tcptw->tw_ts_recent_stamp &&
155 tcp_v4_tw_remember_stamp(tw))
156 tcp_tw_schedule(tw, tw->tw_timeout);
158 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
163 * Now real TIME-WAIT state.
166 * "When a connection is [...] on TIME-WAIT state [...]
167 * [a TCP] MAY accept a new SYN from the remote TCP to
168 * reopen the connection directly, if it:
170 * (1) assigns its initial sequence number for the new
171 * connection to be larger than the largest sequence
172 * number it used on the previous connection incarnation,
175 * (2) returns to TIME-WAIT state if the SYN turns out
176 * to be an old duplicate".
180 (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
181 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
182 /* In-window segment; it may only be a reset or a bare ack. */
185 /* This is TIME_WAIT assassination, in two flavors.
186 * Oh well... nobody has a sufficient solution to this
189 if (sysctl_tcp_rfc1337 == 0) {
191 tcp_tw_deschedule(tw);
193 return TCP_TW_SUCCESS;
196 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
198 if (tmp_opt.saw_tstamp) {
199 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
200 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
204 return TCP_TW_SUCCESS;
207 /* Out of window segment.
209 All the segments are ACKed immediately.
211 The only exception is a new SYN. We accept it if it is
212 not an old duplicate and we are not in danger of being killed
213 by delayed old duplicates. The RFC check (that it has a
214 newer sequence number) works at rates <40Mbit/sec.
215 However, if PAWS works, it is reliable AND, even more,
216 we may even relax the silly seq space cutoff.
218 RED-PEN: we violate the main RFC requirement: if this SYN turns out to be an
219 old duplicate (i.e. we receive RST in reply to SYN-ACK),
220 we must return the socket to time-wait state. It is not good,
224 if (th->syn && !th->rst && !th->ack && !paws_reject &&
225 (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
226 (tmp_opt.saw_tstamp &&
227 (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
228 u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
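/* The chosen ISN lies beyond anything the previous incarnation could have
 * used: tw_snd_nxt plus a full 64K (unscaled) window plus 2, which satisfies
 * requirement (1) of the RFC 1122 text quoted above.
 */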
231 TCP_SKB_CB(skb)->when = isn;
236 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
239 /* In this case we must reset the TIMEWAIT timer.
241 * If it is an ACKless SYN it may be both an old duplicate
242 * and a new good SYN with random sequence number <rcv_nxt.
243 * Do not reschedule in the last case.
245 if (paws_reject || th->ack)
246 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
248 /* Send ACK. Note, we do not put the bucket,
249 * it will be released by caller.
254 return TCP_TW_SUCCESS;
258 * Move a socket to time-wait or dead fin-wait-2 state.
260 void tcp_time_wait(struct sock *sk, int state, int timeo)
262 struct inet_timewait_sock *tw = NULL;
263 const struct tcp_sock *tp = tcp_sk(sk);
266 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
267 recycle_ok = tp->af_specific->remember_stamp(sk);
269 if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
270 tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab, SLAB_ATOMIC);
273 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
274 const struct inet_sock *inet = inet_sk(sk);
275 const int rto = (tp->rto << 2) - (tp->rto >> 1);
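/* (tp->rto << 2) - (tp->rto >> 1) is 4*RTO - RTO/2 = 3.5*RTO, matching the
 * "timeout := RTO * 3.5" rationale in tcp_tw_schedule(); e.g. an RTO worth
 * 200 ms yields a 700 ms recycled TIME-WAIT timeout.
 */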
277 /* Remember our protocol */
278 tw->tw_prot = sk->sk_prot_creator;
280 /* Give us an identity. */
281 tw->tw_daddr = inet->daddr;
282 tw->tw_rcv_saddr = inet->rcv_saddr;
283 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
284 tw->tw_num = inet->num;
285 tw->tw_state = TCP_TIME_WAIT;
286 tw->tw_substate = state;
287 tw->tw_sport = inet->sport;
288 tw->tw_dport = inet->dport;
289 tw->tw_family = sk->sk_family;
290 tw->tw_reuse = sk->sk_reuse;
291 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
292 atomic_set(&tw->tw_refcnt, 1);
294 tw->tw_hashent = sk->sk_hashent;
295 tcptw->tw_rcv_nxt = tp->rcv_nxt;
296 tcptw->tw_snd_nxt = tp->snd_nxt;
297 tcptw->tw_rcv_wnd = tcp_receive_window(tp);
298 tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
299 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
300 inet_twsk_dead_node_init(tw);
302 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
303 if (tw->tw_family == PF_INET6) {
304 struct ipv6_pinfo *np = inet6_sk(sk);
305 struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
307 ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
308 ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
309 tw->tw_ipv6only = np->ipv6only;
313 /* Linkage updates. */
314 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
316 /* Get the TIME_WAIT timeout firing. */
321 tw->tw_timeout = rto;
323 tw->tw_timeout = TCP_TIMEWAIT_LEN;
324 if (state == TCP_TIME_WAIT)
325 timeo = TCP_TIMEWAIT_LEN;
328 tcp_tw_schedule(tw, timeo);
331 /* Sorry, if we're out of memory, just CLOSE this
332 * socket up. We've got bigger problems than
333 * non-graceful socket closings.
336 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
339 tcp_update_metrics(sk);
343 /* Kill off TIME_WAIT sockets once their lifetime has expired. */
344 static int tcp_tw_death_row_slot;
346 static void tcp_twkill(unsigned long);
348 /* TIME_WAIT reaping mechanism. */
349 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
350 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
352 #define TCP_TWKILL_QUOTA 100
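/* Assuming the usual 60 second TCP_TIMEWAIT_LEN (60*HZ), each of the 8 slots
 * spans 7.5 seconds; the timer advances one slot per period and reaps at
 * most TCP_TWKILL_QUOTA (100) buckets per run before deferring the rest to
 * the workqueue below.
 */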
354 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
355 static DEFINE_SPINLOCK(tw_death_lock);
356 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
357 static void twkill_work(void *);
358 static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
359 static u32 twkill_thread_slots;
361 /* Returns non-zero if quota exceeded. */
362 static int tcp_do_twkill_work(int slot, unsigned int quota)
364 struct inet_timewait_sock *tw;
365 struct hlist_node *node;
369 /* NOTE: compare this to the previous version where the lock
370 * was released after detaching the chain. It was racy,
371 * because tw buckets are scheduled in a non-serialized context
372 * in 2.3 (with netfilter), and with softnet it is common, because
373 * soft irqs are not sequenced.
378 inet_twsk_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
379 __inet_twsk_del_dead_node(tw);
380 spin_unlock(&tw_death_lock);
381 __inet_twsk_kill(tw, &tcp_hashinfo);
384 spin_lock(&tw_death_lock);
385 if (killed > quota) {
390 /* While we dropped tw_death_lock, another cpu may have
391 * killed off the next TW bucket in the list, therefore
392 * do a fresh re-read of the hlist head node with the
393 * lock reacquired. We still use the hlist traversal
394 * macro in order to get the prefetches.
399 tcp_tw_count -= killed;
400 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
405 static void tcp_twkill(unsigned long dummy)
409 spin_lock(&tw_death_lock);
411 if (tcp_tw_count == 0)
415 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
417 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
419 schedule_work(&tcp_twkill_work);
422 /* We purged the entire slot, anything left? */
426 tcp_tw_death_row_slot =
427 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
429 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
431 spin_unlock(&tw_death_lock);
434 extern void twkill_slots_invalid(void);
436 static void twkill_work(void *dummy)
440 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
441 twkill_slots_invalid();
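/* twkill_slots_invalid() is declared but never defined: if the constant
 * condition above were ever true (the slot count no longer fitting in the
 * twkill_thread_slots bitmask), the call would survive optimization and the
 * build would fail at link time, so this acts as a build-time assertion.
 */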
443 while (twkill_thread_slots) {
444 spin_lock_bh(&tw_death_lock);
445 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
446 if (!(twkill_thread_slots & (1 << i)))
449 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
450 if (need_resched()) {
451 spin_unlock_bh(&tw_death_lock);
453 spin_lock_bh(&tw_death_lock);
457 twkill_thread_slots &= ~(1 << i);
459 spin_unlock_bh(&tw_death_lock);
463 /* These are always called from BH context. See callers in
464 * tcp_input.c to verify this.
467 /* This is for handling early-kills of TIME_WAIT sockets. */
468 void tcp_tw_deschedule(struct inet_timewait_sock *tw)
470 spin_lock(&tw_death_lock);
471 if (inet_twsk_del_dead_node(tw)) {
473 if (--tcp_tw_count == 0)
474 del_timer(&tcp_tw_timer);
476 spin_unlock(&tw_death_lock);
477 __inet_twsk_kill(tw, &tcp_hashinfo);
480 /* Short-time timewait calendar */
482 static int tcp_twcal_hand = -1;
483 static int tcp_twcal_jiffie;
484 static void tcp_twcal_tick(unsigned long);
485 static struct timer_list tcp_twcal_timer =
486 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
487 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
489 static void tcp_tw_schedule(struct inet_timewait_sock *tw, const int timeo)
491 struct hlist_head *list;
494 /* timeout := RTO * 3.5
496 * 3.5 = 1+2+0.5 to wait for two retransmits.
498 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
499 * our ACK acking that FIN can be lost. If N subsequent retransmitted
500 * FINs (or previous segments) are lost (the probability of such an event
501 * is p^(N+1), where p is the probability of losing a single packet and the
502 * time to detect the loss is about RTO*(2^N - 1) with exponential
503 * backoff). The normal timewait length is calculated so that we
504 * wait at least for one retransmitted FIN (maximal RTO is 120 sec).
505 * [ BTW Linux, following BSD, violates this requirement, waiting
506 * only 60 sec; we should wait at least 240 secs.
507 * Well, 240 consumes too many resources 8)
509 * This interval is not reduced to catch old duplicates and
510 * responses to our wandering segments living for two MSLs.
511 * However, if we use PAWS to detect
512 * old duplicates, we can reduce the interval to the bounds required
513 * by the RTO, rather than the MSL. So, if the peer understands PAWS, we
514 * kill the tw bucket after 3.5*RTO (it is important that this number
515 * is greater than the TS tick!) and detect old duplicates with the help
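/* The computation below rounds timeo up to whole ticks of
 * 2^TCP_TW_RECYCLE_TICK jiffies.  Purely illustrative numbers: with a
 * 32-jiffy tick, a 100-jiffy timeout maps to slot (100 + 31) >> 5 = 4.
 */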
518 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
520 spin_lock(&tw_death_lock);
522 /* Unlink it, if it was scheduled */
523 if (inet_twsk_del_dead_node(tw))
526 atomic_inc(&tw->tw_refcnt);
528 if (slot >= TCP_TW_RECYCLE_SLOTS) {
529 /* Schedule to slow timer */
530 if (timeo >= TCP_TIMEWAIT_LEN) {
531 slot = TCP_TWKILL_SLOTS-1;
533 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
534 if (slot >= TCP_TWKILL_SLOTS)
535 slot = TCP_TWKILL_SLOTS-1;
537 tw->tw_ttd = jiffies + timeo;
538 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
539 list = &tcp_tw_death_row[slot];
541 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
543 if (tcp_twcal_hand < 0) {
545 tcp_twcal_jiffie = jiffies;
546 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
547 add_timer(&tcp_twcal_timer);
549 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
550 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
551 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
553 list = &tcp_twcal_row[slot];
556 hlist_add_head(&tw->tw_death_node, list);
558 if (tcp_tw_count++ == 0)
559 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
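/* The slow reaper timer runs only while TIME-WAIT buckets exist: it is armed
 * here when the count goes from 0 to 1, and deleted again in
 * tcp_tw_deschedule() and tcp_twcal_tick() once the count drops back to zero.
 */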
560 spin_unlock(&tw_death_lock);
563 void tcp_twcal_tick(unsigned long dummy)
567 unsigned long now = jiffies;
571 spin_lock(&tw_death_lock);
572 if (tcp_twcal_hand < 0)
575 slot = tcp_twcal_hand;
576 j = tcp_twcal_jiffie;
578 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
579 if (time_before_eq(j, now)) {
580 struct hlist_node *node, *safe;
581 struct inet_timewait_sock *tw;
583 inet_twsk_for_each_inmate_safe(tw, node, safe,
584 &tcp_twcal_row[slot]) {
585 __inet_twsk_del_dead_node(tw);
586 __inet_twsk_kill(tw, &tcp_hashinfo);
593 tcp_twcal_jiffie = j;
594 tcp_twcal_hand = slot;
597 if (!hlist_empty(&tcp_twcal_row[slot])) {
598 mod_timer(&tcp_twcal_timer, j);
602 j += (1<<TCP_TW_RECYCLE_TICK);
603 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
608 if ((tcp_tw_count -= killed) == 0)
609 del_timer(&tcp_tw_timer);
610 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
611 spin_unlock(&tw_death_lock);
614 /* This is not only more efficient than what we used to do, it eliminates
615 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
617 * Actually, we could avoid lots of memory writes here. The tp of the listening
618 * socket contains all necessary default parameters.
620 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
622 /* allocate the newsk from the same slab as the master sock;
623 * otherwise, at sk_free time we'd try to free it from the wrong
624 * slabcache (i.e. is it TCPv4 or v6?); this is handled thru sk->sk_prot -acme */
625 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
628 struct inet_request_sock *ireq = inet_rsk(req);
629 struct tcp_request_sock *treq = tcp_rsk(req);
630 struct inet_sock *newinet = inet_sk(newsk);
631 struct tcp_sock *newtp;
632 struct sk_filter *filter;
634 memcpy(newsk, sk, sizeof(struct tcp_sock));
635 newsk->sk_state = TCP_SYN_RECV;
638 sk_node_init(&newsk->sk_node);
639 newinet->bind_hash = NULL;
641 /* Clone the TCP header template */
642 newinet->dport = ireq->rmt_port;
644 sock_lock_init(newsk);
647 rwlock_init(&newsk->sk_dst_lock);
648 newsk->sk_dst_cache = NULL;
649 atomic_set(&newsk->sk_rmem_alloc, 0);
650 skb_queue_head_init(&newsk->sk_receive_queue);
651 atomic_set(&newsk->sk_wmem_alloc, 0);
652 skb_queue_head_init(&newsk->sk_write_queue);
653 atomic_set(&newsk->sk_omem_alloc, 0);
654 newsk->sk_wmem_queued = 0;
655 newsk->sk_forward_alloc = 0;
657 sock_reset_flag(newsk, SOCK_DONE);
658 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
659 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
660 newsk->sk_send_head = NULL;
661 rwlock_init(&newsk->sk_callback_lock);
662 skb_queue_head_init(&newsk->sk_error_queue);
663 newsk->sk_write_space = sk_stream_write_space;
665 if ((filter = newsk->sk_filter) != NULL)
666 sk_filter_charge(newsk, filter);
668 if (unlikely(xfrm_sk_clone_policy(newsk))) {
669 /* It is still a raw copy of the parent, so invalidate
670 * the destructor and do a plain sk_free() */
671 newsk->sk_destruct = NULL;
676 /* Now setup tcp_sock */
677 newtp = tcp_sk(newsk);
678 newtp->pred_flags = 0;
679 newtp->rcv_nxt = treq->rcv_isn + 1;
680 newtp->snd_nxt = treq->snt_isn + 1;
681 newtp->snd_una = treq->snt_isn + 1;
682 newtp->snd_sml = treq->snt_isn + 1;
684 tcp_prequeue_init(newtp);
686 tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
688 newtp->retransmits = 0;
691 newtp->mdev = TCP_TIMEOUT_INIT;
692 newtp->rto = TCP_TIMEOUT_INIT;
694 newtp->packets_out = 0;
696 newtp->retrans_out = 0;
697 newtp->sacked_out = 0;
698 newtp->fackets_out = 0;
699 newtp->snd_ssthresh = 0x7fffffff;
701 /* So many TCP implementations out there (incorrectly) count the
702 * initial SYN frame in their delayed-ACK and congestion control
703 * algorithms that we must have the following bandaid to talk
704 * efficiently to them. -DaveM
707 newtp->snd_cwnd_cnt = 0;
709 newtp->frto_counter = 0;
710 newtp->frto_highmark = 0;
712 newtp->ca_ops = &tcp_reno;
714 tcp_set_ca_state(newtp, TCP_CA_Open);
715 tcp_init_xmit_timers(newsk);
716 skb_queue_head_init(&newtp->out_of_order_queue);
717 newtp->rcv_wup = treq->rcv_isn + 1;
718 newtp->write_seq = treq->snt_isn + 1;
719 newtp->pushed_seq = newtp->write_seq;
720 newtp->copied_seq = treq->rcv_isn + 1;
722 newtp->rx_opt.saw_tstamp = 0;
724 newtp->rx_opt.dsack = 0;
725 newtp->rx_opt.eff_sacks = 0;
727 newtp->probes_out = 0;
728 newtp->rx_opt.num_sacks = 0;
730 /* Deinitialize accept_queue to trap illegal accesses. */
731 memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
733 /* Back to base struct sock members. */
735 newsk->sk_priority = 0;
736 atomic_set(&newsk->sk_refcnt, 2);
739 * Increment the counter in the same struct proto as the master
740 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
741 * is the same as sk->sk_prot->socks, as this field was copied
742 * with memcpy), same rationale as the first comment in this
745 * This _changes_ the previous behaviour, where
746 * tcp_create_openreq_child was always incrementing the
747 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
748 * to be taken into account in all callers. -acme
750 sk_refcnt_debug_inc(newsk);
752 atomic_inc(&tcp_sockets_allocated);
754 if (sock_flag(newsk, SOCK_KEEPOPEN))
755 tcp_reset_keepalive_timer(newsk,
756 keepalive_time_when(newtp));
757 newsk->sk_socket = NULL;
758 newsk->sk_sleep = NULL;
760 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
761 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
763 newtp->rx_opt.sack_ok |= 2;
765 newtp->window_clamp = req->window_clamp;
766 newtp->rcv_ssthresh = req->rcv_wnd;
767 newtp->rcv_wnd = req->rcv_wnd;
768 newtp->rx_opt.wscale_ok = ireq->wscale_ok;
769 if (newtp->rx_opt.wscale_ok) {
770 newtp->rx_opt.snd_wscale = ireq->snd_wscale;
771 newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
773 newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
774 newtp->window_clamp = min(newtp->window_clamp, 65535U);
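/* Without window scaling, the 16-bit window field in the TCP header caps
 * what we can ever advertise, hence the clamp to 65535 when the peer did not
 * send the wscale option.
 */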
776 newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
777 newtp->max_window = newtp->snd_wnd;
779 if (newtp->rx_opt.tstamp_ok) {
780 newtp->rx_opt.ts_recent = req->ts_recent;
781 newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
782 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
784 newtp->rx_opt.ts_recent_stamp = 0;
785 newtp->tcp_header_len = sizeof(struct tcphdr);
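/* With timestamps the header template is 20 + 12 = 32 bytes (struct tcphdr
 * plus the timestamp option padded to TCPOLEN_TSTAMP_ALIGNED); without them
 * it is the bare 20 byte struct tcphdr.
 */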
787 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
788 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
789 newtp->rx_opt.mss_clamp = req->mss;
790 TCP_ECN_openreq_child(newtp, req);
791 if (newtp->ecn_flags&TCP_ECN_OK)
792 sock_set_flag(newsk, SOCK_NO_LARGESEND);
794 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
800 * Process an incoming packet for SYN_RECV sockets represented
804 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
805 struct request_sock *req,
806 struct request_sock **prev)
808 struct tcphdr *th = skb->h.th;
809 struct tcp_sock *tp = tcp_sk(sk);
810 u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
812 struct tcp_options_received tmp_opt;
815 tmp_opt.saw_tstamp = 0;
816 if (th->doff > (sizeof(struct tcphdr)>>2)) {
817 tcp_parse_options(skb, &tmp_opt, 0);
819 if (tmp_opt.saw_tstamp) {
820 tmp_opt.ts_recent = req->ts_recent;
821 /* We do not store true stamp, but it is not required,
822 * it can be estimated (approximately)
825 tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
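/* The stamp's age is estimated as the initial retransmission timeout shifted
 * left by the retransmit count; e.g. assuming the common 3 second
 * TCP_TIMEOUT_INIT, req->retrans == 2 makes the timestamp look roughly
 * 12 seconds old.
 */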
826 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
830 /* Check for pure retransmitted SYN. */
831 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
832 flg == TCP_FLAG_SYN &&
835 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
836 * this case on figure 6 and figure 8, but formal
837 * protocol description says NOTHING.
838 * To be more exact, it says that we should send ACK,
839 * because this segment (at least, if it has no data)
842 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
843 * describe the SYN-RECV state. All the description
844 * is wrong; we cannot believe it and should
845 * rely only on common sense and implementation
848 * Enforce "SYN-ACK" according to figure 8, figure 6
849 * of RFC793, fixed by RFC1122.
851 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
855 /* Further reproduces section "SEGMENT ARRIVES"
856 for state SYN-RECEIVED of RFC793.
857 It is broken, however: the only case it does not handle is
858 when SYNs are crossed.
860 You would think that SYN crossing is impossible here, since
861 we should have a SYN_SENT socket (from connect()) on our end,
862 but this is not true if the crossed SYNs were sent to both
863 ends by a malicious third party. We must defend against this,
864 and to do that we first verify the ACK (as per RFC793, page
865 36) and reset if it is invalid. Is this a true full defense?
866 To convince ourselves, let us consider a way in which the ACK
867 test can still pass in this 'malicious crossed SYNs' case.
868 Malicious sender sends identical SYNs (and thus identical sequence
869 numbers) to both A and B:
874 By our good fortune, both A and B select the same initial
875 send sequence number of seven :-)
877 A: sends SYN|ACK, seq=7, ack_seq=8
878 B: sends SYN|ACK, seq=7, ack_seq=8
880 So we are now A eating this SYN|ACK, ACK test passes. So
881 does sequence test, SYN is truncated, and thus we consider
884 If tp->defer_accept, we silently drop this bare ACK. Otherwise,
885 we create an established connection. Both ends (listening sockets)
886 accept the new incoming connection and try to talk to each other. 8-)
888 Note: This case is both harmless and rare. The possibility is about the
889 same as us discovering intelligent life on another planet tomorrow.
891 But generally, we should (RFC lies!) accept ACK
892 from SYNACK both here and in tcp_rcv_state_process().
893 tcp_rcv_state_process() does not, hence we do not either.
895 Note that the case is absolutely generic:
896 we cannot optimize anything here without
897 violating protocol. All the checks must be made
898 before attempt to create socket.
901 /* RFC793 page 36: "If the connection is in any non-synchronized state ...
902 * and the incoming segment acknowledges something not yet
903 * sent (the segment carries an unacceptable ACK) ...
906 * Invalid ACK: a reset will be sent by the listening socket
908 if ((flg & TCP_FLAG_ACK) &&
909 (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
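/* The only acceptable ack_seq from the peer at this point is snt_isn + 1,
 * i.e. an ACK covering exactly our SYN-ACK; anything else is the
 * "unacceptable ACK" case noted above and is answered with a reset by the
 * listening socket.
 */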
912 /* Also, it would not be a bad idea to check rcv_tsecr, which
913 * is essentially ACK extension and too early or too late values
914 * should cause reset in unsynchronized states.
917 /* RFC793: "first check sequence number". */
919 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
920 tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
921 /* Out of window: send ACK and drop. */
922 if (!(flg & TCP_FLAG_RST))
923 req->rsk_ops->send_ack(skb, req);
925 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
929 /* In sequence, PAWS is OK. */
931 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
932 req->ts_recent = tmp_opt.rcv_tsval;
934 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
935 /* Truncate SYN, it is out of window starting
936 at tcp_rsk(req)->rcv_isn + 1. */
937 flg &= ~TCP_FLAG_SYN;
940 /* RFC793: "second check the RST bit" and
941 * "fourth, check the SYN bit"
943 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
944 goto embryonic_reset;
946 /* ACK sequence verified above, just make sure ACK is
947 * set. If ACK not set, just silently drop the packet.
949 if (!(flg & TCP_FLAG_ACK))
952 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
953 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
954 inet_rsk(req)->acked = 1;
958 /* OK, ACK is valid, create big socket and
959 * feed this segment to it. It will repeat all
960 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
961 * ESTABLISHED STATE. If it is dropped after the
962 * socket is created, expect trouble.
964 child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
966 goto listen_overflow;
968 tcp_synq_unlink(tp, req, prev);
969 tcp_synq_removed(sk, req);
971 tcp_acceptq_queue(sk, req, child);
975 if (!sysctl_tcp_abort_on_overflow) {
976 inet_rsk(req)->acked = 1;
981 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
982 if (!(flg & TCP_FLAG_RST))
983 req->rsk_ops->send_reset(skb);
985 tcp_synq_drop(sk, req, prev);
990 * Queue segment on the new socket if the new socket is active,
991 * otherwise we just short-circuit this and continue with
995 int tcp_child_process(struct sock *parent, struct sock *child,
999 int state = child->sk_state;
1001 if (!sock_owned_by_user(child)) {
1002 ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
1004 /* Wakeup parent, send SIGIO */
1005 if (state == TCP_SYN_RECV && child->sk_state != state)
1006 parent->sk_data_ready(parent, 0);
1008 /* Alas, it is possible again, because we do a lookup
1009 * in the main socket hash table and the lock on the listening
1010 * socket does not protect us anymore.
1012 sk_add_backlog(child, skb);
1015 bh_unlock_sock(child);
1020 EXPORT_SYMBOL(tcp_check_req);
1021 EXPORT_SYMBOL(tcp_child_process);
1022 EXPORT_SYMBOL(tcp_create_openreq_child);
1023 EXPORT_SYMBOL(tcp_timewait_state_process);
1024 EXPORT_SYMBOL(tcp_tw_deschedule);