/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
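/* Worked example (editor's note, not in the original source): if the old
 * TIME_WAIT socket had tw_snd_nxt = 1000, the reused connection starts at
 * write_seq = 1000 + 65535 + 2 = 66537.  Skipping 65535 moves the new
 * sequence space past the largest unscaled window the old peer could
 * still accept, so stray duplicates of the old connection cannot land
 * inside it; the +2 is a small extra margin (one plausible reading: room
 * for the old FIN and new SYN sequence numbers).
 */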
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
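/* Usage sketch (userspace, illustrative only -- not part of this file):
 * the path above runs underneath an ordinary blocking connect() call:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");	// e.g. EAFNOSUPPORT for a bad family
 */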
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC 1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
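/* Note (editor's paraphrase of the two branches above): for
 * CHECKSUM_PARTIAL the hardware (or skb_checksum_help()) finishes the
 * job later, so th->check is primed with only the inverted pseudo-header
 * sum while csum_start/csum_offset tell the device where to fold in the
 * TCP header and payload; in the software branch the complete checksum
 * is computed right here from csum_partial() over the header plus the
 * accumulated skb->csum over the data.
 */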
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not loosen security here:
		 * the incoming packet is checked with the md5 hash for the key
		 * we find; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk) ||
					   lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
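/* Usage sketch (userspace, illustrative only): installing an RFC 2385 key
 * for one peer.  tcpm_addr must be AF_INET here, and a zero tcpm_keylen
 * deletes the key, mirroring the parsing above:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */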
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for established/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
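/* Summary note (derived from the call sequence above, not original text):
 * the RFC 2385 digest covers, in order, the IPv4 pseudo-header, the TCP
 * header with its checksum field zeroed (tcp_md5_hash_header() handles
 * that), the segment payload, and finally the key itself.
 */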
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
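/* Tuning note (fact about the sysctl referenced above): setting
 * net.ipv4.tcp_low_latency to 1 makes the first test in tcp_prequeue()
 * fail, so segments bypass the prequeue and are processed immediately
 * in softirq context instead of being batched into process context.
 */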
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk = NULL;

		sk = req->rsk_listener;
		if (tcp_v4_inbound_md5_hash(sk, skb))
			goto discard_and_relse;
		if (likely(sk->sk_state == TCP_LISTEN)) {
			nsk = tcp_check_req(sk, skb, req, false);
		} else {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_it;
		}
		if (nsk == sk) {
			sock_hold(sk);
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_it;
		} else {
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
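/* Example output (illustrative; the field values are invented): one line
 * of /proc/net/tcp as produced by the printf formats above:
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *    0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 ffff88003d3af040 100 0 0 10 0
 *
 * Addresses are little-endian hex (0100007F:0016 is 127.0.0.1:22) and
 * st is the TCP state (0A == TCP_LISTEN).
 */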
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}