/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
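
/*
 * Usage sketch (userspace, not part of this file): sysctl_tcp_tw_reuse above
 * is the kernel side of the net.ipv4.tcp_tw_reuse knob. A minimal,
 * hypothetical way to flip it from C, assuming the usual procfs path:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1\n", f);	// let connect() reuse TIME-WAIT pairs
 *		return fclose(f) ? 1 : 0;
 *	}
 *
 * With the knob set, tcp_twsk_unique() above lets a new connection take over
 * a TIME-WAIT port pair once the timestamp test is satisfied.
 */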
static int tcp_repair_connect(struct sock *sk)
{
	tcp_connect_init(sk);
	tcp_finish_connect(sk, NULL);

	return 0;
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	if (likely(!tp->repair))
		err = tcp_connect(sk);
	else
		err = tcp_repair_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
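
/*
 * Usage sketch (userspace, not part of this file): tcp_v4_connect() above is
 * where a plain connect() on an AF_INET stream socket ends up. A minimal,
 * hypothetical client; the destination address is an example only:
 *
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in dst;
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&dst, 0, sizeof(dst));
 *		dst.sin_family = AF_INET;	// else -EAFNOSUPPORT above
 *		dst.sin_port   = htons(80);
 *		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */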
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk) &&
	    type != ICMP_DEST_UNREACH &&
	    code != ICMP_FRAG_NEEDED)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}
	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:	/* Normally cannot happen.
				   It can, f.e., if SYNs crossed,
				   or with Fast Open.
				 */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}
	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
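
/*
 * Usage sketch (userspace, not part of this file): the "more general error
 * queue" wished for above exists as IP_RECVERR. With it enabled (which also
 * sets inet->recverr), the errors that tcp_v4_err() records can be read from
 * the socket's error queue. A hypothetical fragment:
 *
 *	int on = 1;
 *	char cbuf[512];
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	// ... later, after a send() fails with e.g. EHOSTUNREACH:
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
 *		// walk the cmsg headers for struct sock_extended_err
 *		// entries with ee_origin == SO_EE_ORIGIN_ICMP
 *	}
 */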
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
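
/*
 * Illustration (not kernel code): the checksum prepared above covers the
 * RFC 793 pseudo-header followed by the TCP header and payload. A
 * self-contained sketch of the same one's-complement arithmetic, assuming a
 * contiguous segment buffer in network byte order:
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint32_t sum16(const uint8_t *p, size_t n, uint32_t sum)
 *	{
 *		while (n > 1) {
 *			sum += (uint32_t)p[0] << 8 | p[1];
 *			p += 2;
 *			n -= 2;
 *		}
 *		if (n)
 *			sum += (uint32_t)p[0] << 8;	// odd trailing byte
 *		return sum;
 *	}
 *
 *	uint16_t tcp4_csum(const uint8_t saddr[4], const uint8_t daddr[4],
 *			   const uint8_t *seg, size_t len)
 *	{
 *		uint32_t sum = 0;
 *
 *		sum = sum16(saddr, 4, sum);	// pseudo-header: addresses,
 *		sum = sum16(daddr, 4, sum);	// zero pad + protocol,
 *		sum += 6;			// IPPROTO_TCP == 6
 *		sum += (uint32_t)len;		// and TCP length
 *		sum = sum16(seg, len, sum);	// TCP header + payload
 *		while (sum >> 16)		// fold the carries
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 */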
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/* The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not loosening security here:
		 * the incoming packet is checked against the md5 hash with the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost and
	 * routing might fail. Use the iif for the oif to make sure we can
	 * still deliver the reply.
	 */
	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
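
/*
 * Usage sketch (userspace, not part of this file): tcp_v4_parse_md5_keys()
 * above is the kernel side of the TCP_MD5SIG socket option (RFC 2385). A
 * minimal, hypothetical way to install a key for one peer:
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	peer->sin_family = AF_INET;	// anything else is rejected above
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key, matching the tcp_md5_do_del()
 * branch above.
 */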
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
			    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL */
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
		    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}
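
/*
 * Usage sketch (userspace, not part of this file): the server-side checks
 * above pair with a client that sends data in the SYN. On kernels with
 * client-side Fast Open support, that is done with sendto(MSG_FASTOPEN)
 * instead of connect(); a hypothetical fragment, with dst filled in as for
 * a normal connect():
 *
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	ssize_t n = sendto(fd, "GET /\r\n", 7, MSG_FASTOPEN,
 *			   (struct sockaddr *)&dst, sizeof(dst));
 *
 * The first exchange merely requests a cookie (the foc->len == 0 branch
 * above); once the client has cached one, the payload rides on the SYN.
 */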
static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req,
				    struct request_values *rvp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->retrans = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
	    TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
	    want_cookie ? NULL : &foc);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);
	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    (struct request_values *)&tmp_ext,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
		     ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
	    (struct request_values *)&tmp_ext))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(net, &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;
no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}
	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
2234 * Get next listener socket follow cur. If cur is NULL, get first socket
2235 * starting from bucket given in st->bucket; when st->bucket is zero the
2236 * very first socket in the hash table is returned.
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

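/* Advance to logical position *pos among listening sockets; *pos is
 * decremented for every socket skipped and is left at the shortfall
 * (zero if the position was reached). */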
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

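/* empty_bucket() is used without the bucket lock: the dump is a
 * best-effort snapshot, so racing with an insertion into a bucket we
 * are about to skip is harmless. */
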
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net))
				continue;
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net))
				continue;
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

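/* Advance to logical position pos among established/TIME_WAIT sockets,
 * starting from bucket zero. */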
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

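/* seq_file calls ->start afresh for every read(2) chunk.  Matching *pos
 * against the cached st->last_pos lets a sequential reader resume from
 * the saved bucket/offset instead of re-walking all earlier entries,
 * keeping a full dump linear rather than quadratic in table size. */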
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
		/* fall through: the listening bucket lock is still held */
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

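/* ->open() shared by the per-af /proc/net/tcp* entries; the matching
 * tcp_seq_afinfo was stashed in the proc dir entry's data at register
 * time. */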
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

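/*
 * Row format shared by the three printers below: slot, hexadecimal
 * local_address:port and rem_address:port, state, tx_queue:rx_queue,
 * timer kind:expiry, retransmits, uid, timeout count, inode, then
 * refcount, socket pointer and (for full sockets) af-specific extras.
 */
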
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

2653 unsigned long timer_expires;
2654 const struct tcp_sock *tp = tcp_sk(sk);
2655 const struct inet_connection_sock *icsk = inet_csk(sk);
2656 const struct inet_sock *inet = inet_sk(sk);
2657 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2658 __be32 dest = inet->inet_daddr;
2659 __be32 src = inet->inet_rcv_saddr;
2660 __u16 destp = ntohs(inet->inet_dport);
2661 __u16 srcp = ntohs(inet->inet_sport);
2664 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2666 timer_expires = icsk->icsk_timeout;
2667 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2669 timer_expires = icsk->icsk_timeout;
2670 } else if (timer_pending(&sk->sk_timer)) {
2672 timer_expires = sk->sk_timer.expires;
2675 timer_expires = jiffies;
2678 if (sk->sk_state == TCP_LISTEN)
2679 rx_queue = sk->sk_ack_backlog;
2682 * because we dont lock socket, we might find a transient negative value
2684 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2686 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2687 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2688 i, src, srcp, dest, destp, sk->sk_state,
2689 tp->write_seq - tp->snd_una,
2692 jiffies_delta_to_clock_t(timer_expires - jiffies),
2693 icsk->icsk_retransmits,
2694 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2695 icsk->icsk_probes_out,
2697 atomic_read(&sk->sk_refcnt), sk,
2698 jiffies_to_clock_t(icsk->icsk_rto),
2699 jiffies_to_clock_t(icsk->icsk_ack.ato),
2700 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2702 sk->sk_state == TCP_LISTEN ?
2703 (fastopenq ? fastopenq->max_qlen : 0) :
2704 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

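/*
 * Illustrative (not captured) row for a socket listening on
 * 127.0.0.1:22 - addresses and ports are hexadecimal:
 *
 *  0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000
 *     00000000     0        0 12345 1 ffff88003c1e0000 100 0 0 10 0
 */
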
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

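/*
 * GRO receive for IPv4 TCP: validate (CHECKSUM_COMPLETE) or compute
 * (CHECKSUM_NONE) the checksum over the whole gro span before letting
 * tcp_gro_receive() try to coalesce the segment; anything that fails
 * verification is marked for flush and skips aggregation.
 */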
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

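/* Operations vector handed to the socket layer for SOCK_STREAM/
 * IPPROTO_TCP sockets on AF_INET; IPv6 supplies its own tcpv6_prot. */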
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

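/* Per-netns hooks.  tcp_sk_exit_batch() runs once for a whole batch of
 * exiting namespaces and evicts their remaining TIME_WAIT sockets. */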
static int __net_init tcp_sk_init(struct net *net)
{
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}