2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after a year's coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
54 #include <linux/bottom_half.h>
55 #include <linux/types.h>
56 #include <linux/fcntl.h>
57 #include <linux/module.h>
58 #include <linux/random.h>
59 #include <linux/cache.h>
60 #include <linux/jhash.h>
61 #include <linux/init.h>
62 #include <linux/times.h>
63 #include <linux/slab.h>
65 #include <net/net_namespace.h>
67 #include <net/inet_hashtables.h>
69 #include <net/transp_v6.h>
71 #include <net/inet_common.h>
72 #include <net/timewait_sock.h>
74 #include <net/netdma.h>
75 #include <net/secure_seq.h>
77 #include <linux/inet.h>
78 #include <linux/ipv6.h>
79 #include <linux/stddef.h>
80 #include <linux/proc_fs.h>
81 #include <linux/seq_file.h>
83 #include <linux/crypto.h>
84 #include <linux/scatterlist.h>
86 int sysctl_tcp_tw_reuse __read_mostly;
87 int sysctl_tcp_low_latency __read_mostly;
88 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91 #ifdef CONFIG_TCP_MD5SIG
92 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
98 struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
104 struct inet_hashinfo tcp_hashinfo;
105 EXPORT_SYMBOL(tcp_hashinfo);
107 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
109 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
112 tcp_hdr(skb)->source);
115 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
117 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
118 struct tcp_sock *tp = tcp_sk(sk);
120 /* With PAWS, it is safe from the viewpoint
121 of data integrity. Even without PAWS it is safe provided sequence
122 spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
124 Actually, the idea is close to VJ's: only the timestamp cache is
125 held not per host but per port pair, and the TW bucket is used as the state holder.
128 If the TW bucket has already been destroyed we fall back to VJ's scheme
129 and use the initial timestamp retrieved from the peer table.
131 if (tcptw->tw_ts_recent_stamp &&
132 (twp == NULL || (sysctl_tcp_tw_reuse &&
133 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
134 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
135 if (tp->write_seq == 0)
136 tp->write_seq = 1;
137 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
138 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
145 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
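/*
 * Editorial sketch (illustrative only, not used by this file): the
 * sequence-number bump performed above starts the reused connection
 * one maximum unscaled window (65535) plus 2 beyond the old
 * incarnation's snd_nxt, so stray segments from the previous
 * incarnation can never fall inside the new send window.
 */
static inline u32 tw_reuse_isn_sketch(u32 tw_snd_nxt)
{
	u32 isn = tw_snd_nxt + 65535 + 2;

	return isn ?: 1;	/* the code above likewise avoids a zero write_seq */
}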
147 /* This will initiate an outgoing connection. */
148 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
150 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
151 struct inet_sock *inet = inet_sk(sk);
152 struct tcp_sock *tp = tcp_sk(sk);
153 __be16 orig_sport, orig_dport;
154 __be32 daddr, nexthop;
158 struct ip_options_rcu *inet_opt;
160 if (addr_len < sizeof(struct sockaddr_in))
163 if (usin->sin_family != AF_INET)
164 return -EAFNOSUPPORT;
166 nexthop = daddr = usin->sin_addr.s_addr;
167 inet_opt = rcu_dereference_protected(inet->inet_opt,
168 sock_owned_by_user(sk));
169 if (inet_opt && inet_opt->opt.srr) {
172 nexthop = inet_opt->opt.faddr;
175 orig_sport = inet->inet_sport;
176 orig_dport = usin->sin_port;
177 fl4 = &inet->cork.fl.u.ip4;
178 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
179 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
181 orig_sport, orig_dport, sk, true);
184 if (err == -ENETUNREACH)
185 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
189 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
194 if (!inet_opt || !inet_opt->opt.srr)
197 if (!inet->inet_saddr)
198 inet->inet_saddr = fl4->saddr;
199 inet->inet_rcv_saddr = inet->inet_saddr;
201 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
202 /* Reset inherited state */
203 tp->rx_opt.ts_recent = 0;
204 tp->rx_opt.ts_recent_stamp = 0;
208 if (tcp_death_row.sysctl_tw_recycle &&
209 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
210 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
212 * VJ's idea. We save last timestamp seen from
213 * the destination in peer table, when entering state
214 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
215 * when trying new connection.
218 inet_peer_refcheck(peer);
219 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
220 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
221 tp->rx_opt.ts_recent = peer->tcp_ts;
226 inet->inet_dport = usin->sin_port;
227 inet->inet_daddr = daddr;
229 inet_csk(sk)->icsk_ext_hdr_len = 0;
231 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
233 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
235 /* Socket identity is still unknown (sport may be zero).
236 * However, we set the state to SYN-SENT and, without releasing the socket
237 * lock, select a source port, enter ourselves into the hash tables, and
238 * complete initialization after this.
240 tcp_set_state(sk, TCP_SYN_SENT);
241 err = inet_hash_connect(&tcp_death_row, sk);
245 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
246 inet->inet_sport, inet->inet_dport, sk);
252 /* OK, now commit destination to socket. */
253 sk->sk_gso_type = SKB_GSO_TCPV4;
254 sk_setup_caps(sk, &rt->dst);
257 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
262 inet->inet_id = tp->write_seq ^ jiffies;
264 err = tcp_connect(sk);
273 * This unhashes the socket and releases the local port,
276 tcp_set_state(sk, TCP_CLOSE);
278 sk->sk_route_caps = 0;
279 inet->inet_dport = 0;
282 EXPORT_SYMBOL(tcp_v4_connect);
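#if 0	/* Editorial sketch: the userspace counterpart of tcp_v4_connect().
	 * A plain blocking connect(2) on an IPv4 stream socket is what
	 * drives the function above; names below are illustrative.
	 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_sketch(const char *ip, unsigned short port)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	sin.sin_port = htons(port);
	if (inet_pton(AF_INET, ip, &sin.sin_addr) != 1 ||
	    connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* kernel side has run tcp_v4_connect() and the handshake */
}
#endif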
285 * This routine does path mtu discovery as defined in RFC1191.
287 static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
289 struct dst_entry *dst;
290 struct inet_sock *inet = inet_sk(sk);
292 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
293 * sent out by Linux are always < 576 bytes, so they should go through unfragmented).
296 if (sk->sk_state == TCP_LISTEN)
299 /* We don't check in the dst entry if pmtu discovery is forbidden
300 * on this route. We just assume that no packet-too-big packets
301 * are sent back when pmtu discovery is not active.
302 * There is a small race when the user changes this flag in the
303 * route, but I think that's acceptable.
305 if ((dst = __sk_dst_check(sk, 0)) == NULL)
308 dst->ops->update_pmtu(dst, mtu);
310 /* Something is about to be wrong... Remember soft error
311 * for the case that this connection will not be able to recover.
313 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
314 sk->sk_err_soft = EMSGSIZE;
318 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
319 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
320 tcp_sync_mss(sk, mtu);
322 /* Resend the TCP packet because it's
323 * clear that the old packet has been
324 * dropped. This is the new "fast" path mtu discovery.
327 tcp_simple_retransmit(sk);
328 } /* else let the usual retransmit timer handle it */
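/*
 * Editorial sketch (illustrative only) of what tcp_sync_mss() derives
 * from the new path MTU above: the usable MSS is roughly the MTU minus
 * the fixed IPv4 and TCP header sizes; the real code also accounts for
 * IP options and extension headers via icsk_ext_hdr_len.
 */
static inline unsigned int pmtu_to_mss_sketch(unsigned int pmtu)
{
	return pmtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
}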
332 * This routine is called by the ICMP module when it gets some
333 * sort of error condition. If err < 0 then the socket should
334 * be closed and the error returned to the user. If err > 0
335 * it's just the icmp type << 8 | icmp code. After adjustment
336 * header points to the first 8 bytes of the tcp header. We need
337 * to find the appropriate port.
339 * The locking strategy used here is very "optimistic". When
340 * someone else accesses the socket the ICMP is just dropped
341 * and for some paths there is no check at all.
342 * A more general error queue to queue errors for later handling
343 * is probably better.
347 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
349 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
350 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
351 struct inet_connection_sock *icsk;
353 struct inet_sock *inet;
354 const int type = icmp_hdr(icmp_skb)->type;
355 const int code = icmp_hdr(icmp_skb)->code;
361 struct net *net = dev_net(icmp_skb->dev);
363 if (icmp_skb->len < (iph->ihl << 2) + 8) {
364 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
368 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
369 iph->saddr, th->source, inet_iif(icmp_skb));
371 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
374 if (sk->sk_state == TCP_TIME_WAIT) {
375 inet_twsk_put(inet_twsk(sk));
380 /* If too many ICMPs get dropped on busy
381 * servers this needs to be solved differently.
383 if (sock_owned_by_user(sk))
384 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
386 if (sk->sk_state == TCP_CLOSE)
389 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
390 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
396 seq = ntohl(th->seq);
397 if (sk->sk_state != TCP_LISTEN &&
398 !between(seq, tp->snd_una, tp->snd_nxt)) {
399 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
404 case ICMP_SOURCE_QUENCH:
405 /* Just silently ignore these. */
407 case ICMP_PARAMETERPROB:
410 case ICMP_DEST_UNREACH:
411 if (code > NR_ICMP_UNREACH)
414 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
415 if (!sock_owned_by_user(sk))
416 do_pmtu_discovery(sk, iph, info);
420 err = icmp_err_convert[code].errno;
421 /* check if icmp_skb allows revert of backoff
422 * (see draft-zimmermann-tcp-lcd) */
423 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
425 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
429 if (sock_owned_by_user(sk))
432 icsk->icsk_backoff--;
433 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
434 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
437 skb = tcp_write_queue_head(sk);
440 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
441 tcp_time_stamp - TCP_SKB_CB(skb)->when);
444 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
445 remaining, TCP_RTO_MAX);
447 /* RTO revert clocked out retransmission.
448 * Will retransmit now */
449 tcp_retransmit_timer(sk);
453 case ICMP_TIME_EXCEEDED:
460 switch (sk->sk_state) {
461 struct request_sock *req, **prev;
463 if (sock_owned_by_user(sk))
466 req = inet_csk_search_req(sk, &prev, th->dest,
467 iph->daddr, iph->saddr);
471 /* ICMPs are not backlogged, hence we cannot get
472 an established socket here.
476 if (seq != tcp_rsk(req)->snt_isn) {
477 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
482 * Still in SYN_RECV, just remove it silently.
483 * There is no good way to pass the error to the newly
484 * created socket, and POSIX does not want network
485 * errors returned from accept().
487 inet_csk_reqsk_queue_drop(sk, req, prev);
491 case TCP_SYN_RECV: /* Cannot happen.
492 It can, e.g., happen if SYNs crossed.
494 if (!sock_owned_by_user(sk)) {
497 sk->sk_error_report(sk);
501 sk->sk_err_soft = err;
506 /* If we've already connected we will keep trying
507 * until we time out, or the user gives up.
509 * rfc1122 4.2.3.9 allows us to consider as hard errors
510 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
511 * but it is obsoleted by pmtu discovery).
513 * Note that in the modern internet, where routing is unreliable
514 * and in every dark corner broken firewalls sit sending random
515 * errors ordered by their masters, even these two messages finally lose
516 * their original sense (even Linux sends invalid PORT_UNREACHs)
518 * Now we are in compliance with RFCs.
523 if (!sock_owned_by_user(sk) && inet->recverr) {
525 sk->sk_error_report(sk);
526 } else { /* Only an error on timeout */
527 sk->sk_err_soft = err;
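/*
 * Editorial sketch of the encoding described in the comment above
 * tcp_v4_err(): a positive error value packs the ICMP type in the high
 * byte and the code in the low byte. Illustrative helpers only.
 */
static inline int icmp_err_pack_sketch(u8 type, u8 code)
{
	return (type << 8) | code;
}

static inline void icmp_err_unpack_sketch(int err, u8 *type, u8 *code)
{
	*type = err >> 8;
	*code = err & 0xff;
}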
535 static void __tcp_v4_send_check(struct sk_buff *skb,
536 __be32 saddr, __be32 daddr)
538 struct tcphdr *th = tcp_hdr(skb);
540 if (skb->ip_summed == CHECKSUM_PARTIAL) {
541 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
542 skb->csum_start = skb_transport_header(skb) - skb->head;
543 skb->csum_offset = offsetof(struct tcphdr, check);
545 th->check = tcp_v4_check(skb->len, saddr, daddr,
552 /* This routine computes an IPv4 TCP checksum. */
553 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
555 const struct inet_sock *inet = inet_sk(sk);
557 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
559 EXPORT_SYMBOL(tcp_v4_send_check);
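/*
 * Editorial sketch (standalone, illustrative only) of the checksum that
 * __tcp_v4_send_check() builds with the csum helpers: a 16-bit one's-
 * complement sum over the pseudo-header (source address, destination
 * address, zero-padded protocol, TCP length) followed by the TCP header
 * and payload, folded and inverted. Addresses are taken as four bytes
 * in network order.
 */
static u16 tcp_v4_csum_sketch(const u8 *saddr, const u8 *daddr,
			      const u8 *tcp, size_t len)
{
	u32 sum = 0;
	size_t i;

	for (i = 0; i < 4; i += 2) {
		sum += (saddr[i] << 8) | saddr[i + 1];
		sum += (daddr[i] << 8) | daddr[i + 1];
	}
	sum += IPPROTO_TCP;			/* zero byte + protocol */
	sum += len;				/* TCP header plus data */

	for (i = 0; i + 1 < len; i += 2)
		sum += (tcp[i] << 8) | tcp[i + 1];
	if (len & 1)
		sum += tcp[len - 1] << 8;	/* pad odd trailing byte */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return (u16)~sum;
}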
561 int tcp_v4_gso_send_check(struct sk_buff *skb)
563 const struct iphdr *iph;
566 if (!pskb_may_pull(skb, sizeof(*th)))
573 skb->ip_summed = CHECKSUM_PARTIAL;
574 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
579 * This routine will send an RST to the other tcp.
581 * Someone asks: why do I NEVER use socket parameters (TOS, TTL, etc.)?
583 * Answer: if a packet caused an RST, it is not for a socket
584 * existing in our system; if it is matched to a socket,
585 * it is just a duplicate segment or a bug in the other side's TCP.
586 * So we build the reply based only on the parameters
587 * that arrived with the segment.
588 * Exception: precedence violation. We do not implement it in any case.
591 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
593 const struct tcphdr *th = tcp_hdr(skb);
596 #ifdef CONFIG_TCP_MD5SIG
597 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
600 struct ip_reply_arg arg;
601 #ifdef CONFIG_TCP_MD5SIG
602 struct tcp_md5sig_key *key;
606 /* Never send a reset in response to a reset. */
610 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
613 /* Swap the send and the receive. */
614 memset(&rep, 0, sizeof(rep));
615 rep.th.dest = th->source;
616 rep.th.source = th->dest;
617 rep.th.doff = sizeof(struct tcphdr) / 4;
621 rep.th.seq = th->ack_seq;
624 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
625 skb->len - (th->doff << 2));
628 memset(&arg, 0, sizeof(arg));
629 arg.iov[0].iov_base = (unsigned char *)&rep;
630 arg.iov[0].iov_len = sizeof(rep.th);
632 #ifdef CONFIG_TCP_MD5SIG
633 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
635 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
637 (TCPOPT_MD5SIG << 8) |
639 /* Update length and the length the header thinks exists */
640 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
641 rep.th.doff = arg.iov[0].iov_len / 4;
643 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
644 key, ip_hdr(skb)->saddr,
645 ip_hdr(skb)->daddr, &rep.th);
648 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
649 ip_hdr(skb)->saddr, /* XXX */
650 arg.iov[0].iov_len, IPPROTO_TCP, 0);
651 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
652 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
653 /* When the socket is gone, all binding information is lost.
654 * Routing might fail in this case. Use iif for oif to
655 * make sure we can deliver it.
657 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
659 net = dev_net(skb_dst(skb)->dev);
660 arg.tos = ip_hdr(skb)->tos;
661 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
662 &arg, arg.iov[0].iov_len);
664 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
665 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
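/*
 * Editorial sketch (illustrative only, host byte order for clarity) of
 * how tcp_v4_send_reset() above derives the RST's sequence numbers from
 * nothing but the offending segment: an ACKed segment's ack_seq becomes
 * the RST's seq; otherwise seq is 0 and ack_seq covers everything the
 * segment occupied, SYN and FIN each counting as one sequence number.
 */
static inline void rst_seq_sketch(const struct tcphdr *th, u32 payload_len,
				  u32 *seq, u32 *ack_seq)
{
	if (th->ack) {
		*seq = ntohl(th->ack_seq);
		*ack_seq = 0;
	} else {
		*seq = 0;
		*ack_seq = ntohl(th->seq) + th->syn + th->fin + payload_len;
	}
}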
668 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
669 outside socket context, is certainly ugly. What can I do?
672 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
673 u32 win, u32 ts, int oif,
674 struct tcp_md5sig_key *key,
675 int reply_flags, u8 tos)
677 const struct tcphdr *th = tcp_hdr(skb);
680 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
681 #ifdef CONFIG_TCP_MD5SIG
682 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
686 struct ip_reply_arg arg;
687 struct net *net = dev_net(skb_dst(skb)->dev);
689 memset(&rep.th, 0, sizeof(struct tcphdr));
690 memset(&arg, 0, sizeof(arg));
692 arg.iov[0].iov_base = (unsigned char *)&rep;
693 arg.iov[0].iov_len = sizeof(rep.th);
695 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
696 (TCPOPT_TIMESTAMP << 8) |
698 rep.opt[1] = htonl(tcp_time_stamp);
699 rep.opt[2] = htonl(ts);
700 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
703 /* Swap the send and the receive. */
704 rep.th.dest = th->source;
705 rep.th.source = th->dest;
706 rep.th.doff = arg.iov[0].iov_len / 4;
707 rep.th.seq = htonl(seq);
708 rep.th.ack_seq = htonl(ack);
710 rep.th.window = htons(win);
712 #ifdef CONFIG_TCP_MD5SIG
714 int offset = (ts) ? 3 : 0;
716 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
718 (TCPOPT_MD5SIG << 8) |
720 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
721 rep.th.doff = arg.iov[0].iov_len/4;
723 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
724 key, ip_hdr(skb)->saddr,
725 ip_hdr(skb)->daddr, &rep.th);
728 arg.flags = reply_flags;
729 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
730 ip_hdr(skb)->saddr, /* XXX */
731 arg.iov[0].iov_len, IPPROTO_TCP, 0);
732 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
734 arg.bound_dev_if = oif;
736 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
737 &arg, arg.iov[0].iov_len);
739 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
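/*
 * Editorial sketch: the first option word assembled in tcp_v4_send_ack()
 * above packs four bytes big-endian - NOP (1), NOP (1), TIMESTAMP kind
 * (8), TIMESTAMP length (10) - and is followed by the two 32-bit
 * timestamp values. Illustrative only.
 */
static inline __be32 tstamp_opt_word_sketch(void)
{
	return htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
}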
742 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
744 struct inet_timewait_sock *tw = inet_twsk(sk);
745 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
747 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
748 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
751 tcp_twsk_md5_key(tcptw),
752 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
759 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
760 struct request_sock *req)
762 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
763 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
766 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
767 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
772 * Send a SYN-ACK after having received a SYN.
773 * This still operates on a request_sock only, not on a big socket.
776 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
777 struct request_sock *req,
778 struct request_values *rvp)
780 const struct inet_request_sock *ireq = inet_rsk(req);
783 struct sk_buff * skb;
785 /* First, grab a route. */
786 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
789 skb = tcp_make_synack(sk, dst, req, rvp);
792 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
794 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
797 err = net_xmit_eval(err);
804 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
805 struct request_values *rvp)
807 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
808 return tcp_v4_send_synack(sk, NULL, req, rvp);
812 * IPv4 request_sock destructor.
814 static void tcp_v4_reqsk_destructor(struct request_sock *req)
816 kfree(inet_rsk(req)->opt);
820 * Return 1 if a syncookie should be sent
822 int tcp_syn_flood_action(struct sock *sk,
823 const struct sk_buff *skb,
826 const char *msg = "Dropping request";
828 struct listen_sock *lopt;
832 #ifdef CONFIG_SYN_COOKIES
833 if (sysctl_tcp_syncookies) {
834 msg = "Sending cookies";
836 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
839 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
841 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
842 if (!lopt->synflood_warned) {
843 lopt->synflood_warned = 1;
844 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
846 proto, ntohs(tcp_hdr(skb)->dest), msg);
850 EXPORT_SYMBOL(tcp_syn_flood_action);
853 * Save and compile IPv4 options into the request_sock if needed.
855 static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
858 const struct ip_options *opt = &(IPCB(skb)->opt);
859 struct ip_options_rcu *dopt = NULL;
861 if (opt && opt->optlen) {
862 int opt_size = sizeof(*dopt) + opt->optlen;
864 dopt = kmalloc(opt_size, GFP_ATOMIC);
866 if (ip_options_echo(&dopt->opt, skb)) {
875 #ifdef CONFIG_TCP_MD5SIG
877 * RFC2385 MD5 checksumming requires a mapping of
878 * IP address->MD5 Key.
879 * We need to maintain these in the sk structure.
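#if 0	/* Editorial sketch of the userspace side: installing an RFC 2385
	 * key for a peer with the TCP_MD5SIG socket option, which is
	 * parsed by tcp_v4_parse_md5_keys() below. Error handling
	 * trimmed; names are illustrative.
	 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key_sketch(int fd, const struct sockaddr_in *peer,
			      const void *key, unsigned int keylen)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif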
882 /* Find the Key structure for an address. */
883 static struct tcp_md5sig_key *
884 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
886 struct tcp_sock *tp = tcp_sk(sk);
889 if (!tp->md5sig_info || !tp->md5sig_info->entries4)
891 for (i = 0; i < tp->md5sig_info->entries4; i++) {
892 if (tp->md5sig_info->keys4[i].addr == addr)
893 return &tp->md5sig_info->keys4[i].base;
898 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
899 struct sock *addr_sk)
901 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
903 EXPORT_SYMBOL(tcp_v4_md5_lookup);
905 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
906 struct request_sock *req)
908 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
911 /* This can be called on a newly created socket, from other files */
912 int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
913 u8 *newkey, u8 newkeylen)
915 /* Add Key to the list */
916 struct tcp_md5sig_key *key;
917 struct tcp_sock *tp = tcp_sk(sk);
918 struct tcp4_md5sig_key *keys;
920 key = tcp_v4_md5_do_lookup(sk, addr);
922 /* Pre-existing entry - just update that one. */
925 key->keylen = newkeylen;
927 struct tcp_md5sig_info *md5sig;
929 if (!tp->md5sig_info) {
930 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
932 if (!tp->md5sig_info) {
936 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
939 md5sig = tp->md5sig_info;
940 if (md5sig->entries4 == 0 &&
941 tcp_alloc_md5sig_pool(sk) == NULL) {
946 if (md5sig->alloced4 == md5sig->entries4) {
947 keys = kmalloc((sizeof(*keys) *
948 (md5sig->entries4 + 1)), GFP_ATOMIC);
951 if (md5sig->entries4 == 0)
952 tcp_free_md5sig_pool();
956 if (md5sig->entries4)
957 memcpy(keys, md5sig->keys4,
958 sizeof(*keys) * md5sig->entries4);
960 /* Free old key list, and reference new one */
961 kfree(md5sig->keys4);
962 md5sig->keys4 = keys;
966 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
967 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
968 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
972 EXPORT_SYMBOL(tcp_v4_md5_do_add);
974 static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
975 u8 *newkey, u8 newkeylen)
977 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
981 int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
983 struct tcp_sock *tp = tcp_sk(sk);
986 for (i = 0; i < tp->md5sig_info->entries4; i++) {
987 if (tp->md5sig_info->keys4[i].addr == addr) {
989 kfree(tp->md5sig_info->keys4[i].base.key);
990 tp->md5sig_info->entries4--;
992 if (tp->md5sig_info->entries4 == 0) {
993 kfree(tp->md5sig_info->keys4);
994 tp->md5sig_info->keys4 = NULL;
995 tp->md5sig_info->alloced4 = 0;
996 tcp_free_md5sig_pool();
997 } else if (tp->md5sig_info->entries4 != i) {
998 /* Key removed from the middle: shift the remaining entries down. */
999 memmove(&tp->md5sig_info->keys4[i],
1000 &tp->md5sig_info->keys4[i+1],
1001 (tp->md5sig_info->entries4 - i) *
1002 sizeof(struct tcp4_md5sig_key));
1009 EXPORT_SYMBOL(tcp_v4_md5_do_del);
1011 static void tcp_v4_clear_md5_list(struct sock *sk)
1013 struct tcp_sock *tp = tcp_sk(sk);
1015 /* Free each key, then the set of keys,
1016 * the crypto element, and then decrement our
1017 * hold on the last resort crypto.
1019 if (tp->md5sig_info->entries4) {
1021 for (i = 0; i < tp->md5sig_info->entries4; i++)
1022 kfree(tp->md5sig_info->keys4[i].base.key);
1023 tp->md5sig_info->entries4 = 0;
1024 tcp_free_md5sig_pool();
1026 if (tp->md5sig_info->keys4) {
1027 kfree(tp->md5sig_info->keys4);
1028 tp->md5sig_info->keys4 = NULL;
1029 tp->md5sig_info->alloced4 = 0;
1033 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1036 struct tcp_md5sig cmd;
1037 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1040 if (optlen < sizeof(cmd))
1043 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1046 if (sin->sin_family != AF_INET)
1049 if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
1050 if (!tcp_sk(sk)->md5sig_info)
1052 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
1055 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1058 if (!tcp_sk(sk)->md5sig_info) {
1059 struct tcp_sock *tp = tcp_sk(sk);
1060 struct tcp_md5sig_info *p;
1062 p = kzalloc(sizeof(*p), sk->sk_allocation);
1066 tp->md5sig_info = p;
1067 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1070 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
1073 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1074 newkey, cmd.tcpm_keylen);
1077 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1078 __be32 daddr, __be32 saddr, int nbytes)
1080 struct tcp4_pseudohdr *bp;
1081 struct scatterlist sg;
1083 bp = &hp->md5_blk.ip4;
1086 * 1. the TCP pseudo-header (in the order: source IP address,
1087 * destination IP address, zero-padded protocol number, and segment length)
1093 bp->protocol = IPPROTO_TCP;
1094 bp->len = cpu_to_be16(nbytes);
1096 sg_init_one(&sg, bp, sizeof(*bp));
1097 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
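/*
 * For reference (editorial note): per RFC 2385 the digest computed by
 * the helpers around here covers, in order, the pseudo-header hashed
 * above, the fixed TCP header with its checksum field zeroed, the
 * segment payload (skb variant only), and finally the key itself.
 */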
1100 static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
1101 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1103 struct tcp_md5sig_pool *hp;
1104 struct hash_desc *desc;
1106 hp = tcp_get_md5sig_pool();
1108 goto clear_hash_noput;
1109 desc = &hp->md5_desc;
1111 if (crypto_hash_init(desc))
1113 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1115 if (tcp_md5_hash_header(hp, th))
1117 if (tcp_md5_hash_key(hp, key))
1119 if (crypto_hash_final(desc, md5_hash))
1122 tcp_put_md5sig_pool();
1126 tcp_put_md5sig_pool();
1128 memset(md5_hash, 0, 16);
1132 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1133 const struct sock *sk, const struct request_sock *req,
1134 const struct sk_buff *skb)
1136 struct tcp_md5sig_pool *hp;
1137 struct hash_desc *desc;
1138 const struct tcphdr *th = tcp_hdr(skb);
1139 __be32 saddr, daddr;
1142 saddr = inet_sk(sk)->inet_saddr;
1143 daddr = inet_sk(sk)->inet_daddr;
1145 saddr = inet_rsk(req)->loc_addr;
1146 daddr = inet_rsk(req)->rmt_addr;
1148 const struct iphdr *iph = ip_hdr(skb);
1153 hp = tcp_get_md5sig_pool();
1155 goto clear_hash_noput;
1156 desc = &hp->md5_desc;
1158 if (crypto_hash_init(desc))
1161 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1163 if (tcp_md5_hash_header(hp, th))
1165 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1167 if (tcp_md5_hash_key(hp, key))
1169 if (crypto_hash_final(desc, md5_hash))
1172 tcp_put_md5sig_pool();
1176 tcp_put_md5sig_pool();
1178 memset(md5_hash, 0, 16);
1181 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1183 static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1186 * This gets called for each TCP segment that arrives
1187 * so we want to be efficient.
1188 * We have 3 drop cases:
1189 * o No MD5 hash and one expected.
1190 * o MD5 hash and we're not expecting one.
1191 * o MD5 hash and it's wrong.
1193 const __u8 *hash_location = NULL;
1194 struct tcp_md5sig_key *hash_expected;
1195 const struct iphdr *iph = ip_hdr(skb);
1196 const struct tcphdr *th = tcp_hdr(skb);
1198 unsigned char newhash[16];
1200 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1201 hash_location = tcp_parse_md5sig_option(th);
1203 /* We've parsed the options - do we have a hash? */
1204 if (!hash_expected && !hash_location)
1207 if (hash_expected && !hash_location) {
1208 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1212 if (!hash_expected && hash_location) {
1213 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1217 /* Okay, so this is hash_expected and hash_location -
1218 * so we need to calculate the checksum.
1220 genhash = tcp_v4_md5_hash_skb(newhash,
1224 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1225 if (net_ratelimit()) {
1226 printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1227 &iph->saddr, ntohs(th->source),
1228 &iph->daddr, ntohs(th->dest),
1229 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1238 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1240 .obj_size = sizeof(struct tcp_request_sock),
1241 .rtx_syn_ack = tcp_v4_rtx_synack,
1242 .send_ack = tcp_v4_reqsk_send_ack,
1243 .destructor = tcp_v4_reqsk_destructor,
1244 .send_reset = tcp_v4_send_reset,
1245 .syn_ack_timeout = tcp_syn_ack_timeout,
1248 #ifdef CONFIG_TCP_MD5SIG
1249 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1250 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1251 .calc_md5_hash = tcp_v4_md5_hash_skb,
1255 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1257 struct tcp_extend_values tmp_ext;
1258 struct tcp_options_received tmp_opt;
1259 const u8 *hash_location;
1260 struct request_sock *req;
1261 struct inet_request_sock *ireq;
1262 struct tcp_sock *tp = tcp_sk(sk);
1263 struct dst_entry *dst = NULL;
1264 __be32 saddr = ip_hdr(skb)->saddr;
1265 __be32 daddr = ip_hdr(skb)->daddr;
1266 __u32 isn = TCP_SKB_CB(skb)->when;
1267 int want_cookie = 0;
1269 /* Never answer to SYNs sent to broadcast or multicast */
1270 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1273 /* TW buckets are converted to open requests without
1274 * limitations; they conserve resources and the peer is
1275 * evidently a real one.
1277 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1278 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1283 /* Accept backlog is full. If we have already queued enough
1284 * warm entries in the syn queue, drop this request. It is better than
1285 * clogging the syn queue with openreqs with exponentially increasing timeout.
1288 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1291 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1295 #ifdef CONFIG_TCP_MD5SIG
1296 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1299 tcp_clear_options(&tmp_opt);
1300 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1301 tmp_opt.user_mss = tp->rx_opt.user_mss;
1302 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1304 if (tmp_opt.cookie_plus > 0 &&
1305 tmp_opt.saw_tstamp &&
1306 !tp->rx_opt.cookie_out_never &&
1307 (sysctl_tcp_cookie_size > 0 ||
1308 (tp->cookie_values != NULL &&
1309 tp->cookie_values->cookie_desired > 0))) {
1311 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1312 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1314 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1315 goto drop_and_release;
1317 /* Secret recipe starts with IP addresses */
1318 *mess++ ^= (__force u32)daddr;
1319 *mess++ ^= (__force u32)saddr;
1321 /* plus variable length Initiator Cookie */
1324 *c++ ^= *hash_location++;
1326 want_cookie = 0; /* not our kind of cookie */
1327 tmp_ext.cookie_out_never = 0; /* false */
1328 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1329 } else if (!tp->rx_opt.cookie_in_always) {
1330 /* redundant indications, but ensure initialization. */
1331 tmp_ext.cookie_out_never = 1; /* true */
1332 tmp_ext.cookie_plus = 0;
1334 goto drop_and_release;
1336 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1338 if (want_cookie && !tmp_opt.saw_tstamp)
1339 tcp_clear_options(&tmp_opt);
1341 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1342 tcp_openreq_init(req, &tmp_opt, skb);
1344 ireq = inet_rsk(req);
1345 ireq->loc_addr = daddr;
1346 ireq->rmt_addr = saddr;
1347 ireq->no_srccheck = inet_sk(sk)->transparent;
1348 ireq->opt = tcp_v4_save_options(sk, skb);
1350 if (security_inet_conn_request(sk, skb, req))
1353 if (!want_cookie || tmp_opt.tstamp_ok)
1354 TCP_ECN_create_request(req, tcp_hdr(skb));
1357 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1358 req->cookie_ts = tmp_opt.tstamp_ok;
1360 struct inet_peer *peer = NULL;
1363 /* VJ's idea. We save the last timestamp seen
1364 * from the destination in the peer table when entering
1365 * state TIME-WAIT, and check against it before
1366 * accepting a new connection request.
1368 * If "isn" is not zero, this request hit an alive
1369 * timewait bucket, so all the necessary checks
1370 * are made in the function processing the timewait state.
1372 if (tmp_opt.saw_tstamp &&
1373 tcp_death_row.sysctl_tw_recycle &&
1374 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1375 fl4.daddr == saddr &&
1376 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1377 inet_peer_refcheck(peer);
1378 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1379 (s32)(peer->tcp_ts - req->ts_recent) >
1381 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1382 goto drop_and_release;
1385 /* Kill the following clause, if you dislike this way. */
1386 else if (!sysctl_tcp_syncookies &&
1387 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1388 (sysctl_max_syn_backlog >> 2)) &&
1389 (!peer || !peer->tcp_ts_stamp) &&
1390 (!dst || !dst_metric(dst, RTAX_RTT))) {
1391 /* Without syncookies the last quarter of the
1392 * backlog is filled with destinations
1393 * proven to be alive.
1394 * It means that we continue to communicate
1395 * with destinations already remembered
1396 * by the moment of the synflood.
1398 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
1399 &saddr, ntohs(tcp_hdr(skb)->source));
1400 goto drop_and_release;
1403 isn = tcp_v4_init_sequence(skb);
1405 tcp_rsk(req)->snt_isn = isn;
1406 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1408 if (tcp_v4_send_synack(sk, dst, req,
1409 (struct request_values *)&tmp_ext) ||
1413 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1423 EXPORT_SYMBOL(tcp_v4_conn_request);
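/*
 * Editorial sketch (illustrative only) of the "last quarter" heuristic
 * used above: without syncookies, once fewer than a quarter of the SYN
 * backlog slots remain, requests are accepted only from peers we have
 * previously proven alive (a cached timestamp or an RTT sample).
 */
static inline int syn_backlog_last_quarter_sketch(int max_backlog, int qlen)
{
	return max_backlog - qlen < (max_backlog >> 2);
}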
1427 * The three-way handshake has completed - we got a valid synack -
1428 * now create the new socket.
1430 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1431 struct request_sock *req,
1432 struct dst_entry *dst)
1434 struct inet_request_sock *ireq;
1435 struct inet_sock *newinet;
1436 struct tcp_sock *newtp;
1438 #ifdef CONFIG_TCP_MD5SIG
1439 struct tcp_md5sig_key *key;
1441 struct ip_options_rcu *inet_opt;
1443 if (sk_acceptq_is_full(sk))
1446 newsk = tcp_create_openreq_child(sk, req, skb);
1450 newsk->sk_gso_type = SKB_GSO_TCPV4;
1452 newtp = tcp_sk(newsk);
1453 newinet = inet_sk(newsk);
1454 ireq = inet_rsk(req);
1455 newinet->inet_daddr = ireq->rmt_addr;
1456 newinet->inet_rcv_saddr = ireq->loc_addr;
1457 newinet->inet_saddr = ireq->loc_addr;
1458 inet_opt = ireq->opt;
1459 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1461 newinet->mc_index = inet_iif(skb);
1462 newinet->mc_ttl = ip_hdr(skb)->ttl;
1463 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1465 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1466 newinet->inet_id = newtp->write_seq ^ jiffies;
1469 dst = inet_csk_route_child_sock(sk, newsk, req);
1473 /* syncookie case : see end of cookie_v4_check() */
1475 sk_setup_caps(newsk, dst);
1477 tcp_mtup_init(newsk);
1478 tcp_sync_mss(newsk, dst_mtu(dst));
1479 newtp->advmss = dst_metric_advmss(dst);
1480 if (tcp_sk(sk)->rx_opt.user_mss &&
1481 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1482 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1484 tcp_initialize_rcv_mss(newsk);
1485 if (tcp_rsk(req)->snt_synack)
1486 tcp_valid_rtt_meas(newsk,
1487 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1488 newtp->total_retrans = req->retrans;
1490 #ifdef CONFIG_TCP_MD5SIG
1491 /* Copy over the MD5 key from the original socket */
1492 key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
1495 * We're using one, so create a matching key
1496 * on the newsk structure. If we fail to get
1497 * memory, then we end up not copying the key across.
1500 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1502 tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1503 newkey, key->keylen);
1504 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1508 if (__inet_inherit_port(sk, newsk) < 0)
1510 __inet_hash_nolisten(newsk, NULL);
1515 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1519 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1522 tcp_clear_xmit_timers(newsk);
1523 bh_unlock_sock(newsk);
1527 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1529 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1531 struct tcphdr *th = tcp_hdr(skb);
1532 const struct iphdr *iph = ip_hdr(skb);
1534 struct request_sock **prev;
1535 /* Find possible connection requests. */
1536 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1537 iph->saddr, iph->daddr);
1539 return tcp_check_req(sk, skb, req, prev);
1541 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1542 th->source, iph->daddr, th->dest, inet_iif(skb));
1545 if (nsk->sk_state != TCP_TIME_WAIT) {
1549 inet_twsk_put(inet_twsk(nsk));
1553 #ifdef CONFIG_SYN_COOKIES
1555 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1560 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1562 const struct iphdr *iph = ip_hdr(skb);
1564 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1565 if (!tcp_v4_check(skb->len, iph->saddr,
1566 iph->daddr, skb->csum)) {
1567 skb->ip_summed = CHECKSUM_UNNECESSARY;
1572 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1573 skb->len, IPPROTO_TCP, 0);
1575 if (skb->len <= 76) {
1576 return __skb_checksum_complete(skb);
1582 /* The socket must have its spinlock held when we get here.
1585 * We have a potential double-lock case here, so even when
1586 * doing backlog processing we use the BH locking scheme.
1587 * This is because we cannot sleep with the original spinlock held.
1590 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1593 #ifdef CONFIG_TCP_MD5SIG
1595 * We really want to reject the packet as early as possible when:
1597 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1598 * o There is an MD5 option and we're not expecting one
1600 if (tcp_v4_inbound_md5_hash(sk, skb))
1604 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1605 sock_rps_save_rxhash(sk, skb);
1606 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1613 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1616 if (sk->sk_state == TCP_LISTEN) {
1617 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1622 sock_rps_save_rxhash(nsk, skb);
1623 if (tcp_child_process(sk, nsk, skb)) {
1630 sock_rps_save_rxhash(sk, skb);
1632 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1639 tcp_v4_send_reset(rsk, skb);
1642 /* Be careful here. If this function gets more complicated and
1643 * gcc suffers from register pressure on the x86, sk (in %ebx)
1644 * might be destroyed here. This current version compiles correctly,
1645 * but you have been warned.
1650 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1653 EXPORT_SYMBOL(tcp_v4_do_rcv);
1659 int tcp_v4_rcv(struct sk_buff *skb)
1661 const struct iphdr *iph;
1662 const struct tcphdr *th;
1665 struct net *net = dev_net(skb->dev);
1667 if (skb->pkt_type != PACKET_HOST)
1670 /* Count it even if it's bad */
1671 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1673 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1678 if (th->doff < sizeof(struct tcphdr) / 4)
1680 if (!pskb_may_pull(skb, th->doff * 4))
1683 /* An explanation is required here, I think.
1684 * Packet length and doff are validated by header prediction,
1685 * provided the case of th->doff == 0 is eliminated.
1686 * So, we defer the checks. */
1687 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1692 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1693 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1694 skb->len - th->doff * 4);
1695 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1696 TCP_SKB_CB(skb)->when = 0;
1697 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1698 TCP_SKB_CB(skb)->sacked = 0;
1700 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1705 if (sk->sk_state == TCP_TIME_WAIT)
1708 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1709 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1710 goto discard_and_relse;
1713 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1714 goto discard_and_relse;
1717 if (sk_filter(sk, skb))
1718 goto discard_and_relse;
1722 bh_lock_sock_nested(sk);
1724 if (!sock_owned_by_user(sk)) {
1725 #ifdef CONFIG_NET_DMA
1726 struct tcp_sock *tp = tcp_sk(sk);
1727 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1728 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1729 if (tp->ucopy.dma_chan)
1730 ret = tcp_v4_do_rcv(sk, skb);
1734 if (!tcp_prequeue(sk, skb))
1735 ret = tcp_v4_do_rcv(sk, skb);
1737 } else if (unlikely(sk_add_backlog(sk, skb))) {
1739 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1740 goto discard_and_relse;
1749 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1752 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1754 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1756 tcp_v4_send_reset(NULL, skb);
1760 /* Discard frame. */
1769 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1770 inet_twsk_put(inet_twsk(sk));
1774 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1775 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1776 inet_twsk_put(inet_twsk(sk));
1779 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1781 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1783 iph->daddr, th->dest,
1786 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1787 inet_twsk_put(inet_twsk(sk));
1791 /* Fall through to ACK */
1794 tcp_v4_timewait_ack(sk, skb);
1798 case TCP_TW_SUCCESS:;
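/*
 * Editorial sketch of the sequence-space accounting behind the
 * TCP_SKB_CB() setup in tcp_v4_rcv() above (illustrative only): a
 * segment ends at its starting sequence number plus its payload
 * length, with the SYN and FIN flags each consuming one additional
 * sequence number.
 */
static inline u32 tcp_end_seq_sketch(u32 seq, int syn, int fin,
				     u32 payload_len)
{
	return seq + syn + fin + payload_len;
}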
1803 struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1805 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1806 struct inet_sock *inet = inet_sk(sk);
1807 struct inet_peer *peer;
1810 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1811 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1815 rt_bind_peer(rt, inet->inet_daddr, 1);
1817 *release_it = false;
1822 EXPORT_SYMBOL(tcp_v4_get_peer);
1824 void *tcp_v4_tw_get_peer(struct sock *sk)
1826 const struct inet_timewait_sock *tw = inet_twsk(sk);
1828 return inet_getpeer_v4(tw->tw_daddr, 1);
1830 EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1832 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1833 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1834 .twsk_unique = tcp_twsk_unique,
1835 .twsk_destructor= tcp_twsk_destructor,
1836 .twsk_getpeer = tcp_v4_tw_get_peer,
1839 const struct inet_connection_sock_af_ops ipv4_specific = {
1840 .queue_xmit = ip_queue_xmit,
1841 .send_check = tcp_v4_send_check,
1842 .rebuild_header = inet_sk_rebuild_header,
1843 .conn_request = tcp_v4_conn_request,
1844 .syn_recv_sock = tcp_v4_syn_recv_sock,
1845 .get_peer = tcp_v4_get_peer,
1846 .net_header_len = sizeof(struct iphdr),
1847 .setsockopt = ip_setsockopt,
1848 .getsockopt = ip_getsockopt,
1849 .addr2sockaddr = inet_csk_addr2sockaddr,
1850 .sockaddr_len = sizeof(struct sockaddr_in),
1851 .bind_conflict = inet_csk_bind_conflict,
1852 #ifdef CONFIG_COMPAT
1853 .compat_setsockopt = compat_ip_setsockopt,
1854 .compat_getsockopt = compat_ip_getsockopt,
1857 EXPORT_SYMBOL(ipv4_specific);
1859 #ifdef CONFIG_TCP_MD5SIG
1860 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1861 .md5_lookup = tcp_v4_md5_lookup,
1862 .calc_md5_hash = tcp_v4_md5_hash_skb,
1863 .md5_add = tcp_v4_md5_add_func,
1864 .md5_parse = tcp_v4_parse_md5_keys,
1868 /* NOTE: A lot of things are set to zero explicitly by the call to
1869 * sk_alloc(), so they need not be done here.
1871 static int tcp_v4_init_sock(struct sock *sk)
1873 struct inet_connection_sock *icsk = inet_csk(sk);
1874 struct tcp_sock *tp = tcp_sk(sk);
1876 skb_queue_head_init(&tp->out_of_order_queue);
1877 tcp_init_xmit_timers(sk);
1878 tcp_prequeue_init(tp);
1880 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1881 tp->mdev = TCP_TIMEOUT_INIT;
1883 /* So many TCP implementations out there (incorrectly) count the
1884 * initial SYN frame in their delayed-ACK and congestion control
1885 * algorithms that we must have the following bandaid to talk
1886 * efficiently to them. -DaveM
1888 tp->snd_cwnd = TCP_INIT_CWND;
1890 /* See draft-stevens-tcpca-spec-01 for discussion of the
1891 * initialization of these values.
1893 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1894 tp->snd_cwnd_clamp = ~0;
1895 tp->mss_cache = TCP_MSS_DEFAULT;
1897 tp->reordering = sysctl_tcp_reordering;
1898 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1900 sk->sk_state = TCP_CLOSE;
1902 sk->sk_write_space = sk_stream_write_space;
1903 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1905 icsk->icsk_af_ops = &ipv4_specific;
1906 icsk->icsk_sync_mss = tcp_sync_mss;
1907 #ifdef CONFIG_TCP_MD5SIG
1908 tp->af_specific = &tcp_sock_ipv4_specific;
1911 /* TCP Cookie Transactions */
1912 if (sysctl_tcp_cookie_size > 0) {
1913 /* Default, cookies without s_data_payload. */
1915 kzalloc(sizeof(*tp->cookie_values),
1917 if (tp->cookie_values != NULL)
1918 kref_init(&tp->cookie_values->kref);
1920 /* Presumed zeroed, in order of appearance:
1921 * cookie_in_always, cookie_out_never,
1922 * s_data_constant, s_data_in, s_data_out
1924 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1925 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1928 percpu_counter_inc(&tcp_sockets_allocated);
1934 void tcp_v4_destroy_sock(struct sock *sk)
1936 struct tcp_sock *tp = tcp_sk(sk);
1938 tcp_clear_xmit_timers(sk);
1940 tcp_cleanup_congestion_control(sk);
1942 /* Clean up the write buffer. */
1943 tcp_write_queue_purge(sk);
1945 /* Cleans up our, hopefully empty, out_of_order_queue. */
1946 __skb_queue_purge(&tp->out_of_order_queue);
1948 #ifdef CONFIG_TCP_MD5SIG
1949 /* Clean up the MD5 key list, if any */
1950 if (tp->md5sig_info) {
1951 tcp_v4_clear_md5_list(sk);
1952 kfree(tp->md5sig_info);
1953 tp->md5sig_info = NULL;
1957 #ifdef CONFIG_NET_DMA
1958 /* Cleans up our sk_async_wait_queue */
1959 __skb_queue_purge(&sk->sk_async_wait_queue);
1962 /* Clean the prequeue; it really must be empty */
1963 __skb_queue_purge(&tp->ucopy.prequeue);
1965 /* Clean up a referenced TCP bind bucket. */
1966 if (inet_csk(sk)->icsk_bind_hash)
1970 * If sendmsg cached page exists, toss it.
1972 if (sk->sk_sndmsg_page) {
1973 __free_page(sk->sk_sndmsg_page);
1974 sk->sk_sndmsg_page = NULL;
1977 /* TCP Cookie Transactions */
1978 if (tp->cookie_values != NULL) {
1979 kref_put(&tp->cookie_values->kref,
1980 tcp_cookie_values_release);
1981 tp->cookie_values = NULL;
1984 percpu_counter_dec(&tcp_sockets_allocated);
1986 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1988 #ifdef CONFIG_PROC_FS
1989 /* Proc filesystem TCP sock list dumping. */
1991 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1993 return hlist_nulls_empty(head) ? NULL :
1994 list_entry(head->first, struct inet_timewait_sock, tw_node);
1997 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1999 return !is_a_nulls(tw->tw_node.next) ?
2000 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2004 * Get the next listener socket following cur. If cur is NULL, get the first socket
2005 * starting from bucket given in st->bucket; when st->bucket is zero the
2006 * very first socket in the hash table is returned.
2008 static void *listening_get_next(struct seq_file *seq, void *cur)
2010 struct inet_connection_sock *icsk;
2011 struct hlist_nulls_node *node;
2012 struct sock *sk = cur;
2013 struct inet_listen_hashbucket *ilb;
2014 struct tcp_iter_state *st = seq->private;
2015 struct net *net = seq_file_net(seq);
2018 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2019 spin_lock_bh(&ilb->lock);
2020 sk = sk_nulls_head(&ilb->head);
2024 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2028 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2029 struct request_sock *req = cur;
2031 icsk = inet_csk(st->syn_wait_sk);
2035 if (req->rsk_ops->family == st->family) {
2041 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2044 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2046 sk = sk_nulls_next(st->syn_wait_sk);
2047 st->state = TCP_SEQ_STATE_LISTENING;
2048 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2050 icsk = inet_csk(sk);
2051 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2052 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2054 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2055 sk = sk_nulls_next(sk);
2058 sk_nulls_for_each_from(sk, node) {
2059 if (!net_eq(sock_net(sk), net))
2061 if (sk->sk_family == st->family) {
2065 icsk = inet_csk(sk);
2066 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2067 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2069 st->uid = sock_i_uid(sk);
2070 st->syn_wait_sk = sk;
2071 st->state = TCP_SEQ_STATE_OPENREQ;
2075 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2077 spin_unlock_bh(&ilb->lock);
2079 if (++st->bucket < INET_LHTABLE_SIZE) {
2080 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2081 spin_lock_bh(&ilb->lock);
2082 sk = sk_nulls_head(&ilb->head);
2090 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2092 struct tcp_iter_state *st = seq->private;
2097 rc = listening_get_next(seq, NULL);
2099 while (rc && *pos) {
2100 rc = listening_get_next(seq, rc);
2106 static inline int empty_bucket(struct tcp_iter_state *st)
2108 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2109 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2113 * Get first established socket starting from bucket given in st->bucket.
2114 * If st->bucket is zero, the very first socket in the hash is returned.
2116 static void *established_get_first(struct seq_file *seq)
2118 struct tcp_iter_state *st = seq->private;
2119 struct net *net = seq_file_net(seq);
2123 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2125 struct hlist_nulls_node *node;
2126 struct inet_timewait_sock *tw;
2127 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2129 /* Lockless fast path for the common case of empty buckets */
2130 if (empty_bucket(st))
2134 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2135 if (sk->sk_family != st->family ||
2136 !net_eq(sock_net(sk), net)) {
2142 st->state = TCP_SEQ_STATE_TIME_WAIT;
2143 inet_twsk_for_each(tw, node,
2144 &tcp_hashinfo.ehash[st->bucket].twchain) {
2145 if (tw->tw_family != st->family ||
2146 !net_eq(twsk_net(tw), net)) {
2152 spin_unlock_bh(lock);
2153 st->state = TCP_SEQ_STATE_ESTABLISHED;
2159 static void *established_get_next(struct seq_file *seq, void *cur)
2161 struct sock *sk = cur;
2162 struct inet_timewait_sock *tw;
2163 struct hlist_nulls_node *node;
2164 struct tcp_iter_state *st = seq->private;
2165 struct net *net = seq_file_net(seq);
2170 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2174 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2181 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2182 st->state = TCP_SEQ_STATE_ESTABLISHED;
2184 /* Look for the next non-empty bucket */
2186 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2189 if (st->bucket > tcp_hashinfo.ehash_mask)
2192 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2193 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2195 sk = sk_nulls_next(sk);
2197 sk_nulls_for_each_from(sk, node) {
2198 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2202 st->state = TCP_SEQ_STATE_TIME_WAIT;
2203 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2211 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2213 struct tcp_iter_state *st = seq->private;
2217 rc = established_get_first(seq);
2220 rc = established_get_next(seq, rc);
2226 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2229 struct tcp_iter_state *st = seq->private;
2231 st->state = TCP_SEQ_STATE_LISTENING;
2232 rc = listening_get_idx(seq, &pos);
2235 st->state = TCP_SEQ_STATE_ESTABLISHED;
2236 rc = established_get_idx(seq, pos);
2242 static void *tcp_seek_last_pos(struct seq_file *seq)
2244 struct tcp_iter_state *st = seq->private;
2245 int offset = st->offset;
2246 int orig_num = st->num;
2249 switch (st->state) {
2250 case TCP_SEQ_STATE_OPENREQ:
2251 case TCP_SEQ_STATE_LISTENING:
2252 if (st->bucket >= INET_LHTABLE_SIZE)
2254 st->state = TCP_SEQ_STATE_LISTENING;
2255 rc = listening_get_next(seq, NULL);
2256 while (offset-- && rc)
2257 rc = listening_get_next(seq, rc);
2262 case TCP_SEQ_STATE_ESTABLISHED:
2263 case TCP_SEQ_STATE_TIME_WAIT:
2264 st->state = TCP_SEQ_STATE_ESTABLISHED;
2265 if (st->bucket > tcp_hashinfo.ehash_mask)
2267 rc = established_get_first(seq);
2268 while (offset-- && rc)
2269 rc = established_get_next(seq, rc);
2277 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2279 struct tcp_iter_state *st = seq->private;
2282 if (*pos && *pos == st->last_pos) {
2283 rc = tcp_seek_last_pos(seq);
2288 st->state = TCP_SEQ_STATE_LISTENING;
2292 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2295 st->last_pos = *pos;
2299 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2301 struct tcp_iter_state *st = seq->private;
2304 if (v == SEQ_START_TOKEN) {
2305 rc = tcp_get_idx(seq, 0);
2309 switch (st->state) {
2310 case TCP_SEQ_STATE_OPENREQ:
2311 case TCP_SEQ_STATE_LISTENING:
2312 rc = listening_get_next(seq, v);
2314 st->state = TCP_SEQ_STATE_ESTABLISHED;
2317 rc = established_get_first(seq);
2320 case TCP_SEQ_STATE_ESTABLISHED:
2321 case TCP_SEQ_STATE_TIME_WAIT:
2322 rc = established_get_next(seq, v);
2327 st->last_pos = *pos;
2331 static void tcp_seq_stop(struct seq_file *seq, void *v)
2333 struct tcp_iter_state *st = seq->private;
2335 switch (st->state) {
2336 case TCP_SEQ_STATE_OPENREQ:
2338 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2339 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2341 case TCP_SEQ_STATE_LISTENING:
2342 if (v != SEQ_START_TOKEN)
2343 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2345 case TCP_SEQ_STATE_TIME_WAIT:
2346 case TCP_SEQ_STATE_ESTABLISHED:
2348 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2353 int tcp_seq_open(struct inode *inode, struct file *file)
2355 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2356 struct tcp_iter_state *s;
2359 err = seq_open_net(inode, file, &afinfo->seq_ops,
2360 sizeof(struct tcp_iter_state));
2364 s = ((struct seq_file *)file->private_data)->private;
2365 s->family = afinfo->family;
2369 EXPORT_SYMBOL(tcp_seq_open);
2371 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2374 struct proc_dir_entry *p;
2376 afinfo->seq_ops.start = tcp_seq_start;
2377 afinfo->seq_ops.next = tcp_seq_next;
2378 afinfo->seq_ops.stop = tcp_seq_stop;
2380 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2381 afinfo->seq_fops, afinfo);
2386 EXPORT_SYMBOL(tcp_proc_register);
2388 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2390 proc_net_remove(net, afinfo->name);
2392 EXPORT_SYMBOL(tcp_proc_unregister);
2394 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2395 struct seq_file *f, int i, int uid, int *len)
2397 const struct inet_request_sock *ireq = inet_rsk(req);
2398 int ttd = req->expires - jiffies;
2400 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2401 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2404 ntohs(inet_sk(sk)->inet_sport),
2406 ntohs(ireq->rmt_port),
2408 0, 0, /* could print option size, but that is af dependent. */
2409 1, /* timers active (only the expire timer) */
2410 jiffies_to_clock_t(ttd),
2413 0, /* non standard timer */
2414 0, /* open_requests have no inode */
2415 atomic_read(&sk->sk_refcnt),
2420 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2423 unsigned long timer_expires;
2424 const struct tcp_sock *tp = tcp_sk(sk);
2425 const struct inet_connection_sock *icsk = inet_csk(sk);
2426 const struct inet_sock *inet = inet_sk(sk);
2427 __be32 dest = inet->inet_daddr;
2428 __be32 src = inet->inet_rcv_saddr;
2429 __u16 destp = ntohs(inet->inet_dport);
2430 __u16 srcp = ntohs(inet->inet_sport);
2433 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2435 timer_expires = icsk->icsk_timeout;
2436 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2438 timer_expires = icsk->icsk_timeout;
2439 } else if (timer_pending(&sk->sk_timer)) {
2441 timer_expires = sk->sk_timer.expires;
2444 timer_expires = jiffies;
2447 if (sk->sk_state == TCP_LISTEN)
2448 rx_queue = sk->sk_ack_backlog;
2451 * because we don't lock the socket, we might find a transient negative value
2453 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2455 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2456 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2457 i, src, srcp, dest, destp, sk->sk_state,
2458 tp->write_seq - tp->snd_una,
2461 jiffies_to_clock_t(timer_expires - jiffies),
2462 icsk->icsk_retransmits,
2464 icsk->icsk_probes_out,
2466 atomic_read(&sk->sk_refcnt), sk,
2467 jiffies_to_clock_t(icsk->icsk_rto),
2468 jiffies_to_clock_t(icsk->icsk_ack.ato),
2469 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2471 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2475 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2476 struct seq_file *f, int i, int *len)
2480 int ttd = tw->tw_ttd - jiffies;
2485 dest = tw->tw_daddr;
2486 src = tw->tw_rcv_saddr;
2487 destp = ntohs(tw->tw_dport);
2488 srcp = ntohs(tw->tw_sport);
2490 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2491 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2492 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2493 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2494 atomic_read(&tw->tw_refcnt), tw, len);
2499 static int tcp4_seq_show(struct seq_file *seq, void *v)
2501 struct tcp_iter_state *st;
2504 if (v == SEQ_START_TOKEN) {
2505 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2506 " sl local_address rem_address st tx_queue "
2507 "rx_queue tr tm->when retrnsmt uid timeout "
2513 switch (st->state) {
2514 case TCP_SEQ_STATE_LISTENING:
2515 case TCP_SEQ_STATE_ESTABLISHED:
2516 get_tcp4_sock(v, seq, st->num, &len);
2518 case TCP_SEQ_STATE_OPENREQ:
2519 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2521 case TCP_SEQ_STATE_TIME_WAIT:
2522 get_timewait4_sock(v, seq, st->num, &len);
2525 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2530 static const struct file_operations tcp_afinfo_seq_fops = {
2531 .owner = THIS_MODULE,
2532 .open = tcp_seq_open,
2534 .llseek = seq_lseek,
2535 .release = seq_release_net
2538 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2541 .seq_fops = &tcp_afinfo_seq_fops,
2543 .show = tcp4_seq_show,
2547 static int __net_init tcp4_proc_init_net(struct net *net)
2549 return tcp_proc_register(net, &tcp4_seq_afinfo);
2552 static void __net_exit tcp4_proc_exit_net(struct net *net)
2554 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2557 static struct pernet_operations tcp4_net_ops = {
2558 .init = tcp4_proc_init_net,
2559 .exit = tcp4_proc_exit_net,
2562 int __init tcp4_proc_init(void)
2564 return register_pernet_subsys(&tcp4_net_ops);
2567 void tcp4_proc_exit(void)
2569 unregister_pernet_subsys(&tcp4_net_ops);
2571 #endif /* CONFIG_PROC_FS */
2573 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2575 const struct iphdr *iph = skb_gro_network_header(skb);
2577 switch (skb->ip_summed) {
2578 case CHECKSUM_COMPLETE:
2579 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2581 skb->ip_summed = CHECKSUM_UNNECESSARY;
2587 NAPI_GRO_CB(skb)->flush = 1;
2591 return tcp_gro_receive(head, skb);
2594 int tcp4_gro_complete(struct sk_buff *skb)
2596 const struct iphdr *iph = ip_hdr(skb);
2597 struct tcphdr *th = tcp_hdr(skb);
2599 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2600 iph->saddr, iph->daddr, 0);
2601 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2603 return tcp_gro_complete(skb);
2606 struct proto tcp_prot = {
2608 .owner = THIS_MODULE,
2610 .connect = tcp_v4_connect,
2611 .disconnect = tcp_disconnect,
2612 .accept = inet_csk_accept,
2614 .init = tcp_v4_init_sock,
2615 .destroy = tcp_v4_destroy_sock,
2616 .shutdown = tcp_shutdown,
2617 .setsockopt = tcp_setsockopt,
2618 .getsockopt = tcp_getsockopt,
2619 .recvmsg = tcp_recvmsg,
2620 .sendmsg = tcp_sendmsg,
2621 .sendpage = tcp_sendpage,
2622 .backlog_rcv = tcp_v4_do_rcv,
2624 .unhash = inet_unhash,
2625 .get_port = inet_csk_get_port,
2626 .enter_memory_pressure = tcp_enter_memory_pressure,
2627 .sockets_allocated = &tcp_sockets_allocated,
2628 .orphan_count = &tcp_orphan_count,
2629 .memory_allocated = &tcp_memory_allocated,
2630 .memory_pressure = &tcp_memory_pressure,
2631 .sysctl_mem = sysctl_tcp_mem,
2632 .sysctl_wmem = sysctl_tcp_wmem,
2633 .sysctl_rmem = sysctl_tcp_rmem,
2634 .max_header = MAX_TCP_HEADER,
2635 .obj_size = sizeof(struct tcp_sock),
2636 .slab_flags = SLAB_DESTROY_BY_RCU,
2637 .twsk_prot = &tcp_timewait_sock_ops,
2638 .rsk_prot = &tcp_request_sock_ops,
2639 .h.hashinfo = &tcp_hashinfo,
2640 .no_autobind = true,
2641 #ifdef CONFIG_COMPAT
2642 .compat_setsockopt = compat_tcp_setsockopt,
2643 .compat_getsockopt = compat_tcp_getsockopt,
2646 EXPORT_SYMBOL(tcp_prot);
2649 static int __net_init tcp_sk_init(struct net *net)
2651 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2652 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2655 static void __net_exit tcp_sk_exit(struct net *net)
2657 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2660 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2662 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2665 static struct pernet_operations __net_initdata tcp_sk_ops = {
2666 .init = tcp_sk_init,
2667 .exit = tcp_sk_exit,
2668 .exit_batch = tcp_sk_exit_batch,
2671 void __init tcp_v4_init(void)
2673 inet_hashinfo_init(&tcp_hashinfo);
2674 if (register_pernet_subsys(&tcp_sk_ops))
2675 panic("Failed to create the TCP control socket.\n");