/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/config.h>

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet_csk_bind_conflict);
}

static void tcp_v4_hash(struct sock *sk)
{
	inet_hash(&tcp_hashinfo, sk);
}

void tcp_unhash(struct sock *sk)
{
	inet_unhash(&tcp_hashinfo, sk);
}
static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	return secure_tcp_sequence_number(skb->nh.iph->daddr,
					  skb->nh.iph->saddr,
					  skb->h.th->dest,
					  skb->h.th->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host, but per port pair, and the TW bucket is used
	   as the state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
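
/* Illustrative userspace sketch, not part of the kernel build: the
 * sysctl_tcp_tw_reuse knob gating the reuse path above is exposed as
 * /proc/sys/net/ipv4/tcp_tw_reuse. Error handling is deliberately thin.
 */
#if 0
#include <stdio.h>

static int enable_tw_reuse(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f)
		return -1;	/* typically needs root */
	fputs("1", f);		/* allow reusing TIME-WAIT port pairs */
	fclose(f);
	return 0;
}
#endif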
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table when entering TIME-WAIT,
		 * and initialize rx_opt.ts_recent from it when trying a new
		 * connection.
		 */
		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	tp->ext_header_len = 0;
	if (inet->opt)
		tp->ext_header_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);
	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/* This unhashes the socket and releases the local port, if necessary. */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}
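
/* Userspace view of the path above, for orientation only: a plain blocking
 * connect() on a TCP socket reaches tcp_v4_connect() through the protocol's
 * .connect hook. Sketch only; the address and port are arbitrary examples.
 */
#if 0
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_example(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	sin.sin_port = htons(80);			/* example port */
	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);	/* example addr */

	/* The -EINVAL/-EAFNOSUPPORT checks at the top of tcp_v4_connect()
	 * correspond to a short or non-AF_INET sockaddr passed here. */
	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif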
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
				     u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to go wrong... Remember the soft error
	 * for the case that this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    tp->pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
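
/* The cached path MTU that do_pmtu_discovery() just updated can be observed
 * from userspace on a connected socket via getsockopt(IP_MTU). Illustrative
 * sketch only, not part of the kernel build.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int get_path_mtu(int connected_fd)
{
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	if (getsockopt(connected_fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;	/* current route MTU as seen by this socket */
}
#endif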
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment
 * header points to the first 8 bytes of the tcp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
			 th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen normally.
			     * It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too, but it
	 * is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */
	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
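
/* Whether tcp_v4_err() reports a hard error immediately is gated by
 * inet->recverr, which userspace controls through the IP_RECVERR socket
 * option. A minimal sketch of turning it on (not part of the kernel build):
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int enable_recverr(int fd)
{
	int one = 1;

	/* With this set, ICMP-derived errors surface on the next socket
	 * call instead of only after a timeout. */
	return setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one));
}
#endif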
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}
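
/* For reference, a self-contained sketch of the RFC 793 pseudo-header
 * checksum that tcp_v4_check() ultimately computes: a 16-bit one's
 * complement sum over a pseudo header plus the segment. Illustrative
 * userspace code, not the kernel's optimized csum implementation;
 * assumes len < 65536 so the TCP length fits one 16-bit word.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t tcp4_checksum(const uint8_t saddr[4], const uint8_t daddr[4],
			      const uint8_t *seg, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Pseudo-header: source and destination address, protocol, length. */
	sum += (saddr[0] << 8) | saddr[1];
	sum += (saddr[2] << 8) | saddr[3];
	sum += (daddr[0] << 8) | daddr[1];
	sum += (daddr[2] << 8) | daddr[3];
	sum += 6;			/* IPPROTO_TCP */
	sum += len;			/* TCP header + payload length */

	/* TCP header and payload as big-endian 16-bit words. */
	for (i = 0; i + 1 < len; i += 2)
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)			/* pad an odd trailing byte with zero */
		sum += seg[len - 1] << 8;

	/* Fold the carries and take the one's complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif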
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset?
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcphdr rth;
	struct ip_reply_arg arg;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rth, 0, sizeof(struct tcphdr));
	rth.dest   = th->source;
	rth.source = th->dest;
	rth.doff   = sizeof(struct tcphdr) / 4;
	rth.rst    = 1;

	if (th->ack) {
		rth.seq = th->ack_seq;
	} else {
		rth.ack = 1;
		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				    skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof arg);
	arg.iov[0].iov_base = (unsigned char *)&rth;
	arg.iov[0].iov_len  = sizeof rth;
	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside the socket context, is certainly ugly. What can I do?
 */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th;
	struct {
		struct tcphdr th;
		u32 tsopt[3];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof arg);

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				     (TCPOPT_TIMESTAMP << 8) |
				     TCPOLEN_TIMESTAMP);
		rep.tsopt[1] = htonl(tcp_time_stamp);
		rep.tsopt[2] = htonl(ts);
		arg.iov[0].iov_len = sizeof(rep);
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto out;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
static inline void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(skb->h.th->dest));
	}
}
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
						     struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
struct request_sock_ops tcp_request_sock_ops = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct tcp_request_sock),
	.rtx_syn_ack	= tcp_v4_send_synack,
	.send_ack	= tcp_v4_reqsk_send_ack,
	.destructor	= tcp_v4_reqsk_destructor,
	.send_reset	= tcp_v4_send_reset,
};

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast addresses. */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations: they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop the request. It is better
	 * than clogging the syn queue with openreqs with exponentially
	 * increasing timeouts.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie) {
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on web servers,
		 * which contain information interesting only for windows'
		 * users) do not send their stamp in SYN. It is an easy case:
		 * we simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check it against new connection
		 * requests before accepting them.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				dst_release(dst);
				goto drop_and_free;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive. It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from %u.%u.%u.%u/%u\n",
				       NIPQUAD(saddr),
				       ntohs(skb->h.th->source));
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(sk, skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		reqsk_free(req);
	} else {
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0;
}
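
/* Userspace side of the two queues checked above, for orientation only:
 * listen()'s backlog bounds the accept queue probed by sk_acceptq_is_full(),
 * while the SYN queue length is governed by the tcp_max_syn_backlog sysctl.
 * Illustrative sketch using an arbitrary example port.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int listen_example(void)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	sin.sin_port = htons(8080);	/* example port */
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
	    listen(fd, 128) < 0)	/* 128 = accept-queue backlog */
		return -1;
	return accept(fd, NULL, NULL);	/* receives the completed child */
}
#endif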
/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = skb->nh.iph->ttl;
	newtp->ext_header_len = 0;
	if (newinet->opt)
		newtp->ext_header_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	__inet_hash(&tcp_hashinfo, newsk, 0);
	__inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct iphdr *iph = skb->nh.iph;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
					th->source, skb->nh.iph->daddr,
					ntohs(th->dest), inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				  skb->nh.iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
			   skb->nh.iph->daddr, ntohs(th->dest),
			   inet_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}
	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
							skb->nh.iph->daddr,
							ntohs(th->dest),
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule((struct inet_timewait_sock *)sk,
					     &tcp_death_row);
			inet_twsk_put((struct inet_timewait_sock *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
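
/* The demultiplexing step above keys the established hash on the full
 * 4-tuple. A toy userspace illustration of the same idea; this is NOT the
 * kernel's actual hash function, and all names here are hypothetical.
 */
#if 0
#include <stdint.h>

struct tuple4 {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

static unsigned int tuple4_hash(const struct tuple4 *t, unsigned int mask)
{
	/* Mix all four fields, then mask down to a table slot. */
	uint32_t h = t->saddr ^ t->daddr;

	h ^= ((uint32_t)t->sport << 16) | t->dport;
	h ^= h >> 16;
	return h & mask;	/* mask = table_size - 1, power of two */
}
#endif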
/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter synchronized state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
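
/* The (s32) subtraction in the comparisons above is serial-number
 * arithmetic in the RFC 1323 style: timestamps wrap, so "older" is defined
 * by the sign of the 32-bit difference. A self-contained illustration:
 */
#if 0
#include <stdint.h>

/* Returns nonzero if timestamp a is not newer than b, even across wrap. */
static int ts_not_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}
/* Example: ts_not_after(0xFFFFFFF0u, 0x10u) is true, because 0x10 is
 * 0x20 ticks "after" 0xFFFFFFF0 once the counter wraps. */
#endif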
struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= tcp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= tcp_v4_conn_request,
	.syn_recv_sock	= tcp_v4_syn_recv_sock,
	.remember_stamp	= tcp_v4_remember_stamp,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.addr2sockaddr	= inet_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in),
};
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	sk_stream_writequeue_purge(sk);

	/* Clean up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Clean the prequeue; it really must be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(&tcp_hashinfo, sk);

	/* If a sendmsg cached page exists, toss it. */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return tw->tw_node.next ?
		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_node *node;
	struct sock *sk = cur;
	struct tcp_iter_state* st = seq->private;

	if (!sk) {
		st->bucket = 0;
		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
		goto get_sk;
	}

	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= TCP_SYNQ_HSIZE)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	if (++st->bucket < INET_LHTABLE_SIZE) {
		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state* st = seq->private;
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_node *node;
		struct inet_timewait_sock *tw;

		/* We can reschedule _before_ having picked the target: */
		cond_resched_softirq();

		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family)
				continue;
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
			if (tw->tw_family != st->family)
				continue;
			rc = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	struct tcp_iter_state* st = seq->private;

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = tw_next(cur);
get_tw:
		while (tw && tw->tw_family != st->family)
			tw = tw_next(tw);
		if (tw) {
			cur = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* We can reschedule between buckets: */
		cond_resched_softirq();

		if (++st->bucket < tcp_hashinfo.ehash_size) {
			read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
		} else {
			cur = NULL;
			goto out;
		}
	} else
		sk = sk_next(sk);

	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family)
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state* st = seq->private;

	inet_listen_lock(&tcp_hashinfo);
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		inet_listen_unlock(&tcp_hashinfo);
		local_bh_disable();
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state* st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state* st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			inet_listen_unlock(&tcp_hashinfo);
			local_bh_disable();
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			inet_listen_unlock(&tcp_hashinfo);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		local_bh_enable();
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	struct tcp_iter_state *s;
	int rc;

	if (unlikely(afinfo == NULL))
		return -EINVAL;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	memset(s, 0, sizeof(*s));
	s->family	 = afinfo->family;
	s->seq_ops.start = tcp_seq_start;
	s->seq_ops.next	 = tcp_seq_next;
	s->seq_ops.show	 = afinfo->seq_show;
	s->seq_ops.stop	 = tcp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;
	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner	  = afinfo->owner;
	afinfo->seq_fops->open	  = tcp_seq_open;
	afinfo->seq_fops->read	  = seq_read;
	afinfo->seq_fops->llseek  = seq_lseek;
	afinfo->seq_fops->release = seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 char *tmpbuf, int i, int uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct inet_sock *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %u %u %u %u %d",
		i, src, srcp, dest, destp, sp->sk_state,
		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sp),
		icsk->icsk_probes_out,
		sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		icsk->icsk_rto,
		icsk->icsk_ack.ato,
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st;
	char tmpbuf[TMPSZ + 1];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, tmpbuf, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, tmpbuf, st->num);
		break;
	}
	seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
out:
	return 0;
}
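
/* The fixed-width records emitted above are what userspace tools such as
 * netstat parse back out of /proc/net/tcp. A hedged sketch (not part of the
 * kernel build) reading the first few fields of each data line:
 */
#if 0
#include <stdio.h>

static void dump_proc_net_tcp(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return;
	if (!fgets(line, sizeof(line), f)) {	/* skip the header line */
		fclose(f);
		return;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int laddr, lport, raddr, rport, state;

		/* Matches the "%4d: %08X:%04X %08X:%04X %02X" prefix
		 * produced by get_tcp4_sock() above. */
		if (sscanf(line, "%*d: %x:%x %x:%x %x",
			   &laddr, &lport, &raddr, &rport, &state) == 5)
			printf("%08X:%04X -> %08X:%04X st %02X\n",
			       laddr, lport, raddr, rport, state);
	}
	fclose(f);
}
#endif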
static struct file_operations tcp4_seq_fops;
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp",
	.family		= AF_INET,
	.seq_show	= tcp4_seq_show,
	.seq_fops	= &tcp4_seq_fops,
};

int __init tcp4_proc_init(void)
{
	return tcp_proc_register(&tcp4_seq_afinfo);
}

void tcp4_proc_exit(void)
{
	tcp_proc_unregister(&tcp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= tcp_v4_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v4_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
};
void __init tcp_v4_init(struct net_proto_family *ops)
{
	int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
	if (err < 0)
		panic("Failed to create the TCP control socket.\n");
	tcp_socket->sk->sk_allocation = GFP_ATOMIC;
	inet_sk(tcp_socket->sk)->uc_ttl = -1;

	/* Unhash it so that IP input processing does not even
	 * see it; we do not wish this socket to see incoming
	 * packets.
	 */
	tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(inet_bind_bucket_create);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_local_port_range);
EXPORT_SYMBOL(sysctl_tcp_low_latency);
EXPORT_SYMBOL(sysctl_tcp_tw_reuse);