2 * ip_vs_proto_tcp.c: TCP load balancing support for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
12 * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
14 * Network name space (netns) aware.
15 * Global data moved to netns i.e struct netns_ipvs
16 * tcp_timeouts table has copy per netns in a hash table per
17 * protocol ip_vs_proto_data and is handled by netns
20 #define KMSG_COMPONENT "IPVS"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23 #include <linux/kernel.h>
25 #include <linux/tcp.h> /* for tcphdr */
27 #include <net/tcp.h> /* for csum_tcpudp_magic */
28 #include <net/ip6_checksum.h>
29 #include <linux/netfilter.h>
30 #include <linux/netfilter_ipv4.h>
32 #include <net/ip_vs.h>
/*
 * tcp_conn_schedule(): decide whether a new TCP packet should create an
 * IPVS connection.  Locates the virtual service matching the packet's
 * destination (or source, for inverse/ICMP-embedded lookups), then asks
 * ip_vs_schedule() to pick a real server and create the connection entry.
 * On failure, ip_vs_leave() supplies the verdict.
 * NOTE(review): this extract is lossy -- several original lines (braces,
 * return paths, the 'ignored' declaration) are missing; code left
 * byte-identical.
 */
35 tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
36 struct ip_vs_proto_data *pd,
37 int *verdict, struct ip_vs_conn **cpp,
38 struct ip_vs_iphdr *iph)
40 struct ip_vs_service *svc;
41 struct tcphdr _tcph, *th;
42 __be16 _ports[2], *ports = NULL;
44 /* In the event of icmp, we're only guaranteed to have the first 8
45 * bytes of the transport header, so we only check the rest of the
46 * TCP packet for non-ICMP packets
48 if (likely(!ip_vs_iph_icmp(iph))) {
/* Pull the TCP header (possibly from a fragment) into _tcph. */
49 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
/* Only schedule on SYN unless sloppy_tcp allows mid-stream pickup;
 * never schedule on RST. */
51 if (th->rst || !(sysctl_sloppy_tcp(ipvs) || th->syn))
/* ICMP case: only the first 8 bytes (the ports) are guaranteed. */
56 ports = skb_header_pointer(
57 skb, iph->len, sizeof(_ports), &_ports);
65 /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
/* Normal direction: match service by destination addr/port;
 * inverse direction: match by source addr/port. */
68 if (likely(!ip_vs_iph_inverse(iph)))
69 svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
70 &iph->daddr, ports[1]);
72 svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
73 &iph->saddr, ports[0]);
/* Defense mode: drop new connections when the box is overloaded. */
78 if (ip_vs_todrop(ipvs)) {
80 * It seems that we are very loaded.
81 * We have to drop this packet :(
89 * Let the virtual server select a real server for the
90 * incoming connection, and create a connection entry.
92 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
93 if (!*cpp && ignored <= 0) {
95 *verdict = ip_vs_leave(svc, skb, pd, iph);
/*
 * tcp_fast_csum_update(): incrementally adjust the TCP checksum after an
 * address + port rewrite, without re-summing the payload.  Uses the
 * standard "difference" technique on the one's-complement sum; the IPv6
 * branch diffs a 16-byte address, the IPv4 branch a 4-byte one.
 * NOTE(review): lossy extract -- braces and the if/else around the
 * CONFIG_IP_VS_IPV6 split are missing; code left byte-identical.
 */
109 tcp_fast_csum_update(int af, struct tcphdr *tcph,
110 const union nf_inet_addr *oldip,
111 const union nf_inet_addr *newip,
112 __be16 oldport, __be16 newport)
114 #ifdef CONFIG_IP_VS_IPV6
117 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
118 ip_vs_check_diff2(oldport, newport,
119 ~csum_unfold(tcph->check))));
123 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
124 ip_vs_check_diff2(oldport, newport,
125 ~csum_unfold(tcph->check))));
/*
 * tcp_partial_csum_update(): adjust the pseudo-header portion of the
 * checksum for CHECKSUM_PARTIAL skbs, where the device will finish the
 * transport checksum.  Diffs the address and the length fields only
 * (ports are covered by the partial csum itself).
 * NOTE(review): lossy extract -- enclosing braces/if-else missing; code
 * left byte-identical.
 */
130 tcp_partial_csum_update(int af, struct tcphdr *tcph,
131 const union nf_inet_addr *oldip,
132 const union nf_inet_addr *newip,
133 __be16 oldlen, __be16 newlen)
135 #ifdef CONFIG_IP_VS_IPV6
138 ~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
139 ip_vs_check_diff2(oldlen, newlen,
140 csum_unfold(tcph->check))));
144 ~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
145 ip_vs_check_diff2(oldlen, newlen,
146 csum_unfold(tcph->check))));
/*
 * tcp_snat_handler(): source-NAT an outgoing (server->client) TCP packet:
 * rewrite the source port to the virtual port and fix up the checksum.
 * Three checksum strategies, chosen by skb->ip_summed and whether an
 * application helper mangled the payload:
 *   CHECKSUM_PARTIAL -> pseudo-header diff only (device finishes csum);
 *   no payload change -> fast incremental update;
 *   otherwise        -> full recomputation over the payload.
 * NOTE(review): lossy extract -- declarations (tcph, oldlen, ret),
 * return statements and several braces are missing; code left
 * byte-identical.
 */
151 tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
152 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
155 unsigned int tcphoff = iph->len;
157 int payload_csum = 0;
159 #ifdef CONFIG_IP_VS_IPV6
/* Cannot mangle non-first IPv6 fragments: no transport header there. */
160 if (cp->af == AF_INET6 && iph->fragoffs)
163 oldlen = skb->len - tcphoff;
165 /* csum_check requires unshared skb */
166 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
169 if (unlikely(cp->app != NULL)) {
172 /* Some checks before mangling */
173 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
176 /* Call application helper if needed */
177 if (!(ret = ip_vs_app_pkt_out(cp, skb)))
179 /* ret=2: csum update is needed after payload mangling */
181 oldlen = skb->len - tcphoff;
/* Rewrite source port to the virtual service port. */
186 tcph = (void *)skb_network_header(skb) + tcphoff;
187 tcph->source = cp->vport;
189 /* Adjust TCP checksums */
190 if (skb->ip_summed == CHECKSUM_PARTIAL) {
191 tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
193 htons(skb->len - tcphoff));
194 } else if (!payload_csum) {
195 /* Only port and addr are changed, do fast csum update */
196 tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
197 cp->dport, cp->vport);
198 if (skb->ip_summed == CHECKSUM_COMPLETE)
199 skb->ip_summed = (cp->app && pp->csum_check) ?
200 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
202 /* full checksum calculation */
204 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
205 #ifdef CONFIG_IP_VS_IPV6
206 if (cp->af == AF_INET6)
207 tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
210 cp->protocol, skb->csum);
213 tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
218 skb->ip_summed = CHECKSUM_UNNECESSARY;
220 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
221 pp->name, tcph->check,
222 (char*)&(tcph->check) - (char*)tcph);
/*
 * tcp_dnat_handler(): destination-NAT an incoming (client->server) TCP
 * packet: rewrite the destination port to the real-server port and fix
 * the checksum.  Mirror image of tcp_snat_handler() -- same three
 * checksum strategies, but with vaddr/daddr and vport/dport swapped and
 * ip_vs_app_pkt_in() as the payload-mangling hook.
 * NOTE(review): lossy extract -- declarations, returns and braces are
 * missing; code left byte-identical.
 */
229 tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
230 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
233 unsigned int tcphoff = iph->len;
235 int payload_csum = 0;
237 #ifdef CONFIG_IP_VS_IPV6
/* Cannot mangle non-first IPv6 fragments. */
238 if (cp->af == AF_INET6 && iph->fragoffs)
241 oldlen = skb->len - tcphoff;
243 /* csum_check requires unshared skb */
244 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
247 if (unlikely(cp->app != NULL)) {
250 /* Some checks before mangling */
251 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
255 * Attempt ip_vs_app call.
256 * It will fix ip_vs_conn and iph ack_seq stuff
258 if (!(ret = ip_vs_app_pkt_in(cp, skb)))
260 /* ret=2: csum update is needed after payload mangling */
262 oldlen = skb->len - tcphoff;
/* Rewrite destination port to the real server port. */
267 tcph = (void *)skb_network_header(skb) + tcphoff;
268 tcph->dest = cp->dport;
271 * Adjust TCP checksums
273 if (skb->ip_summed == CHECKSUM_PARTIAL) {
274 tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
276 htons(skb->len - tcphoff));
277 } else if (!payload_csum) {
278 /* Only port and addr are changed, do fast csum update */
279 tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
280 cp->vport, cp->dport);
281 if (skb->ip_summed == CHECKSUM_COMPLETE)
282 skb->ip_summed = (cp->app && pp->csum_check) ?
283 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
285 /* full checksum calculation */
287 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
288 #ifdef CONFIG_IP_VS_IPV6
289 if (cp->af == AF_INET6)
290 tcph->check = csum_ipv6_magic(&cp->caddr.in6,
293 cp->protocol, skb->csum);
296 tcph->check = csum_tcpudp_magic(cp->caddr.ip,
301 skb->ip_summed = CHECKSUM_UNNECESSARY;
/*
 * tcp_csum_check(): verify the TCP checksum of an skb before mangling.
 * CHECKSUM_NONE computes skb->csum then falls through to the
 * CHECKSUM_COMPLETE verification against the pseudo-header (v4 or v6).
 * NOTE(review): lossy extract -- case labels (CHECKSUM_NONE/UNNECESSARY),
 * return statements and braces are missing; code left byte-identical.
 */
308 tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
310 unsigned int tcphoff;
312 #ifdef CONFIG_IP_VS_IPV6
314 tcphoff = sizeof(struct ipv6hdr);
317 tcphoff = ip_hdrlen(skb);
319 switch (skb->ip_summed) {
/* CHECKSUM_NONE: sum the transport payload, then fall through. */
321 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
322 case CHECKSUM_COMPLETE:
323 #ifdef CONFIG_IP_VS_IPV6
324 if (af == AF_INET6) {
325 if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
326 &ipv6_hdr(skb)->daddr,
328 ipv6_hdr(skb)->nexthdr,
330 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
331 "Failed checksum for");
336 if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
339 ip_hdr(skb)->protocol,
341 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
342 "Failed checksum for");
347 /* No need to checksum. */
/*
 * Per-direction offsets into the state-transition tables: each table is
 * laid out as 4 rows (syn/fin/ack/rst) per direction, so the direction
 * selects a base row of 0, 4 or 8.
 */
355 #define TCP_DIR_INPUT 0
356 #define TCP_DIR_OUTPUT 4
357 #define TCP_DIR_INPUT_ONLY 8
359 static const int tcp_state_off[IP_VS_DIR_LAST] = {
360 [IP_VS_DIR_INPUT] = TCP_DIR_INPUT,
361 [IP_VS_DIR_OUTPUT] = TCP_DIR_OUTPUT,
362 [IP_VS_DIR_INPUT_ONLY] = TCP_DIR_INPUT_ONLY,
366 * Timeout table[state]
/*
 * Default per-state connection timeouts (jiffies).  Each netns gets its
 * own tunable copy via ip_vs_create_timeout_table() in __ip_vs_tcp_init();
 * this table is the template and the fallback when pd is unavailable.
 */
368 static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
369 [IP_VS_TCP_S_NONE] = 2*HZ,
370 [IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ,
371 [IP_VS_TCP_S_SYN_SENT] = 2*60*HZ,
372 [IP_VS_TCP_S_SYN_RECV] = 1*60*HZ,
373 [IP_VS_TCP_S_FIN_WAIT] = 2*60*HZ,
374 [IP_VS_TCP_S_TIME_WAIT] = 2*60*HZ,
375 [IP_VS_TCP_S_CLOSE] = 10*HZ,
376 [IP_VS_TCP_S_CLOSE_WAIT] = 60*HZ,
377 [IP_VS_TCP_S_LAST_ACK] = 30*HZ,
378 [IP_VS_TCP_S_LISTEN] = 2*60*HZ,
379 [IP_VS_TCP_S_SYNACK] = 120*HZ,
380 [IP_VS_TCP_S_LAST] = 2*HZ,
/* Human-readable names for each TCP connection state (for debug/proc). */
383 static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
384 [IP_VS_TCP_S_NONE] = "NONE",
385 [IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED",
386 [IP_VS_TCP_S_SYN_SENT] = "SYN_SENT",
387 [IP_VS_TCP_S_SYN_RECV] = "SYN_RECV",
388 [IP_VS_TCP_S_FIN_WAIT] = "FIN_WAIT",
389 [IP_VS_TCP_S_TIME_WAIT] = "TIME_WAIT",
390 [IP_VS_TCP_S_CLOSE] = "CLOSE",
391 [IP_VS_TCP_S_CLOSE_WAIT] = "CLOSE_WAIT",
392 [IP_VS_TCP_S_LAST_ACK] = "LAST_ACK",
393 [IP_VS_TCP_S_LISTEN] = "LISTEN",
394 [IP_VS_TCP_S_SYNACK] = "SYNACK",
395 [IP_VS_TCP_S_LAST] = "BUG!",
/*
 * Which states count as "active" for the dest's activeconns/inactconns
 * accounting in set_tcp_state(): pre-established and established states
 * are active; closing/closed/listening states are not.
 */
398 static const bool tcp_state_active_table[IP_VS_TCP_S_LAST] = {
399 [IP_VS_TCP_S_NONE] = false,
400 [IP_VS_TCP_S_ESTABLISHED] = true,
401 [IP_VS_TCP_S_SYN_SENT] = true,
402 [IP_VS_TCP_S_SYN_RECV] = true,
403 [IP_VS_TCP_S_FIN_WAIT] = false,
404 [IP_VS_TCP_S_TIME_WAIT] = false,
405 [IP_VS_TCP_S_CLOSE] = false,
406 [IP_VS_TCP_S_CLOSE_WAIT] = false,
407 [IP_VS_TCP_S_LAST_ACK] = false,
408 [IP_VS_TCP_S_LISTEN] = false,
409 [IP_VS_TCP_S_SYNACK] = true,
/* Short aliases for the states, used to keep the transition tables
 * below readable as compact matrices. */
412 #define sNO IP_VS_TCP_S_NONE
413 #define sES IP_VS_TCP_S_ESTABLISHED
414 #define sSS IP_VS_TCP_S_SYN_SENT
415 #define sSR IP_VS_TCP_S_SYN_RECV
416 #define sFW IP_VS_TCP_S_FIN_WAIT
417 #define sTW IP_VS_TCP_S_TIME_WAIT
418 #define sCL IP_VS_TCP_S_CLOSE
419 #define sCW IP_VS_TCP_S_CLOSE_WAIT
420 #define sLA IP_VS_TCP_S_LAST_ACK
421 #define sLI IP_VS_TCP_S_LISTEN
422 #define sSA IP_VS_TCP_S_SYNACK
/* One row of a transition table: current state -> next state. */
424 struct tcp_states_t {
425 int next_state[IP_VS_TCP_S_LAST];
/*
 * tcp_state_name(): map a state index to its printable name; out-of-range
 * or unnamed states yield an error/placeholder string.
 * NOTE(review): lossy extract -- braces and the out-of-range return line
 * are missing; code left byte-identical.
 */
428 static const char * tcp_state_name(int state)
430 if (state >= IP_VS_TCP_S_LAST)
432 return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
/*
 * tcp_state_active(): true if connections in this state count against a
 * destination's active-connection counter.
 * NOTE(review): lossy extract -- braces and the out-of-range return line
 * are missing; code left byte-identical.
 */
435 static bool tcp_state_active(int state)
437 if (state >= IP_VS_TCP_S_LAST)
439 return tcp_state_active_table[state];
/*
 * Normal-mode TCP state machine.  Indexed as
 * [direction_offset + event], where event is the tcp_state_idx() result
 * (syn/fin/ack/rst row) and the inner array is indexed by current state.
 * Three direction groups: INPUT (rows 0-3), OUTPUT (rows 4-7),
 * INPUT_ONLY (rows 8-11) -- see tcp_state_off[].
 */
442 static struct tcp_states_t tcp_states [] = {
444 /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
445 /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
446 /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }},
447 /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
448 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }},
451 /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
452 /*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }},
453 /*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
454 /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
455 /*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},
458 /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
459 /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
460 /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
461 /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
462 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
/*
 * Defense-mode ("secure_tcp") state machine, selected by
 * tcp_timeout_change() when the secure_tcp flag is on.  More conservative
 * than tcp_states: connections linger in SYNACK rather than being
 * promoted early, which limits the impact of SYN-flood style DoS.
 * Same [direction_offset + event][state] layout as tcp_states.
 */
465 static struct tcp_states_t tcp_states_dos [] = {
467 /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
468 /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
469 /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }},
470 /*ack*/ {{sES, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }},
471 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
474 /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
475 /*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }},
476 /*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
477 /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
478 /*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},
481 /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
482 /*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }},
483 /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
484 /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
485 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
/*
 * tcp_timeout_change(): switch this netns's TCP state machine between
 * the normal table and the DoS-defense table when the secure_tcp flag
 * (bit 0 of @flags) toggles.
 */
488 static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
490 int on = (flags & 1); /* secure_tcp */
493 ** FIXME: change secure_tcp to independent sysctl var
494 ** or make it per-service or per-app because it is valid
495 ** for most if not for all of the applications. Something
496 ** like "capabilities" (flags) for each object.
498 pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
501 static inline int tcp_state_idx(struct tcphdr *th)
/*
 * set_tcp_state(): advance the connection's TCP state machine for one
 * packet.  Picks the table row from the packet direction and flag bits,
 * updates active/inactive connection counters on state transitions, and
 * refreshes cp->timeout from the per-netns timeout table (falling back
 * to the static defaults).  Caller holds cp->lock (see
 * tcp_state_transition()).
 * NOTE(review): lossy extract -- state_idx declaration, some braces and
 * the pd/timeout_table condition line are missing; code left
 * byte-identical.
 */
515 set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
516 int direction, struct tcphdr *th)
519 int new_state = IP_VS_TCP_S_CLOSE;
520 int state_off = tcp_state_off[direction];
523 * Update state offset to INPUT_ONLY if necessary
524 * or delete NO_OUTPUT flag if output packet detected
526 if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
527 if (state_off == TCP_DIR_OUTPUT)
528 cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
530 state_off = TCP_DIR_INPUT_ONLY;
533 if ((state_idx = tcp_state_idx(th)) < 0) {
534 IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
/* Table lookup: direction row + event row, indexed by current state. */
539 pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
542 if (new_state != cp->state) {
543 struct ip_vs_dest *dest = cp->dest;
545 IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
546 "%s:%d state: %s->%s conn->refcnt:%d\n",
548 ((state_off == TCP_DIR_OUTPUT) ?
549 "output " : "input "),
554 IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
556 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
558 tcp_state_name(cp->state),
559 tcp_state_name(new_state),
560 atomic_read(&cp->refcnt));
/* Keep dest->activeconns / inactconns in sync with the new state. */
563 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
564 !tcp_state_active(new_state)) {
565 atomic_dec(&dest->activeconns);
566 atomic_inc(&dest->inactconns);
567 cp->flags |= IP_VS_CONN_F_INACTIVE;
568 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
569 tcp_state_active(new_state)) {
570 atomic_inc(&dest->activeconns);
571 atomic_dec(&dest->inactconns);
572 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
/* Per-netns timeouts when available, static defaults otherwise. */
578 cp->timeout = pd->timeout_table[cp->state = new_state];
579 else /* What to do ? */
580 cp->timeout = tcp_timeouts[cp->state = new_state];
584 * Handle state transitions
/*
 * tcp_state_transition(): locate the TCP header in @skb and apply one
 * state-machine step under cp->lock.
 * NOTE(review): lossy extract -- braces and the NULL check after
 * skb_header_pointer() are missing; code left byte-identical.
 */
587 tcp_state_transition(struct ip_vs_conn *cp, int direction,
588 const struct sk_buff *skb,
589 struct ip_vs_proto_data *pd)
591 struct tcphdr _tcph, *th;
593 #ifdef CONFIG_IP_VS_IPV6
594 int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
596 int ihl = ip_hdrlen(skb);
599 th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
/* cp->lock serializes state updates against concurrent packets. */
603 spin_lock_bh(&cp->lock);
604 set_tcp_state(pd, cp, direction, th);
605 spin_unlock_bh(&cp->lock);
/*
 * tcp_app_hashkey(): hash a service port into the tcp_apps[] bucket
 * index by folding the high bits onto the low bits.
 * NOTE(review): lossy extract -- the trailing "& TCP_APP_TAB_MASK" line
 * of the expression is missing; code left byte-identical.
 */
608 static inline __u16 tcp_app_hashkey(__be16 port)
610 return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
/*
 * tcp_register_app(): add an application helper incarnation to this
 * netns's tcp_apps hash, keyed by port; rejects duplicate ports and
 * bumps the protocol-data app counter.
 * NOTE(review): lossy extract -- locals (hash, i, ret), locking and
 * return paths are missing; code left byte-identical.
 */
615 static int tcp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
619 __be16 port = inc->port;
621 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
623 hash = tcp_app_hashkey(port);
/* Duplicate-port check before insertion. */
625 list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
626 if (i->port == port) {
631 list_add_rcu(&inc->p_list, &ipvs->tcp_apps[hash]);
632 atomic_inc(&pd->appcnt);
/*
 * tcp_unregister_app(): inverse of tcp_register_app() -- drop the app
 * counter and remove the incarnation from the RCU-protected hash list.
 */
640 tcp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
642 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
644 atomic_dec(&pd->appcnt);
645 list_del_rcu(&inc->p_list);
/*
 * tcp_app_conn_bind(): bind a freshly created NAT connection to the
 * application helper registered on its virtual port, if any, and run
 * the helper's init_conn() hook.  Non-NAT forwarding methods skip
 * binding entirely.
 * NOTE(review): lossy extract -- locals (hash, result), rcu_read_lock
 * section boundaries and return paths are missing; code left
 * byte-identical.
 */
650 tcp_app_conn_bind(struct ip_vs_conn *cp)
652 struct netns_ipvs *ipvs = cp->ipvs;
654 struct ip_vs_app *inc;
657 /* Default binding: bind app only for NAT */
658 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
661 /* Lookup application incarnations and bind the right one */
662 hash = tcp_app_hashkey(cp->vport);
665 list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) {
666 if (inc->port == cp->vport) {
/* Take a reference on the incarnation before using it. */
667 if (unlikely(!ip_vs_app_inc_get(inc)))
671 IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
672 "%s:%u to app %s on port %u\n",
674 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
676 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
678 inc->name, ntohs(inc->port));
682 result = inc->init_conn(inc, cp);
694 * Set LISTEN timeout. (ip_vs_conn_put will setup timer)
/*
 * ip_vs_tcp_conn_listen(): force a connection into LISTEN state with the
 * LISTEN timeout -- per-netns value when protocol data exists, static
 * default otherwise.  Used for persistent templates / FTP expectation.
 */
696 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
698 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(cp->ipvs, IPPROTO_TCP);
700 spin_lock_bh(&cp->lock);
701 cp->state = IP_VS_TCP_S_LISTEN;
702 cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
703 : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
704 spin_unlock_bh(&cp->lock);
707 /* ---------------------------------------------
708 * timeouts is netns related now.
709 * ---------------------------------------------
/*
 * __ip_vs_tcp_init(): per-netns setup -- init the app hash table, clone
 * the default timeout table so each netns can tune it independently,
 * and start with the normal (non-DoS) state machine.
 * NOTE(review): lossy extract -- the -ENOMEM and return 0 lines are
 * missing; code left byte-identical.
 */
711 static int __ip_vs_tcp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
713 ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
714 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
715 sizeof(tcp_timeouts));
716 if (!pd->timeout_table)
718 pd->tcp_state_table = tcp_states;
/* __ip_vs_tcp_exit(): per-netns teardown -- free the cloned timeout table. */
722 static void __ip_vs_tcp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
724 kfree(pd->timeout_table);
/*
 * Protocol descriptor registered with the IPVS core: wires the TCP
 * handlers above (scheduling, NAT, checksum, state machine, app binding)
 * into the generic ip_vs_protocol interface.
 * NOTE(review): lossy extract -- .name, .timeout fields and the closing
 * brace are missing from view; code left byte-identical.
 */
728 struct ip_vs_protocol ip_vs_protocol_tcp = {
730 .protocol = IPPROTO_TCP,
731 .num_states = IP_VS_TCP_S_LAST,
735 .init_netns = __ip_vs_tcp_init,
736 .exit_netns = __ip_vs_tcp_exit,
737 .register_app = tcp_register_app,
738 .unregister_app = tcp_unregister_app,
739 .conn_schedule = tcp_conn_schedule,
740 .conn_in_get = ip_vs_conn_in_get_proto,
741 .conn_out_get = ip_vs_conn_out_get_proto,
742 .snat_handler = tcp_snat_handler,
743 .dnat_handler = tcp_dnat_handler,
744 .csum_check = tcp_csum_check,
745 .state_name = tcp_state_name,
746 .state_transition = tcp_state_transition,
747 .app_conn_bind = tcp_app_conn_bind,
748 .debug_packet = ip_vs_tcpudp_debug_packet,
749 .timeout_change = tcp_timeout_change,