/*
 * ip_vs_xmit.c: various packet transmitters for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
16 #define KMSG_COMPONENT "IPVS"
17 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19 #include <linux/kernel.h>
20 #include <linux/slab.h>
21 #include <linux/tcp.h> /* for tcphdr */
23 #include <net/tcp.h> /* for csum_tcpudp_magic */
25 #include <net/icmp.h> /* for icmp_send */
26 #include <net/route.h> /* for ip_route_output */
28 #include <net/ip6_route.h>
29 #include <linux/icmpv6.h>
30 #include <linux/netfilter.h>
31 #include <linux/netfilter_ipv4.h>
33 #include <net/ip_vs.h>
37 * Destination cache to speed up outgoing route lookup
40 __ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst)
42 struct dst_entry *old_dst;
44 old_dst = dest->dst_cache;
45 dest->dst_cache = dst;
46 dest->dst_rtos = rtos;
50 static inline struct dst_entry *
51 __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie)
53 struct dst_entry *dst = dest->dst_cache;
58 || (dest->af == AF_INET && rtos != dest->dst_rtos)) &&
59 dst->ops->check(dst, cookie) == NULL) {
60 dest->dst_cache = NULL;
68 static struct rtable *
69 __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
71 struct rtable *rt; /* Route to the other host */
72 struct ip_vs_dest *dest = cp->dest;
75 spin_lock(&dest->dst_lock);
76 if (!(rt = (struct rtable *)
77 __ip_vs_dst_check(dest, rtos, 0))) {
82 .daddr = dest->addr.ip,
87 if (ip_route_output_key(&init_net, &rt, &fl)) {
88 spin_unlock(&dest->dst_lock);
89 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
93 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
94 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
96 atomic_read(&rt->u.dst.__refcnt), rtos);
98 spin_unlock(&dest->dst_lock);
104 .daddr = cp->daddr.ip,
109 if (ip_route_output_key(&init_net, &rt, &fl)) {
110 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
119 #ifdef CONFIG_IP_VS_IPV6
120 static struct rt6_info *
121 __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
123 struct rt6_info *rt; /* Route to the other host */
124 struct ip_vs_dest *dest = cp->dest;
127 spin_lock(&dest->dst_lock);
128 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0);
134 .daddr = dest->addr.in6,
143 rt = (struct rt6_info *)ip6_route_output(&init_net,
146 spin_unlock(&dest->dst_lock);
147 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n",
151 __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
152 IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n",
154 atomic_read(&rt->u.dst.__refcnt));
156 spin_unlock(&dest->dst_lock);
162 .daddr = cp->daddr.in6,
164 .s6_addr32 = { 0, 0, 0, 0 },
170 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
172 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n",
184 * Release dest->dst_cache before a dest is removed
187 ip_vs_dst_reset(struct ip_vs_dest *dest)
189 struct dst_entry *old_dst;
191 old_dst = dest->dst_cache;
192 dest->dst_cache = NULL;
193 dst_release(old_dst);
/*
 * Transmit a packet owned by IPVS: mark it so our own netfilter hooks
 * skip it, fix checksum state for forwarding, then push it through the
 * LOCAL_OUT hook to dst_output().
 */
#define IP_VS_XMIT(pf, skb, rt)				\
do {							\
	(skb)->ipvs_property = 1;			\
	skb_forward_csum(skb);				\
	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
		(rt)->u.dst.dev, dst_output);		\
} while (0)
206 * NULL transmitter (do nothing except return NF_ACCEPT)
209 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
210 struct ip_vs_protocol *pp)
212 /* we do not touch skb and do not need pskb ptr */
219 * Let packets bypass the destination when the destination is not
220 * available, it may be only used in transparent cache cluster.
223 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
224 struct ip_vs_protocol *pp)
226 struct rtable *rt; /* Route to the other host */
227 struct iphdr *iph = ip_hdr(skb);
236 .tos = RT_TOS(tos), } },
241 if (ip_route_output_key(&init_net, &rt, &fl)) {
242 IP_VS_DBG_RL("%s(): ip_route_output error, dest: %pI4\n",
243 __func__, &iph->daddr);
248 mtu = dst_mtu(&rt->u.dst);
249 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
251 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
252 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
257 * Call ip_send_check because we are not sure it is called
258 * after ip_defrag. Is copy-on-write needed?
260 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
264 ip_send_check(ip_hdr(skb));
268 skb_dst_set(skb, &rt->u.dst);
270 /* Another hack: avoid icmp_send in ip_fragment */
273 IP_VS_XMIT(PF_INET, skb, rt);
279 dst_link_failure(skb);
286 #ifdef CONFIG_IP_VS_IPV6
288 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
289 struct ip_vs_protocol *pp)
291 struct rt6_info *rt; /* Route to the other host */
292 struct ipv6hdr *iph = ipv6_hdr(skb);
299 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
304 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
306 IP_VS_DBG_RL("%s(): ip6_route_output error, dest: %pI6\n",
307 __func__, &iph->daddr);
312 mtu = dst_mtu(&rt->u.dst);
313 if (skb->len > mtu) {
314 dst_release(&rt->u.dst);
315 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
316 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
321 * Call ip_send_check because we are not sure it is called
322 * after ip_defrag. Is copy-on-write needed?
324 skb = skb_share_check(skb, GFP_ATOMIC);
325 if (unlikely(skb == NULL)) {
326 dst_release(&rt->u.dst);
332 skb_dst_set(skb, &rt->u.dst);
334 /* Another hack: avoid icmp_send in ip_fragment */
337 IP_VS_XMIT(PF_INET6, skb, rt);
343 dst_link_failure(skb);
352 * NAT transmitter (only for outside-to-inside nat forwarding)
353 * Not used for related ICMP
356 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
357 struct ip_vs_protocol *pp)
359 struct rtable *rt; /* Route to the other host */
361 struct iphdr *iph = ip_hdr(skb);
365 /* check if it is a connection of no-client-port */
366 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
368 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
371 ip_vs_conn_fill_cport(cp, *p);
372 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
375 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
379 mtu = dst_mtu(&rt->u.dst);
380 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
382 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
383 IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for");
387 /* copy-on-write the packet before mangling it */
388 if (!skb_make_writable(skb, sizeof(struct iphdr)))
391 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
396 skb_dst_set(skb, &rt->u.dst);
398 /* mangle the packet */
399 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
401 ip_hdr(skb)->daddr = cp->daddr.ip;
402 ip_send_check(ip_hdr(skb));
404 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
406 /* FIXME: when application helper enlarges the packet and the length
407 is larger than the MTU of outgoing device, there will be still
410 /* Another hack: avoid icmp_send in ip_fragment */
413 IP_VS_XMIT(PF_INET, skb, rt);
419 dst_link_failure(skb);
429 #ifdef CONFIG_IP_VS_IPV6
431 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
432 struct ip_vs_protocol *pp)
434 struct rt6_info *rt; /* Route to the other host */
439 /* check if it is a connection of no-client-port */
440 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
442 p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
446 ip_vs_conn_fill_cport(cp, *p);
447 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
450 rt = __ip_vs_get_out_rt_v6(cp);
455 mtu = dst_mtu(&rt->u.dst);
456 if (skb->len > mtu) {
457 dst_release(&rt->u.dst);
458 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
459 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
460 "ip_vs_nat_xmit_v6(): frag needed for");
464 /* copy-on-write the packet before mangling it */
465 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
468 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
473 skb_dst_set(skb, &rt->u.dst);
475 /* mangle the packet */
476 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
478 ipv6_hdr(skb)->daddr = cp->daddr.in6;
480 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
482 /* FIXME: when application helper enlarges the packet and the length
483 is larger than the MTU of outgoing device, there will be still
486 /* Another hack: avoid icmp_send in ip_fragment */
489 IP_VS_XMIT(PF_INET6, skb, rt);
495 dst_link_failure(skb);
501 dst_release(&rt->u.dst);
508 * IP Tunneling transmitter
510 * This function encapsulates the packet in a new IP packet, its
511 * destination will be set to cp->daddr. Most code of this function
512 * is taken from ipip.c.
514 * It is used in VS/TUN cluster. The load balancer selects a real
515 * server from a cluster based on a scheduling algorithm,
516 * encapsulates the request packet and forwards it to the selected
517 * server. For example, all real servers are configured with
518 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
519 * the encapsulated packet, it will decapsulate the packet, processe
520 * the request and return the response packets directly to the client
521 * without passing the load balancer. This can greatly increase the
522 * scalability of virtual server.
524 * Used for ANY protocol
527 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
528 struct ip_vs_protocol *pp)
530 struct rtable *rt; /* Route to the other host */
531 struct net_device *tdev; /* Device to other host */
532 struct iphdr *old_iph = ip_hdr(skb);
533 u8 tos = old_iph->tos;
534 __be16 df = old_iph->frag_off;
535 sk_buff_data_t old_transport_header = skb->transport_header;
536 struct iphdr *iph; /* Our new IP header */
537 unsigned int max_headroom; /* The extra header space needed */
542 if (skb->protocol != htons(ETH_P_IP)) {
543 IP_VS_DBG_RL("%s(): protocol error, "
544 "ETH_P_IP: %d, skb protocol: %d\n",
545 __func__, htons(ETH_P_IP), skb->protocol);
549 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
552 tdev = rt->u.dst.dev;
554 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
557 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
561 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
563 df |= (old_iph->frag_off & htons(IP_DF));
565 if ((old_iph->frag_off & htons(IP_DF))
566 && mtu < ntohs(old_iph->tot_len)) {
567 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
569 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
574 * Okay, now see if we can stuff it in the buffer as-is.
576 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
578 if (skb_headroom(skb) < max_headroom
579 || skb_cloned(skb) || skb_shared(skb)) {
580 struct sk_buff *new_skb =
581 skb_realloc_headroom(skb, max_headroom);
585 IP_VS_ERR_RL("%s(): no memory\n", __func__);
590 old_iph = ip_hdr(skb);
593 skb->transport_header = old_transport_header;
595 /* fix old IP header checksum */
596 ip_send_check(old_iph);
598 skb_push(skb, sizeof(struct iphdr));
599 skb_reset_network_header(skb);
600 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
604 skb_dst_set(skb, &rt->u.dst);
607 * Push down and install the IPIP header.
611 iph->ihl = sizeof(struct iphdr)>>2;
613 iph->protocol = IPPROTO_IPIP;
615 iph->daddr = rt->rt_dst;
616 iph->saddr = rt->rt_src;
617 iph->ttl = old_iph->ttl;
618 ip_select_ident(iph, &rt->u.dst, NULL);
620 /* Another hack: avoid icmp_send in ip_fragment */
630 dst_link_failure(skb);
637 #ifdef CONFIG_IP_VS_IPV6
639 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
640 struct ip_vs_protocol *pp)
642 struct rt6_info *rt; /* Route to the other host */
643 struct net_device *tdev; /* Device to other host */
644 struct ipv6hdr *old_iph = ipv6_hdr(skb);
645 sk_buff_data_t old_transport_header = skb->transport_header;
646 struct ipv6hdr *iph; /* Our new IP header */
647 unsigned int max_headroom; /* The extra header space needed */
652 if (skb->protocol != htons(ETH_P_IPV6)) {
653 IP_VS_DBG_RL("%s(): protocol error, "
654 "ETH_P_IPV6: %d, skb protocol: %d\n",
655 __func__, htons(ETH_P_IPV6), skb->protocol);
659 rt = __ip_vs_get_out_rt_v6(cp);
663 tdev = rt->u.dst.dev;
665 mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
666 /* TODO IPv6: do we need this check in IPv6? */
668 dst_release(&rt->u.dst);
669 IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__);
673 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
675 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
676 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
677 dst_release(&rt->u.dst);
678 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
683 * Okay, now see if we can stuff it in the buffer as-is.
685 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
687 if (skb_headroom(skb) < max_headroom
688 || skb_cloned(skb) || skb_shared(skb)) {
689 struct sk_buff *new_skb =
690 skb_realloc_headroom(skb, max_headroom);
692 dst_release(&rt->u.dst);
694 IP_VS_ERR_RL("%s(): no memory\n", __func__);
699 old_iph = ipv6_hdr(skb);
702 skb->transport_header = old_transport_header;
704 skb_push(skb, sizeof(struct ipv6hdr));
705 skb_reset_network_header(skb);
706 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
710 skb_dst_set(skb, &rt->u.dst);
713 * Push down and install the IPIP header.
717 iph->nexthdr = IPPROTO_IPV6;
718 iph->payload_len = old_iph->payload_len;
719 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
720 iph->priority = old_iph->priority;
721 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
722 iph->daddr = rt->rt6i_dst.addr;
723 iph->saddr = cp->vaddr.in6; /* rt->rt6i_src.addr; */
724 iph->hop_limit = old_iph->hop_limit;
726 /* Another hack: avoid icmp_send in ip_fragment */
736 dst_link_failure(skb);
746 * Direct Routing transmitter
747 * Used for ANY protocol
750 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
751 struct ip_vs_protocol *pp)
753 struct rtable *rt; /* Route to the other host */
754 struct iphdr *iph = ip_hdr(skb);
759 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
763 mtu = dst_mtu(&rt->u.dst);
764 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
765 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
767 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
772 * Call ip_send_check because we are not sure it is called
773 * after ip_defrag. Is copy-on-write needed?
775 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
779 ip_send_check(ip_hdr(skb));
783 skb_dst_set(skb, &rt->u.dst);
785 /* Another hack: avoid icmp_send in ip_fragment */
788 IP_VS_XMIT(PF_INET, skb, rt);
794 dst_link_failure(skb);
801 #ifdef CONFIG_IP_VS_IPV6
803 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
804 struct ip_vs_protocol *pp)
806 struct rt6_info *rt; /* Route to the other host */
811 rt = __ip_vs_get_out_rt_v6(cp);
816 mtu = dst_mtu(&rt->u.dst);
817 if (skb->len > mtu) {
818 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
819 dst_release(&rt->u.dst);
820 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
825 * Call ip_send_check because we are not sure it is called
826 * after ip_defrag. Is copy-on-write needed?
828 skb = skb_share_check(skb, GFP_ATOMIC);
829 if (unlikely(skb == NULL)) {
830 dst_release(&rt->u.dst);
836 skb_dst_set(skb, &rt->u.dst);
838 /* Another hack: avoid icmp_send in ip_fragment */
841 IP_VS_XMIT(PF_INET6, skb, rt);
847 dst_link_failure(skb);
857 * ICMP packet transmitter
858 * called by the ip_vs_in_icmp
861 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
862 struct ip_vs_protocol *pp, int offset)
864 struct rtable *rt; /* Route to the other host */
870 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
871 forwarded directly here, because there is no need to
872 translate address/port back */
873 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
875 rc = cp->packet_xmit(skb, cp, pp);
878 /* do not touch skb anymore */
879 atomic_inc(&cp->in_pkts);
884 * mangle and send the packet here (only for VS/NAT)
887 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos))))
891 mtu = dst_mtu(&rt->u.dst);
892 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
894 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
895 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
899 /* copy-on-write the packet before mangling it */
900 if (!skb_make_writable(skb, offset))
903 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
906 /* drop the old route when skb is not shared */
908 skb_dst_set(skb, &rt->u.dst);
910 ip_vs_nat_icmp(skb, pp, cp, 0);
912 /* Another hack: avoid icmp_send in ip_fragment */
915 IP_VS_XMIT(PF_INET, skb, rt);
921 dst_link_failure(skb);
933 #ifdef CONFIG_IP_VS_IPV6
935 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
936 struct ip_vs_protocol *pp, int offset)
938 struct rt6_info *rt; /* Route to the other host */
944 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
945 forwarded directly here, because there is no need to
946 translate address/port back */
947 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
949 rc = cp->packet_xmit(skb, cp, pp);
952 /* do not touch skb anymore */
953 atomic_inc(&cp->in_pkts);
958 * mangle and send the packet here (only for VS/NAT)
961 rt = __ip_vs_get_out_rt_v6(cp);
966 mtu = dst_mtu(&rt->u.dst);
967 if (skb->len > mtu) {
968 dst_release(&rt->u.dst);
969 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
970 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
974 /* copy-on-write the packet before mangling it */
975 if (!skb_make_writable(skb, offset))
978 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
981 /* drop the old route when skb is not shared */
983 skb_dst_set(skb, &rt->u.dst);
985 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
987 /* Another hack: avoid icmp_send in ip_fragment */
990 IP_VS_XMIT(PF_INET6, skb, rt);
996 dst_link_failure(skb);
1004 dst_release(&rt->u.dst);