2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on net/ipv4/icmp.c
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
21 * Andi Kleen : exception handling
22 * Andi Kleen add rate limits. never reply to an icmp.
23 * add more length checks and other fixes.
24 * yoshfuji : ensure to send parameter problem for
26 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
28 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
29 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
32 #define pr_fmt(fmt) "IPv6: " fmt
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
45 #include <linux/slab.h>
48 #include <linux/sysctl.h>
51 #include <linux/inet.h>
52 #include <linux/netdevice.h>
53 #include <linux/icmpv6.h>
59 #include <net/ip6_checksum.h>
61 #include <net/protocol.h>
63 #include <net/rawv6.h>
64 #include <net/transp_v6.h>
65 #include <net/ip6_route.h>
66 #include <net/addrconf.h>
69 #include <net/inet_common.h>
70 #include <net/dsfield.h>
72 #include <asm/uaccess.h>
75 * The ICMP socket(s). This is the most convenient way to flow control
76 * our ICMP output as well as maintain a clean interface throughout
77 * all layers. All Socketless IP sends will soon be gone.
79 * On SMP we have one ICMP socket per-cpu.
/* Return this netns's per-CPU ICMPv6 control socket for the current CPU.
 * NOTE(review): caller must have preemption disabled for smp_processor_id()
 * to be stable — presumably guaranteed by icmpv6_xmit_lock(); confirm. */
81 static inline struct sock *icmpv6_sk(struct net *net)
83 return net->ipv6.icmp_sk[smp_processor_id()];
/*
 * Protocol error handler: an ICMPv6 error arrived about an ICMPv6 packet
 * we previously sent.  Updates PMTU/redirect routing state and hands
 * echo-related errors to the ping layer.
 * NOTE(review): some original source lines are elided in this view.
 */
86 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
87 u8 type, u8 code, int offset, __be32 info)
89 /* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
90 struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
91 struct net *net = dev_net(skb->dev);
/* Packet Too Big carries the new MTU in 'info'; feed it to PMTU state. */
93 if (type == ICMPV6_PKT_TOOBIG)
94 ip6_update_pmtu(skb, net, info, 0, 0);
95 else if (type == NDISC_REDIRECT)
96 ip6_redirect(skb, net, skb->dev->ifindex, 0);
/* Only error messages (high bit of type clear) about our own echo
 * requests are forwarded to ping_err(). */
98 if (!(type & ICMPV6_INFOMSG_MASK))
99 if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
100 ping_err(skb, offset, info);
/* Forward declaration: receive handler registered in the table below. */
103 static int icmpv6_rcv(struct sk_buff *skb);
/* inet6 protocol registration for IPPROTO_ICMPV6: no xfrm policy check on
 * input (done manually in icmpv6_rcv) and no further protocol chaining. */
105 static const struct inet6_protocol icmpv6_protocol = {
106 .handler = icmpv6_rcv,
107 .err_handler = icmpv6_err,
108 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/* Grab the per-CPU ICMPv6 socket's slock (trylock to avoid deadlocking on
 * re-entry from the output path).  Returns the locked socket, or — per the
 * elided lines — presumably NULL when the trylock fails; confirm. */
111 static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
118 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
119 /* This can happen if the output path (f.e. SIT or
120 * ip6ip6 tunnel) signals dst_link_failure() for an
121 * outgoing ICMP6 packet.
/* Release the per-CPU ICMPv6 socket lock taken by icmpv6_xmit_lock(). */
129 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
131 spin_unlock_bh(&sk->sk_lock.slock);
135 * Figure out, may we reply to this packet with icmp error.
137 * We do not reply, if:
138 * - it was icmp error message.
139 * - it is truncated, so that it is known, that protocol is ICMPV6
140 * (i.e. in the middle of some exthdr)
/*
 * Decide whether we must NOT reply to this packet with an ICMPv6 error:
 * true when the packet is itself an ICMPv6 error (or is truncated inside
 * its extension headers so the final protocol cannot be determined).
 * NOTE(review): several lines (declarations, early returns) are elided here.
 */
145 static bool is_ineligible(const struct sk_buff *skb)
147 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
148 int len = skb->len - ptr;
149 __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
/* Walk past all extension headers to find the transport protocol. */
155 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
158 if (nexthdr == IPPROTO_ICMPV6) {
160 tp = skb_header_pointer(skb,
161 ptr+offsetof(struct icmp6hdr, icmp6_type),
162 sizeof(_type), &_type);
/* An ICMPv6 *error* message (info-mask bit clear) is ineligible. */
164 !(*tp & ICMPV6_INFOMSG_MASK))
171 * Check the ICMP output rate limit
/*
 * Rate-limit outgoing ICMPv6 error messages per destination, using the
 * inet_peer token bucket with a timeout scaled by sysctl icmpv6_time.
 * Returns true when sending is allowed.
 * NOTE(review): parts of the body (dst error path, release) are elided.
 */
173 static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
176 struct dst_entry *dst;
177 struct net *net = sock_net(sk);
180 /* Informational messages are not limited. */
181 if (type & ICMPV6_INFOMSG_MASK)
184 /* Do not limit pmtu discovery, it would break it. */
185 if (type == ICMPV6_PKT_TOOBIG)
189 * Look up the output route.
190 * XXX: perhaps the expire for routing entries cloned by
191 * this lookup should be more aggressive (not longer than timeout).
193 dst = ip6_route_output(net, sk, fl6);
195 IP6_INC_STATS(net, ip6_dst_idev(dst),
196 IPSTATS_MIB_OUTNOROUTES);
/* Loopback traffic is never rate limited. */
197 } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
200 struct rt6_info *rt = (struct rt6_info *)dst;
201 int tmo = net->ipv6.sysctl.icmpv6_time;
202 struct inet_peer *peer;
204 /* Give more bandwidth to wider prefixes. */
205 if (rt->rt6i_dst.plen < 128)
206 tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
208 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
209 res = inet_peer_xrlim_allow(peer, tmo);
218 * an inline helper for the "simple" if statement below
219 * checks if parameter problem report is caused by an
220 * unrecognized IPv6 option that has the Option Type
221 * highest-order two bits set to 10
/* True when the option byte at 'offset' has its two high-order bits set to
 * 10 — the "unrecognized option" class that RFC 4443 allows a Parameter
 * Problem to be sent for even on multicast destinations.
 * NOTE(review): the NULL check after skb_header_pointer() is elided here. */
224 static bool opt_unrec(struct sk_buff *skb, __u32 offset)
228 offset += skb_network_offset(skb);
229 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
232 return (*op & 0xC0) == 0x80;
/*
 * Finalize a queued ICMPv6 message: copy the prepared header 'thdr' into
 * the first queued skb, compute the ICMPv6 checksum (single-skb fast path
 * or a walk over all queued fragments), then push the frames out.
 * NOTE(review): error-path and variable-declaration lines are elided.
 */
235 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
236 struct icmp6hdr *thdr, int len)
239 struct icmp6hdr *icmp6h;
242 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
245 icmp6h = icmp6_hdr(skb);
246 memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
/* Checksum field must be zero while the checksum is being computed. */
247 icmp6h->icmp6_cksum = 0;
/* Fast path: everything fits in one skb. */
249 if (skb_queue_len(&sk->sk_write_queue) == 1) {
250 skb->csum = csum_partial(icmp6h,
251 sizeof(struct icmp6hdr), skb->csum);
252 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
254 len, fl6->flowi6_proto,
/* Slow path: fold the partial checksums of every queued fragment. */
259 skb_queue_walk(&sk->sk_write_queue, skb) {
260 tmp_csum = csum_add(tmp_csum, skb->csum);
263 tmp_csum = csum_partial(icmp6h,
264 sizeof(struct icmp6hdr), tmp_csum);
265 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
267 len, fl6->flowi6_proto,
270 ip6_push_pending_frames(sk);
/* ip6_append_data() getfrag callback: copy 'len' bytes of the offending
 * packet into the new skb while accumulating the checksum.  For error
 * messages, also attach the conntrack entry of the original skb. */
281 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
283 struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
284 struct sk_buff *org_skb = msg->skb;
287 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
289 skb->csum = csum_block_add(skb->csum, csum, odd);
290 if (!(msg->type & ICMPV6_INFOMSG_MASK))
291 nf_ct_attach(skb, org_skb);
/* Mobile IPv6: if the packet carries a Home Address destination option,
 * substitute the home address as the source so the ICMP error is built
 * against the mobile node's real (home) address.  Compiled out to a no-op
 * when CONFIG_IPV6_MIP6 is disabled. */
295 #if IS_ENABLED(CONFIG_IPV6_MIP6)
296 static void mip6_addr_swap(struct sk_buff *skb)
298 struct ipv6hdr *iph = ipv6_hdr(skb);
299 struct inet6_skb_parm *opt = IP6CB(skb);
300 struct ipv6_destopt_hao *hao;
305 off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
306 if (likely(off >= 0)) {
307 hao = (struct ipv6_destopt_hao *)
308 (skb_network_header(skb) + off);
/* NOTE(review): the swap's other half (hao->addr update) is elided here. */
310 iph->saddr = hao->addr;
/* !CONFIG_IPV6_MIP6 stub. */
316 static inline void mip6_addr_swap(struct sk_buff *skb) {}
/*
 * Route lookup for an outgoing ICMPv6 error, including the xfrm (IPsec)
 * pass and, on -EPERM, a relookup on the reverse-decoded flow so errors
 * about tunneled traffic can still be routed.  Returns a dst or ERR_PTR.
 * NOTE(review): several lines (fl2 setup, final selection/release) elided.
 */
319 static struct dst_entry *icmpv6_route_lookup(struct net *net,
324 struct dst_entry *dst, *dst2;
328 err = ip6_dst_lookup(sk, &dst, fl6);
/* RFC rule: never send an ICMP error from an anycast source. */
333 * We won't send icmp if the destination is known
336 if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
337 LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n");
339 return ERR_PTR(-EINVAL);
342 /* No need to clone since we're just using its address. */
345 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
/* -EPERM from xfrm: retry with the session decoded in reverse. */
350 if (PTR_ERR(dst) == -EPERM)
356 err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
358 goto relookup_failed;
360 err = ip6_dst_lookup(sk, &dst2, &fl2);
362 goto relookup_failed;
364 dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
374 goto relookup_failed;
384 * Send an ICMP message in response to a packet in error
/*
 * Build and transmit an ICMPv6 error (type/code/info) in response to the
 * offending packet 'skb', enforcing the RFC 4443 eligibility rules
 * (no errors to multicast except PTB/unknown-option, no errors about
 * errors, rate limiting) before queueing the reply.
 * NOTE(review): many original lines are elided in this view; control flow
 * between the visible statements is not fully shown.
 */
386 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
388 struct net *net = dev_net(skb->dev);
389 struct inet6_dev *idev = NULL;
390 struct ipv6hdr *hdr = ipv6_hdr(skb);
392 struct ipv6_pinfo *np;
393 const struct in6_addr *saddr = NULL;
394 struct dst_entry *dst;
395 struct icmp6hdr tmp_hdr;
397 struct icmpv6_msg msg;
/* Sanity check: the IPv6 header must lie fully within the skb data. */
404 if ((u8 *)hdr < skb->head ||
405 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
409 * Make sure we respect the rules
410 * i.e. RFC 1885 2.4(e)
411 * Rule (e.1) is enforced by not using icmp6_send
412 * in any code that processes icmp errors.
414 addr_type = ipv6_addr_type(&hdr->daddr);
416 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
/* Multicast / non-host destinations: only PTB or Parameter Problem for
 * an unrecognized option (10-class) may be answered — RFC 4443 2.4(e). */
423 if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
424 if (type != ICMPV6_PKT_TOOBIG &&
425 !(type == ICMPV6_PARAMPROB &&
426 code == ICMPV6_UNK_OPTION &&
427 (opt_unrec(skb, info))))
433 addr_type = ipv6_addr_type(&hdr->saddr);
439 if (__ipv6_addr_needs_scope_id(addr_type))
440 iif = skb->dev->ifindex;
443 * Must not send error if the source does not uniquely
444 * identify a single node (RFC2463 Section 2.4).
445 * We check unspecified / multicast addresses here,
446 * and anycast addresses will be checked later.
448 if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
449 LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n");
454 * Never answer to an ICMP packet.
456 if (is_ineligible(skb)) {
457 LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n");
/* Build the flow for the reply: swap direction, mark the ICMP type/code. */
463 memset(&fl6, 0, sizeof(fl6));
464 fl6.flowi6_proto = IPPROTO_ICMPV6;
465 fl6.daddr = hdr->saddr;
468 fl6.flowi6_oif = iif;
469 fl6.fl6_icmp_type = type;
470 fl6.fl6_icmp_code = code;
471 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
473 sk = icmpv6_xmit_lock(net);
/* Drop silently when the per-destination rate limit says no. */
478 if (!icmpv6_xrlim_allow(sk, type, &fl6))
481 tmp_hdr.icmp6_type = type;
482 tmp_hdr.icmp6_code = code;
483 tmp_hdr.icmp6_cksum = 0;
484 tmp_hdr.icmp6_pointer = htonl(info);
486 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
487 fl6.flowi6_oif = np->mcast_oif;
488 else if (!fl6.flowi6_oif)
489 fl6.flowi6_oif = np->ucast_oif;
491 dst = icmpv6_route_lookup(net, skb, sk, &fl6);
495 if (ipv6_addr_is_multicast(&fl6.daddr))
496 hlimit = np->mcast_hops;
498 hlimit = np->hop_limit;
500 hlimit = ip6_dst_hoplimit(dst);
503 msg.offset = skb_network_offset(skb);
/* Clamp the quoted payload so the error fits in the IPv6 minimum MTU. */
506 len = skb->len - msg.offset;
507 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
509 LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
510 goto out_dst_release;
514 idev = __in6_dev_get(skb->dev);
516 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
517 len + sizeof(struct icmp6hdr),
518 sizeof(struct icmp6hdr), hlimit,
519 np->tclass, NULL, &fl6, (struct rt6_info *)dst,
520 MSG_DONTWAIT, np->dontfrag);
/* On append failure: count the error and discard the queued frames. */
522 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
523 ip6_flush_pending_frames(sk);
525 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
526 len + sizeof(struct icmp6hdr));
532 icmpv6_xmit_unlock(sk);
535 /* Slightly more convenient version of icmp6_send.
537 void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
539 icmp6_send(skb, ICMPV6_PARAMPROB, code, pos);
/*
 * Reply to an incoming Echo Request with an Echo Reply: mirror the
 * request header, swap source/destination into the flow, and send via
 * ip6_append_data()/icmpv6_push_pending_frames().
 * NOTE(review): many original lines are elided in this view.
 */
543 static void icmpv6_echo_reply(struct sk_buff *skb)
545 struct net *net = dev_net(skb->dev);
547 struct inet6_dev *idev;
548 struct ipv6_pinfo *np;
549 const struct in6_addr *saddr = NULL;
550 struct icmp6hdr *icmph = icmp6_hdr(skb);
551 struct icmp6hdr tmp_hdr;
553 struct icmpv6_msg msg;
554 struct dst_entry *dst;
559 saddr = &ipv6_hdr(skb)->daddr;
/* Only reply for unicast destinations, or anycast when the sysctl
 * anycast_src_echo_reply permits it. */
561 if (!ipv6_unicast_destination(skb) &&
562 !(net->ipv6.sysctl.anycast_src_echo_reply &&
563 ipv6_anycast_destination(skb)))
/* Echo reply reuses the request's header with only the type changed. */
566 memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
567 tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
569 memset(&fl6, 0, sizeof(fl6));
570 fl6.flowi6_proto = IPPROTO_ICMPV6;
571 fl6.daddr = ipv6_hdr(skb)->saddr;
574 fl6.flowi6_oif = skb->dev->ifindex;
575 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
576 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
578 sk = icmpv6_xmit_lock(net);
583 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
584 fl6.flowi6_oif = np->mcast_oif;
585 else if (!fl6.flowi6_oif)
586 fl6.flowi6_oif = np->ucast_oif;
588 err = ip6_dst_lookup(sk, &dst, &fl6);
591 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
595 if (ipv6_addr_is_multicast(&fl6.daddr))
596 hlimit = np->mcast_hops;
598 hlimit = np->hop_limit;
600 hlimit = ip6_dst_hoplimit(dst);
602 idev = __in6_dev_get(skb->dev);
606 msg.type = ICMPV6_ECHO_REPLY;
/* Echo the request's traffic class back (DSCP mirroring). */
608 tclass = ipv6_get_dsfield(ipv6_hdr(skb));
609 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
610 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl6,
611 (struct rt6_info *)dst, MSG_DONTWAIT,
/* On append failure: count the error and discard the queued frames. */
615 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
616 ip6_flush_pending_frames(sk);
618 err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
619 skb->len + sizeof(struct icmp6hdr));
623 icmpv6_xmit_unlock(sk);
/*
 * Deliver a received ICMPv6 error to the upper-layer protocol it refers
 * to: locate the inner transport header past any extension headers, then
 * invoke the registered protocol's err_handler and raw-socket delivery.
 * NOTE(review): error-return lines are elided in this view.
 */
626 void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
628 const struct inet6_protocol *ipprot;
633 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
636 nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
637 if (ipv6_ext_hdr(nexthdr)) {
638 /* now skip over extension headers */
639 inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
640 &nexthdr, &frag_off);
644 inner_offset = sizeof(struct ipv6hdr);
647 /* Check the header including 8 bytes of inner protocol header. */
648 if (!pskb_may_pull(skb, inner_offset+8))
651 /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
652 Without this we will not be able f.e. to make source routed
654 Corresponding argument (opt) to notifiers is already added.
659 ipprot = rcu_dereference(inet6_protos[nexthdr]);
660 if (ipprot && ipprot->err_handler)
661 ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
/* Raw sockets bound to the inner protocol also get the error. */
664 raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
668 * Handle icmp messages
/*
 * Main ICMPv6 input path: xfrm policy check, header pull, checksum
 * verification, per-type statistics, then dispatch on icmp6_type.
 * NOTE(review): many lines (returns, breaks, labels) are elided here.
 */
671 static int icmpv6_rcv(struct sk_buff *skb)
673 struct net_device *dev = skb->dev;
674 struct inet6_dev *idev = __in6_dev_get(dev);
675 const struct in6_addr *saddr, *daddr;
676 struct icmp6hdr *hdr;
/* Manual xfrm policy check (INET6_PROTO_NOPOLICY is set on this proto). */
679 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
680 struct sec_path *sp = skb_sec_path(skb);
683 if (!(sp && sp->xvec[sp->len - 1]->props.flags &
687 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
/* Temporarily advance the network header for the reverse policy check,
 * then restore it. */
690 nh = skb_network_offset(skb);
691 skb_set_network_header(skb, sizeof(*hdr));
693 if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
696 skb_set_network_header(skb, nh);
699 ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
701 saddr = &ipv6_hdr(skb)->saddr;
702 daddr = &ipv6_hdr(skb)->daddr;
704 /* Perform checksum. */
705 switch (skb->ip_summed) {
706 case CHECKSUM_COMPLETE:
707 if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
/* Fallback: software checksum over the whole packet. */
712 skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
714 if (__skb_checksum_complete(skb)) {
715 LIMIT_NETDEBUG(KERN_DEBUG
716 "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
722 if (!pskb_pull(skb, sizeof(*hdr)))
725 hdr = icmp6_hdr(skb);
727 type = hdr->icmp6_type;
729 ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
/* Dispatch by message type. */
732 case ICMPV6_ECHO_REQUEST:
733 icmpv6_echo_reply(skb);
736 case ICMPV6_ECHO_REPLY:
740 case ICMPV6_PKT_TOOBIG:
741 /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
742 standard destination cache. Seems, only "advanced"
743 destination cache will allow to solve this problem
746 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
748 hdr = icmp6_hdr(skb);
751 * Drop through to notify
754 case ICMPV6_DEST_UNREACH:
755 case ICMPV6_TIME_EXCEED:
756 case ICMPV6_PARAMPROB:
757 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
/* Neighbour discovery messages are handled by the ndisc subsystem
 * (call elided in this view). */
760 case NDISC_ROUTER_SOLICITATION:
761 case NDISC_ROUTER_ADVERTISEMENT:
762 case NDISC_NEIGHBOUR_SOLICITATION:
763 case NDISC_NEIGHBOUR_ADVERTISEMENT:
768 case ICMPV6_MGM_QUERY:
769 igmp6_event_query(skb);
772 case ICMPV6_MGM_REPORT:
773 igmp6_event_report(skb);
/* Known types we deliberately ignore here. */
776 case ICMPV6_MGM_REDUCTION:
777 case ICMPV6_NI_QUERY:
778 case ICMPV6_NI_REPLY:
779 case ICMPV6_MLD2_REPORT:
780 case ICMPV6_DHAAD_REQUEST:
781 case ICMPV6_DHAAD_REPLY:
782 case ICMPV6_MOBILE_PREFIX_SOL:
783 case ICMPV6_MOBILE_PREFIX_ADV:
787 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
/* Unknown informational messages are silently discarded per RFC 4443;
 * unknown errors must still be passed up. */
790 if (type & ICMPV6_INFOMSG_MASK)
794 * error of unknown type.
795 * must pass to upper level
798 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
805 ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
807 ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
/* Initialize a flowi6 for an ICMPv6 message of the given type between
 * saddr/daddr on interface 'oif', then apply LSM flow classification.
 * NOTE(review): the saddr/daddr assignment lines are elided in this view. */
813 void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
815 const struct in6_addr *saddr,
816 const struct in6_addr *daddr,
819 memset(fl6, 0, sizeof(*fl6));
822 fl6->flowi6_proto = IPPROTO_ICMPV6;
823 fl6->fl6_icmp_type = type;
824 fl6->fl6_icmp_code = 0;
825 fl6->flowi6_oif = oif;
826 security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
830 * Special lock-class for __icmpv6_sk:
832 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
/*
 * Per-netns init: allocate the icmp_sk array and create one kernel
 * ICMPv6 control socket per possible CPU; on failure, tear down the
 * sockets created so far.  Returns 0 or a negative errno.
 */
834 static int __net_init icmpv6_sk_init(struct net *net)
840 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
841 if (net->ipv6.icmp_sk == NULL)
844 for_each_possible_cpu(i) {
845 err = inet_ctl_sock_create(&sk, PF_INET6,
846 SOCK_RAW, IPPROTO_ICMPV6, net);
848 pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
853 net->ipv6.icmp_sk[i] = sk;
856 * Split off their lock-class, because sk->sk_dst_lock
857 * gets used from softirqs, which is safe for
858 * __icmpv6_sk (because those never get directly used
859 * via userspace syscalls), but unsafe for normal sockets.
861 lockdep_set_class(&sk->sk_dst_lock,
862 &icmpv6_socket_sk_dst_lock_key);
864 /* Enough space for 2 64K ICMP packets, including
865 * sk_buff struct overhead.
867 sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
/* Error path: destroy only the sockets successfully created (0..i-1). */
872 for (j = 0; j < i; j++)
873 inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
874 kfree(net->ipv6.icmp_sk);
/* Per-netns teardown: destroy every per-CPU control socket and free the
 * array allocated by icmpv6_sk_init(). */
878 static void __net_exit icmpv6_sk_exit(struct net *net)
882 for_each_possible_cpu(i) {
883 inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
885 kfree(net->ipv6.icmp_sk);
/* pernet registration pairing the per-netns socket init/exit above. */
888 static struct pernet_operations icmpv6_sk_ops = {
889 .init = icmpv6_sk_init,
890 .exit = icmpv6_sk_exit,
/* Module init: register the pernet subsystem, the IPPROTO_ICMPV6 handler,
 * and the icmp6_send sender; unwinds in reverse order on failure.
 * NOTE(review): labels and some returns are elided in this view. */
893 int __init icmpv6_init(void)
897 err = register_pernet_subsys(&icmpv6_sk_ops);
902 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
905 err = inet6_register_icmp_sender(icmp6_send);
911 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
913 pr_err("Failed to register ICMP6 protocol\n");
914 unregister_pernet_subsys(&icmpv6_sk_ops);
/* Module cleanup: undo icmpv6_init() registrations in reverse order. */
918 void icmpv6_cleanup(void)
920 inet6_unregister_icmp_sender(icmp6_send);
921 unregister_pernet_subsys(&icmpv6_sk_ops);
922 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
/* Translation table mapping ICMPV6_DEST_UNREACH codes to (errno, fatal)
 * pairs, indexed by code; used by icmpv6_err_convert() below.
 * NOTE(review): most entries are elided in this view. */
926 static const struct icmp6_err {
934 { /* ADM_PROHIBITED */
938 { /* Was NOT_NEIGHBOUR, now reserved */
/*
 * Convert an ICMPv6 error (type, code) into an errno in *err and return
 * whether the error is fatal for the connection.
 * NOTE(review): default case and return statements are elided here.
 */
960 int icmpv6_err_convert(u8 type, u8 code, int *err)
967 case ICMPV6_DEST_UNREACH:
/* Bounds-check the code before indexing the translation table. */
969 if (code < ARRAY_SIZE(tab_unreach)) {
970 *err = tab_unreach[code].err;
971 fatal = tab_unreach[code].fatal;
975 case ICMPV6_PKT_TOOBIG:
979 case ICMPV6_PARAMPROB:
984 case ICMPV6_TIME_EXCEED:
991 EXPORT_SYMBOL(icmpv6_err_convert);
/* sysctl template for net.ipv6.icmp; the per-netns copy made in
 * ipv6_icmp_sysctl_init() repoints .data at that netns's icmpv6_time. */
994 static struct ctl_table ipv6_icmp_table_template[] = {
996 .procname = "ratelimit",
997 .data = &init_net.ipv6.sysctl.icmpv6_time,
998 .maxlen = sizeof(int),
1000 .proc_handler = proc_dointvec_ms_jiffies,
1005 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
1007 struct ctl_table *table;
1009 table = kmemdup(ipv6_icmp_table_template,
1010 sizeof(ipv6_icmp_table_template),
1014 table[0].data = &net->ipv6.sysctl.icmpv6_time;