/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;
#ifdef CONFIG_NETPOLL_TRAP
static atomic_t trapped;
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
#endif

DEFINE_STATIC_SRCU(netpoll_srcu);
#define USEC_PER_POLL	50

#define MAX_SKB_SIZE						\
	(sizeof(struct ethhdr) +				\
	 sizeof(struct iphdr) +					\
	 sizeof(struct udphdr) +				\
	 MAX_UDP_CHUNK)
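/*
 * For IPv4 this works out to 14 + 20 + 8 + 1460 = 1502 bytes per pooled
 * skb: one full MAX_UDP_CHUNK payload plus Ethernet, IP, and UDP headers.
 */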
static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
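/*
 * Deferred transmit path: skbs that could not be sent right away are
 * parked on npinfo->txq and drained here from a delayed work item,
 * which reschedules itself every HZ/10 jiffies while the tx queue
 * stays busy or frozen.
 */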
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
#ifdef CONFIG_NETPOLL_TRAP
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
#endif /* CONFIG_NETPOLL_TRAP */
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}
static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}
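/*
 * Note: a budget of 0 (the no-rx case in netpoll_poll_dev() below) still
 * lets a driver's ->poll() reap tx completions, since NAPI drivers treat
 * the budget as a limit on rx packets only.
 */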
#ifdef CONFIG_NETPOLL_TRAP
static void service_neigh_queue(struct net_device *dev,
				struct netpoll_info *npi)
{
	struct sk_buff *skb;
	if (dev->flags & IFF_SLAVE) {
		struct net_device *bond_dev;
		struct netpoll_info *bond_ni;

		bond_dev = netdev_master_upper_dev_get_rcu(dev);
		bond_ni = rcu_dereference_bh(bond_dev->npinfo);
		while ((skb = skb_dequeue(&npi->neigh_tx))) {
			skb->dev = bond_dev;
			skb_queue_tail(&bond_ni->neigh_tx, skb);
		}
	}
	while ((skb = skb_dequeue(&npi->neigh_tx)))
		netpoll_neigh_reply(skb, npi);
}
#else /* !CONFIG_NETPOLL_TRAP */
static inline void service_neigh_queue(struct net_device *dev,
				       struct netpoll_info *npi)
{
}
#endif /* CONFIG_NETPOLL_TRAP */
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	bool rx_processing = netpoll_rx_processing(ni);
	int budget = rx_processing ? 16 : 0;

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	if (rx_processing)
		netpoll_set_trap(1);

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	up(&ni->dev_lock);

	if (rx_processing)
		netpoll_set_trap(0);

	service_neigh_queue(dev, ni);

	zap_completion_queue();
}
void netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;
	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_rx_disable);
void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni;
	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_rx_enable);
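/*
 * netpoll_rx_disable()/netpoll_rx_enable() bracket device state changes
 * in the core dev_open()/dev_close() paths: holding ni->dev_lock under
 * SRCU keeps netpoll_poll_dev(), which only down_trylock()s the
 * semaphore, away from the device while it changes state.
 */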
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
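/*
 * Note: skbs land on the completion queue via dev_kfree_skb_irq() with a
 * user count of zero, so the atomic_inc() above restores the one
 * reference that dev_kfree_skb_any() expects to drop when a destructor
 * has to run in a safe context.
 */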
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !vlan_hw_offload_capable(netif_skb_features(skb),
								     skb->vlan_proto)) {
						skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
						if (unlikely(!skb)) {
							/* This is actually a packet drop, but we
							 * don't want the code at the end of this
							 * function to try and re-queue a NULL skb.
							 */
							status = NETDEV_TX_OK;
							goto unlock_txq;
						}
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
			unlock_txq:
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
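/*
 * netpoll_send_skb() (a static inline in <linux/netpoll.h>) is the usual
 * entry point: it disables IRQs around a call to this function with
 * np->dev as the device, satisfying the locking rule above.
 */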
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
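/*
 * Example (sketch): a console client such as netconsole typically splits
 * its output into MAX_UDP_CHUNK-sized pieces and sends each one through
 * netpoll_send_udp(), roughly:
 *
 *	while (len > 0) {
 *		int frag = min(len, MAX_UDP_CHUNK);
 *
 *		netpoll_send_udp(&np, msg, frag);
 *		msg += frag;
 *		len -= frag;
 *	}
 */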
#ifdef CONFIG_NETPOLL_TRAP
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (!netpoll_rx_processing(npinfo))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_ARP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if (proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
			*(__be32 *)hdr = htonl(0x60000000);
			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;

			target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address, we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}
static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	if (skb->protocol != htons(ETH_P_IPV6))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen, data_len;
	int hits = 0, offset;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;
	uint16_t source;

	if (!netpoll_rx_processing(npinfo))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && netpoll_trap()) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && netpoll_trap()) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now we trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
				continue;
			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (netpoll_trap()) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
static void netpoll_trap_setup_info(struct netpoll_info *npinfo)
{
	INIT_LIST_HEAD(&npinfo->rx_np);
	spin_lock_init(&npinfo->rx_lock);
	skb_queue_head_init(&npinfo->neigh_tx);
}

static void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
{
	skb_queue_purge(&npinfo->neigh_tx);
}
static void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
{
	unsigned long flags;
	if (np->rx_skb_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}
}

static void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
{
	unsigned long flags;
	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}
}
#else /* !CONFIG_NETPOLL_TRAP */
static inline void netpoll_trap_setup_info(struct netpoll_info *npinfo)
{
}
static inline void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
{
}
static inline
void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
{
}
static inline
void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
{
}
#endif /* CONFIG_NETPOLL_TRAP */
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
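/*
 * Example: the option string parsed above follows the netconsole format
 *
 *	<local-port>@<local-ip>/<dev>,<remote-port>@<remote-ip>/<remote-mac>
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55"; every field
 * except the remote IP may be left empty to keep its default.
 */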
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		netpoll_trap_setup_info(npinfo);

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	netpoll_trap_setup(np, npinfo);

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
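/*
 * Example (sketch): a minimal client parses a config string, sets the
 * netpoll instance up once, and then transmits ("my_np", "config", "buf"
 * and "len" below are hypothetical):
 *
 *	static struct netpoll my_np = {
 *		.name = "my_client",
 *	};
 *	static char config[] =
 *		"6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
 *
 *	if (!netpoll_parse_options(&my_np, config) &&
 *	    !netpoll_setup(&my_np))
 *		netpoll_send_udp(&my_np, buf, len);
 */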
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	netpoll_trap_cleanup_info(npinfo);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	netpoll_trap_cleanup(np, npinfo);

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}
void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
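/*
 * __netpoll_free_async() is for callers that cannot run the synchronous
 * __netpoll_cleanup() + kfree() themselves (e.g. teardown paths that
 * must not block); the scheduled work takes the rtnl, cleans up, and
 * frees np.
 */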
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);
#ifdef CONFIG_NETPOLL_TRAP
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);
#endif /* CONFIG_NETPOLL_TRAP */