/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
		sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
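
/*
 * Process-context worker that drains the deferred transmit queue.
 * Packets that could not be sent immediately are retried here; if the
 * device is still busy, the skb is requeued and the work item is
 * rescheduled for a later tick.
 */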
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
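
/* Validate the UDP checksum of a received packet; returns 0 if it is good. */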
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
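
/*
 * Poll every NAPI instance registered on this device, handing the
 * remaining budget from one instance to the next. Instances whose
 * poll_lock cannot be taken, or that this CPU already owns, are
 * skipped to avoid recursion.
 */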
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->priv_flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
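
/* Top up the emergency skb pool to MAX_SKBS preallocated buffers. */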
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
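
/*
 * Drain this CPU's skb completion queue so the memory can be reused.
 * Buffers with a destructor are handed back via dev_kfree_skb_any();
 * plain buffers are freed on the spot.
 */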
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
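
/*
 * Allocate an skb for transmission: try a fresh atomic allocation,
 * fall back to the preallocated pool, and as a last resort poll the
 * device a few times to reclaim completed buffers before giving up.
 */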
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
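
/*
 * Transmit an skb with interrupts disabled, retrying for up to one
 * clock tick if the device queue is busy. Anything that still cannot
 * be sent is deferred to queue_process() via npinfo->txq.
 */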
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device, maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
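
/*
 * Build a UDP/IPv4/Ethernet frame around @msg by hand and send it
 * through the netpoll transmit path, bypassing the regular stack.
 */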
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
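
/*
 * Minimal ARP responder: answer ARP requests addressed to a registered
 * netpoll local IP so the remote end can resolve us even when the
 * normal stack is not servicing the device.
 */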
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
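
/*
 * Receive-path filter: decide whether an incoming frame belongs to a
 * registered netpoll client. Returns 1 if the packet was consumed.
 */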
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
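
/*
 * Parse a netpoll configuration string of the form
 *   [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 * e.g. 6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55
 * Fields left empty keep their default values.
 */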
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur=opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
					"is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
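
/*
 * Attach a netpoll instance to an already-resolved device: allocate or
 * refcount the device's netpoll_info and, if a receive hook is set,
 * register for RX processing. The caller is expected to hold RTNL.
 */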
int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	RCU_INIT_POINTER(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
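
/*
 * Full setup path: resolve the interface by name, bring it up and wait
 * for carrier if needed, pick a local IP if none was given, then hand
 * off to __netpoll_setup().
 */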
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
		       np->name, np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
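
/*
 * Detach a netpoll instance; dropping the last reference tears down
 * npinfo, synchronizing with NAPI pollers via RCU before purging the
 * queues.
 */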
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);
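
/*
 * Enable or disable packet trapping; "trapped" is a counter, so nested
 * enables need matching disables.
 */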
void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);