diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 5c3c81a609e5d5b600a9491679e1f4843ec69df5..3252e7e0a0055ad07fa1c02979500650703f6820 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -4,6 +4,8 @@
  * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/string.h>
@@ -22,6 +24,7 @@
 #include <linux/timer.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <net/genetlink.h>
 #include <net/netevent.h>
 
@@ -42,13 +45,14 @@ static void send_dm_alert(struct work_struct *unused);
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
        struct work_struct dm_alert_work;
-       struct sk_buff *skb;
+       struct sk_buff __rcu *skb;
        atomic_t dm_hit_count;
        struct timer_list send_timer;
+       int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -79,29 +83,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
        size_t al;
        struct net_dm_alert_msg *msg;
        struct nlattr *nla;
+       struct sk_buff *skb;
+       struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
 
        al = sizeof(struct net_dm_alert_msg);
        al += dm_hit_limit * sizeof(struct net_dm_drop_point);
        al += sizeof(struct nlattr);
 
-       data->skb = genlmsg_new(al, GFP_KERNEL);
-       genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-                       0, NET_DM_CMD_ALERT);
-       nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-       msg = nla_data(nla);
-       memset(msg, 0, al);
-       atomic_set(&data->dm_hit_count, dm_hit_limit);
+       skb = genlmsg_new(al, GFP_KERNEL);
+
+       if (skb) {
+               genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+                               0, NET_DM_CMD_ALERT);
+               nla = nla_reserve(skb, NLA_UNSPEC,
+                                 sizeof(struct net_dm_alert_msg));
+               msg = nla_data(nla);
+               memset(msg, 0, al);
+       } else
+               schedule_work_on(data->cpu, &data->dm_alert_work);
+
+       /*
+        * Don't need to lock this, since we are guaranteed to only
+        * run this on a single cpu at a time.
+        * Note also that we only update data->skb if the old and new skb
+        * pointers don't match.  This ensures that we don't continually call
+        * synchronize_rcu if we repeatedly fail to alloc a new netlink message.
+        */
+       if (skb != oskb) {
+               rcu_assign_pointer(data->skb, skb);
+
+               synchronize_rcu();
+
+               atomic_set(&data->dm_hit_count, dm_hit_limit);
+       }
+
 }
 
 static void send_dm_alert(struct work_struct *unused)
 {
        struct sk_buff *skb;
-       struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+       WARN_ON_ONCE(data->cpu != smp_processor_id());
 
        /*
         * Grab the skb we're about to send
         */
-       skb = data->skb;
+       skb = rcu_dereference_protected(data->skb, 1);
 
        /*
         * Replace it with a new one
@@ -111,8 +139,10 @@ static void send_dm_alert(struct work_struct *unused)
        /*
         * Ship it!
         */
-       genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+       if (skb)
+               genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 
+       put_cpu_var(dm_cpu_data);
 }
 
 /*
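
[Editor's aside, not part of the patch: the two hunks above swap data->skb with rcu_assign_pointer() and then synchronize_rcu() so the send path never frees or resets a buffer a dropped-packet hit is still filling in. A minimal sketch of that publish-then-quiesce pattern, using hypothetical names (struct demo_buf, demo_swap, demo_read) but the stock <linux/rcupdate.h> primitives:]

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_buf {
	int filled;
};

static struct demo_buf __rcu *cur_buf;

/* Writer side: publish a fresh buffer, then wait until every reader that
 * might still hold the old pointer has left its RCU read-side section. */
static void demo_swap(struct demo_buf *fresh)
{
	struct demo_buf *old = rcu_dereference_protected(cur_buf, 1);

	rcu_assign_pointer(cur_buf, fresh);
	synchronize_rcu();		/* old buffer now has no readers */
	kfree(old);
}

/* Reader side: dereference only under rcu_read_lock(), and cope with NULL
 * (the writer's allocation may have failed). */
static void demo_read(void)
{
	struct demo_buf *buf;

	rcu_read_lock();
	buf = rcu_dereference(cur_buf);
	if (buf)
		buf->filled++;
	rcu_read_unlock();
}
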
@@ -123,9 +153,11 @@ static void send_dm_alert(struct work_struct *unused)
  */
 static void sched_send_work(unsigned long unused)
 {
-       struct per_cpu_dm_data *data =  &__get_cpu_var(dm_cpu_data);
+       struct per_cpu_dm_data *data =  &get_cpu_var(dm_cpu_data);
+
+       schedule_work_on(smp_processor_id(), &data->dm_alert_work);
 
-       schedule_work(&data->dm_alert_work);
+       put_cpu_var(dm_cpu_data);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,9 +166,16 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        struct nlmsghdr *nlh;
        struct nlattr *nla;
        int i;
-       struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+       struct sk_buff *dskb;
+       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
 
 
+       rcu_read_lock();
+       dskb = rcu_dereference(data->skb);
+
+       if (!dskb)
+               goto out;
+
        if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
                /*
                 * we're already at zero, discard this hit
@@ -144,7 +183,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
                goto out;
        }
 
-       nlh = (struct nlmsghdr *)data->skb->data;
+       nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
        for (i = 0; i < msg->entries; i++) {
@@ -158,7 +197,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        /*
         * We need to create a new entry
         */
-       __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+       __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
        nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
        memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
        msg->points[msg->entries].count = 1;
@@ -170,6 +209,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        }
 
 out:
+       rcu_read_unlock();
+       put_cpu_var(dm_cpu_data);
        return;
 }
 
@@ -214,7 +255,7 @@ static int set_all_monitor_traces(int state)
        struct dm_hw_stat_delta *new_stat = NULL;
        struct dm_hw_stat_delta *temp;
 
-       spin_lock(&trace_state_lock);
+       mutex_lock(&trace_state_mutex);
 
        if (state == trace_state) {
                rc = -EAGAIN;
@@ -223,9 +264,15 @@ static int set_all_monitor_traces(int state)
 
        switch (state) {
        case TRACE_ON:
+               if (!try_module_get(THIS_MODULE)) {
+                       rc = -ENODEV;
+                       break;
+               }
+
                rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
                rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
                break;
+
        case TRACE_OFF:
                rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
                rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
@@ -241,6 +288,9 @@ static int set_all_monitor_traces(int state)
                                kfree_rcu(new_stat, rcu);
                        }
                }
+
+               module_put(THIS_MODULE);
+
                break;
        default:
                rc = 1;
@@ -253,7 +303,7 @@ static int set_all_monitor_traces(int state)
                rc = -EINPROGRESS;
 
 out_unlock:
-       spin_unlock(&trace_state_lock);
+       mutex_unlock(&trace_state_mutex);
 
        return rc;
 }
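
[Editor's aside, not part of the patch: the TRACE_ON/TRACE_OFF hunks above pin the module with try_module_get()/module_put() while its tracepoint probes are registered, so it cannot be unloaded with callbacks still live. A hedged sketch of that idea with illustrative demo_enable()/demo_disable() stand-ins:]

#include <linux/module.h>

static int demo_enable(void)
{
	/* Fails once the module has started unloading. */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* ... register tracepoint probes here ... */
	return 0;
}

static void demo_disable(void)
{
	/* ... unregister the probes first ... */
	module_put(THIS_MODULE);	/* unloading becomes possible again */
}
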
@@ -296,12 +346,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
                new_stat->dev = dev;
                new_stat->last_rx = jiffies;
-               spin_lock(&trace_state_lock);
+               mutex_lock(&trace_state_mutex);
                list_add_rcu(&new_stat->list, &hw_stats_list);
-               spin_unlock(&trace_state_lock);
+               mutex_unlock(&trace_state_mutex);
                break;
        case NETDEV_UNREGISTER:
-               spin_lock(&trace_state_lock);
+               mutex_lock(&trace_state_mutex);
                list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
                        if (new_stat->dev == dev) {
                                new_stat->dev = NULL;
@@ -312,7 +362,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
                                }
                        }
                }
-               spin_unlock(&trace_state_lock);
+               mutex_unlock(&trace_state_mutex);
                break;
        }
 out:
@@ -343,10 +393,10 @@ static int __init init_net_drop_monitor(void)
        struct per_cpu_dm_data *data;
        int cpu, rc;
 
-       printk(KERN_INFO "Initializing network drop monitor service\n");
+       pr_info("Initializing network drop monitor service\n");
 
        if (sizeof(void *) > 8) {
-               printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n");
+               pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
                return -ENOSPC;
        }
 
@@ -354,27 +404,29 @@ static int __init init_net_drop_monitor(void)
                                           dropmon_ops,
                                           ARRAY_SIZE(dropmon_ops));
        if (rc) {
-               printk(KERN_ERR "Could not create drop monitor netlink family\n");
+               pr_err("Could not create drop monitor netlink family\n");
                return rc;
        }
 
        rc = register_netdevice_notifier(&dropmon_net_notifier);
        if (rc < 0) {
-               printk(KERN_CRIT "Failed to register netdevice notifier\n");
+               pr_crit("Failed to register netdevice notifier\n");
                goto out_unreg;
        }
 
        rc = 0;
 
-       for_each_present_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
-               reset_per_cpu_data(data);
+               data->cpu = cpu;
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
                init_timer(&data->send_timer);
                data->send_timer.data = cpu;
                data->send_timer.function = sched_send_work;
+               reset_per_cpu_data(data);
        }
 
+
        goto out;
 
 out_unreg:
@@ -383,4 +435,36 @@ out:
        return rc;
 }
 
-late_initcall(init_net_drop_monitor);
+static void exit_net_drop_monitor(void)
+{
+       struct per_cpu_dm_data *data;
+       int cpu;
+
+       BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+
+       /*
+        * Because of the module_get/put we do in the trace state change path,
+        * we are guaranteed not to have any current users when we get here;
+        * all we need to do is make sure that we don't have any running
+        * timers or pending schedule calls.
+        */
+
+       for_each_possible_cpu(cpu) {
+               data = &per_cpu(dm_cpu_data, cpu);
+               del_timer_sync(&data->send_timer);
+               cancel_work_sync(&data->dm_alert_work);
+               /*
+                * At this point, we should have exclusive access
+                * to this struct and can free the skb inside it
+                */
+               kfree_skb(data->skb);
+       }
+
+       BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+}
+
+module_init(init_net_drop_monitor);
+module_exit(exit_net_drop_monitor);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");