sched: Allow for positional tg_tree walks
author    Paul Turner <pjt@google.com>
          Thu, 21 Jul 2011 16:43:35 +0000 (09:43 -0700)
committer Ingo Molnar <mingo@elte.hu>
          Sun, 14 Aug 2011 10:03:38 +0000 (12:03 +0200)
Extend walk_tg_tree to accept a positional argument:

static int walk_tg_tree_from(struct task_group *from,
     tg_visitor down, tg_visitor up, void *data)

Existing semantics are preserved; the caller must hold rcu_read_lock() or a
sufficient analogue.
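
For illustration, a hypothetical caller might use the new entry point like
this (tg_count and count_subtree are made-up names, not part of the patch;
tg_nop is the existing no-op visitor in kernel/sched.c):

    /* Count the groups in the subtree rooted at @tg. */
    static int tg_count(struct task_group *tg, void *data)
    {
            (*(int *)data)++;
            return 0;
    }

    static int count_subtree(struct task_group *tg)
    {
            int count = 0;

            rcu_read_lock();
            walk_tg_tree_from(tg, tg_count, tg_nop, &count);
            rcu_read_unlock();

            return count;
    }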

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.677889157@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c

index 4bbabc2c4a778604cfb7cfe9383af5968eba453f..8ec1e7ac2894f0019f93e70fa3fa49c47b08e1b3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1591,20 +1591,23 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
+ * Iterate task_group tree rooted at *from, calling @down when first entering a
+ * node and @up when leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
  */
-static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+static int walk_tg_tree_from(struct task_group *from,
+                            tg_visitor down, tg_visitor up, void *data)
 {
        struct task_group *parent, *child;
        int ret;
 
-       rcu_read_lock();
-       parent = &root_task_group;
+       parent = from;
+
 down:
        ret = (*down)(parent, data);
        if (ret)
-               goto out_unlock;
+               goto out;
        list_for_each_entry_rcu(child, &parent->children, siblings) {
                parent = child;
                goto down;
@@ -1613,19 +1616,29 @@ up:
                continue;
        }
        ret = (*up)(parent, data);
-       if (ret)
-               goto out_unlock;
+       if (ret || parent == from)
+               goto out;
 
        child = parent;
        parent = parent->parent;
        if (parent)
                goto up;
-out_unlock:
-       rcu_read_unlock();
-
+out:
        return ret;
 }
 
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
+ */
+
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+       return walk_tg_tree_from(&root_task_group, down, up, data);
+}
+
 static int tg_nop(struct task_group *tg, void *data)
 {
        return 0;
@@ -8870,13 +8883,19 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
+       int ret;
+
        struct rt_schedulable_data data = {
                .tg = tg,
                .rt_period = period,
                .rt_runtime = runtime,
        };
 
-       return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+       rcu_read_lock();
+       ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+       rcu_read_unlock();
+
+       return ret;
 }
 
 static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -9333,6 +9352,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 {
+       int ret;
        struct cfs_schedulable_data data = {
                .tg = tg,
                .period = period,
@@ -9344,7 +9364,11 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
                do_div(data.quota, NSEC_PER_USEC);
        }
 
-       return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+       rcu_read_lock();
+       ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+       rcu_read_unlock();
+
+       return ret;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
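
For readers unfamiliar with the open-coded goto iteration above, the following
self-contained user-space sketch (simplified node type and names assumed; this
is not kernel code) shows the same traversal semantics: @down is invoked when a
node is first entered, @up when it is left for the final time, and a non-zero
return from either visitor aborts the walk.

    #include <stdio.h>

    /* Simplified stand-in for struct task_group. */
    struct node {
            const char *name;
            struct node *child;     /* first child */
            struct node *sibling;   /* next sibling */
    };

    typedef int (*visitor)(struct node *, void *);

    /* Recursive equivalent of the goto-based walk_tg_tree_from(). */
    static int walk_from(struct node *from, visitor down, visitor up, void *data)
    {
            struct node *c;
            int ret = down(from, data);

            if (ret)
                    return ret;
            for (c = from->child; c; c = c->sibling) {
                    ret = walk_from(c, down, up, data);
                    if (ret)
                            return ret;
            }
            return up(from, data);
    }

    static int print_down(struct node *n, void *data)
    {
            (void)data;
            printf("down %s\n", n->name);
            return 0;
    }

    static int print_up(struct node *n, void *data)
    {
            (void)data;
            printf("up   %s\n", n->name);
            return 0;
    }

    int main(void)
    {
            struct node b = { "b", NULL, NULL };
            struct node a = { "a", NULL, &b };
            struct node root = { "root", &a, NULL };

            /* Prints: down root, down a, up a, down b, up b, up root. */
            return walk_from(&root, print_down, print_up, NULL);
    }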