perf_events: Move code around to prepare for cgroup
author		Stephane Eranian <eranian@google.com>
		Mon, 3 Jan 2011 16:20:01 +0000 (18:20 +0200)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 7 Jan 2011 14:08:50 +0000 (15:08 +0100)
In particular, this patch moves perf_event_exit_task() before
cgroup_exit() to allow for cgroup support: cgroup_exit() detaches
the cgroups attached to a task, so per-task perf events must be
flushed while that cgroup state is still present.
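
As a toy model of the ordering constraint (standalone C with
hypothetical stub names, not kernel code): the per-task perf teardown
has to run while the task's cgroup attachment is still visible, which
is exactly what moving the call achieves.

#include <stdio.h>

struct task {
	const char *cgroup;	/* stands in for the task's cgroup attachment */
};

/* stands in for perf_event_exit_task(): with cgroup events it
 * still needs to consult the task's cgroup here */
static void perf_event_exit_task(struct task *tsk)
{
	printf("flushing inherited counters, cgroup=%s\n",
	       tsk->cgroup ? tsk->cgroup : "(already detached!)");
}

/* stands in for cgroup_exit(): detaches the task from its cgroups */
static void cgroup_exit(struct task *tsk)
{
	tsk->cgroup = NULL;
}

int main(void)
{
	struct task tsk = { .cgroup = "/sys/fs/cgroup/test" };

	/* the ordering this patch establishes in do_exit(): */
	perf_event_exit_task(&tsk);	/* cgroup data still attached */
	cgroup_exit(&tsk);		/* safe to detach afterwards */
	return 0;
}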

Other movements include hoisting some definitions and inlines
to the top of perf_event.c.
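
The hoist matters because of C's declare-before-use rule: the cgroup
hooks a follow-up patch adds sit early in perf_event.c and must
already see enum event_type_t and the cpu_ctx_sched_out()/
cpu_ctx_sched_in() prototypes. A standalone sketch of the pattern
(simplified, hypothetical signatures):

#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED   = 0x2,
	EVENT_ALL      = EVENT_FLEXIBLE | EVENT_PINNED,
};

/* forward declaration: callable before its definition below */
static void cpu_ctx_sched_out(enum event_type_t event_type);

/* an early caller, analogous to the cgroup hooks added later */
static void early_hook(void)
{
	cpu_ctx_sched_out(EVENT_ALL);	/* fine: prototype is in scope */
}

static void cpu_ctx_sched_out(enum event_type_t event_type)
{
	printf("scheduling out events: %#x\n", (unsigned)event_type);
}

int main(void)
{
	early_hook();
	return 0;
}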

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d22058b.cdace30a.4657.ffff95b1@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/exit.c
kernel/perf_event.c

diff --git a/kernel/exit.c b/kernel/exit.c
index 676149a4ac5ff497367a484e2b66c01e915ccefd..8cb89045ecf3744d497957fee6de3c394362b3dc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -994,6 +994,15 @@ NORET_TYPE void do_exit(long code)
        exit_fs(tsk);
        check_stack_usage();
        exit_thread();
+
+       /*
+        * Flush inherited counters to the parent - before the parent
+        * gets woken up by child-exit notifications.
+        *
+        * because of cgroup mode, must be called before cgroup_exit()
+        */
+       perf_event_exit_task(tsk);
+
        cgroup_exit(tsk, 1);
 
        if (group_dead)
@@ -1007,11 +1016,6 @@ NORET_TYPE void do_exit(long code)
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        flush_ptrace_hw_breakpoint(tsk);
-       /*
-        * Flush inherited counters to the parent - before the parent
-        * gets woken up by child-exit notifications.
-        */
-       perf_event_exit_task(tsk);
 
        exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf1e8cc254db7f2a2a255511fd36eea4a68..2c14e3afdf0db29dc0ee7178b4fa52c63e0c2c01 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+       EVENT_FLEXIBLE = 0x1,
+       EVENT_PINNED = 0x2,
+       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                             enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+                            enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
        return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+       return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
        put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-       return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -1193,12 +1205,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
        return 0;
 }
 
-enum event_type_t {
-       EVENT_FLEXIBLE = 0x1,
-       EVENT_PINNED = 0x2,
-       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)