git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
xen: move xen_setup_runstate_info and get_runstate_snapshot to drivers/xen/time.c
author	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
	Tue, 28 May 2013 17:51:49 +0000 (17:51 +0000)
committer	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
	Tue, 28 May 2013 17:51:49 +0000 (17:51 +0000)
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
CC: konrad.wilk@oracle.com
Changes in v2:
- leave do_stolen_accounting in arch/x86/xen/time.c;
- use the new common functions in arch/ia64/xen/time.c.

arch/ia64/xen/time.c
arch/x86/xen/time.c
drivers/xen/Makefile
drivers/xen/time.c [new file with mode: 0644]
include/xen/xen-ops.h

index 1f8244a78bee026948376340024c50c29f922c59..79a0b8cc389aff887beb30cce9754edf83a76091 100644 (file)
 
 #include "../kernel/fsyscall_gtod_data.h"
 
-static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
 static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
 
 /* taken from i386/kernel/time-xen.c */
 static void xen_init_missing_ticks_accounting(int cpu)
 {
-       struct vcpu_register_runstate_memory_area area;
-       struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
-       int rc;
+       struct vcpu_runstate_info runstate;
+
+       xen_setup_runstate_info(cpu);
+       xen_get_runstate_snapshot(&runstate);
 
-       memset(runstate, 0, sizeof(*runstate));
-
-       area.addr.v = runstate;
-       rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
-                               &area);
-       WARN_ON(rc && rc != -ENOSYS);
-
-       per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
-       per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
-                                           + runstate->time[RUNSTATE_offline];
-}
-
-/*
- * Runstate accounting
- */
-/* stolen from arch/x86/xen/time.c */
-static void get_runstate_snapshot(struct vcpu_runstate_info *res)
-{
-       u64 state_time;
-       struct vcpu_runstate_info *state;
-
-       BUG_ON(preemptible());
-
-       state = &__get_cpu_var(xen_runstate);
-
-       /*
-        * The runstate info is always updated by the hypervisor on
-        * the current CPU, so there's no need to use anything
-        * stronger than a compiler barrier when fetching it.
-        */
-       do {
-               state_time = state->state_entry_time;
-               rmb();
-               *res = *state;
-               rmb();
-       } while (state->state_entry_time != state_time);
+       per_cpu(xen_blocked_time, cpu) = runstate.time[RUNSTATE_blocked];
+       per_cpu(xen_stolen_time, cpu) = runstate.time[RUNSTATE_runnable]
+                                           + runstate.time[RUNSTATE_offline];
 }
 
 #define NS_PER_TICK (1000000000LL/HZ)
@@ -94,7 +58,7 @@ consider_steal_time(unsigned long new_itm)
        struct vcpu_runstate_info runstate;
        struct task_struct *p = current;
 
-       get_runstate_snapshot(&runstate);
+       xen_get_runstate_snapshot(&runstate);
 
        /*
         * Check for vcpu migration effect
@@ -202,7 +166,7 @@ static unsigned long long xen_sched_clock(void)
         */
        now = ia64_native_sched_clock();
 
-       get_runstate_snapshot(&runstate);
+       xen_get_runstate_snapshot(&runstate);
 
        WARN_ON(runstate.state != RUNSTATE_running);
 
index 3d88bfdf9e1c092a23e9d4be143630a074a92926..c0ca15ea71078f053e3046263294df4383daa316 100644 (file)
@@ -30,9 +30,6 @@
 #define TIMER_SLOP     100000
 #define NS_PER_TICK    (1000000000LL / HZ)
 
-/* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
-
 /* snapshots of runstate info */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
@@ -40,77 +37,6 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 static DEFINE_PER_CPU(u64, xen_residual_stolen);
 static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
-/* return an consistent snapshot of 64-bit time/counter value */
-static u64 get64(const u64 *p)
-{
-       u64 ret;
-
-       if (BITS_PER_LONG < 64) {
-               u32 *p32 = (u32 *)p;
-               u32 h, l;
-
-               /*
-                * Read high then low, and then make sure high is
-                * still the same; this will only loop if low wraps
-                * and carries into high.
-                * XXX some clean way to make this endian-proof?
-                */
-               do {
-                       h = p32[1];
-                       barrier();
-                       l = p32[0];
-                       barrier();
-               } while (p32[1] != h);
-
-               ret = (((u64)h) << 32) | l;
-       } else
-               ret = *p;
-
-       return ret;
-}
-
-/*
- * Runstate accounting
- */
-static void get_runstate_snapshot(struct vcpu_runstate_info *res)
-{
-       u64 state_time;
-       struct vcpu_runstate_info *state;
-
-       BUG_ON(preemptible());
-
-       state = &__get_cpu_var(xen_runstate);
-
-       /*
-        * The runstate info is always updated by the hypervisor on
-        * the current CPU, so there's no need to use anything
-        * stronger than a compiler barrier when fetching it.
-        */
-       do {
-               state_time = get64(&state->state_entry_time);
-               barrier();
-               *res = *state;
-               barrier();
-       } while (get64(&state->state_entry_time) != state_time);
-}
-
-/* return true when a vcpu could run but has no real cpu to run on */
-bool xen_vcpu_stolen(int vcpu)
-{
-       return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
-}
-
-void xen_setup_runstate_info(int cpu)
-{
-       struct vcpu_register_runstate_memory_area area;
-
-       area.addr.v = &per_cpu(xen_runstate, cpu);
-
-       if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
-                              cpu, &area))
-               BUG();
-}
-
 static void do_stolen_accounting(void)
 {
        struct vcpu_runstate_info state;
@@ -118,7 +44,7 @@ static void do_stolen_accounting(void)
        s64 blocked, runnable, offline, stolen;
        cputime_t ticks;
 
-       get_runstate_snapshot(&state);
+       xen_get_runstate_snapshot(&state);
 
        WARN_ON(state.state != RUNSTATE_running);
 
index eabd0ee1c2bc2f298d0938515e204b42430e9852..2bf461a5244897b25e64901c9ad9185d55ecca2d 100644 (file)
@@ -3,7 +3,7 @@ obj-y   += manage.o
 obj-$(CONFIG_HOTPLUG_CPU)              += cpu_hotplug.o
 endif
 obj-$(CONFIG_X86)                      += fallback.o
-obj-y  += grant-table.o features.o events.o balloon.o
+obj-y  += grant-table.o features.o events.o balloon.o time.o
 obj-y  += xenbus/
 
 nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
new file mode 100644 (file)
index 0000000..c2e39d3
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Xen stolen ticks accounting.
+ */
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/math64.h>
+#include <linux/gfp.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/events.h>
+#include <xen/features.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+#include <xen/xen-ops.h>
+
+/* runstate info updated by Xen */
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+
+/* return a consistent snapshot of 64-bit time/counter value */
+static u64 get64(const u64 *p)
+{
+       u64 ret;
+
+       if (BITS_PER_LONG < 64) {
+               u32 *p32 = (u32 *)p;
+               u32 h, l;
+
+               /*
+                * Read high then low, and then make sure high is
+                * still the same; this will only loop if low wraps
+                * and carries into high.
+                * XXX some clean way to make this endian-proof?
+                */
+               do {
+                       h = p32[1];
+                       barrier();
+                       l = p32[0];
+                       barrier();
+               } while (p32[1] != h);
+
+               ret = (((u64)h) << 32) | l;
+       } else
+               ret = *p;
+
+       return ret;
+}
+
+/*
+ * Runstate accounting
+ */
+void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
+{
+       u64 state_time;
+       struct vcpu_runstate_info *state;
+
+       BUG_ON(preemptible());
+
+       state = &__get_cpu_var(xen_runstate);
+
+       /*
+        * The runstate info is always updated by the hypervisor on
+        * the current CPU, so there's no need to use anything
+        * stronger than a compiler barrier when fetching it.
+        */
+       do {
+               state_time = get64(&state->state_entry_time);
+               barrier();
+               *res = *state;
+               barrier();
+       } while (get64(&state->state_entry_time) != state_time);
+}
+
+/* return true when a vcpu could run but has no real cpu to run on */
+bool xen_vcpu_stolen(int vcpu)
+{
+       return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
+}
+
+void xen_setup_runstate_info(int cpu)
+{
+       struct vcpu_register_runstate_memory_area area;
+
+       area.addr.v = &per_cpu(xen_runstate, cpu);
+
+       if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
+                              cpu, &area))
+               BUG();
+}
+
index d6fe062cad6b3128b25104107a1c0468d442295f..4fd4e47704e4a878c0ab1084b117103ddae3dffd 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/percpu.h>
 #include <asm/xen/interface.h>
+#include <xen/interface/vcpu.h>
 
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 
@@ -16,6 +17,10 @@ void xen_mm_unpin_all(void);
 void xen_timer_resume(void);
 void xen_arch_resume(void);
 
+bool xen_vcpu_stolen(int vcpu);
+void xen_setup_runstate_info(int cpu);
+void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
+
 int xen_setup_shutdown_event(void);
 
 extern unsigned long *xen_contiguous_bitmap;