git.karo-electronics.de Git - mv-sheeva.git/commitdiff
ia64: Switch do_timer() to xtime_update()
author: Torben Hohn <torbenh@gmx.de>
Thu, 27 Jan 2011 14:59:56 +0000 (15:59 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Mon, 31 Jan 2011 13:55:45 +0000 (14:55 +0100)
local_cpu_data->itm_next = new_itm; does not need to be protected by
xtime_lock. xtime_update() takes the lock itself.

Signed-off-by: Torben Hohn <torbenh@gmx.de>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: johnstul@us.ibm.com
Cc: hch@infradead.org
Cc: yong.zhang0@gmail.com
LKML-Reference: <20110127145956.23248.49107.stgit@localhost>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/ia64/kernel/time.c
arch/ia64/xen/time.c

index 9702fa92489edb3f7aa04d21ad1a801afc8a1807..156ad803d5b7aedce2b059545cf1bde0cd44d32e 100644 (file)
@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
 
                new_itm += local_cpu_data->itm_delta;
 
-               if (smp_processor_id() == time_keeper_id) {
-                       /*
-                        * Here we are in the timer irq handler. We have irqs locally
-                        * disabled, but we don't know if the timer_bh is running on
-                        * another CPU. We need to avoid to SMP race by acquiring the
-                        * xtime_lock.
-                        */
-                       write_seqlock(&xtime_lock);
-                       do_timer(1);
-                       local_cpu_data->itm_next = new_itm;
-                       write_sequnlock(&xtime_lock);
-               } else
-                       local_cpu_data->itm_next = new_itm;
+               if (smp_processor_id() == time_keeper_id)
+                       xtime_update(1);
+
+               local_cpu_data->itm_next = new_itm;
 
                if (time_after(new_itm, ia64_get_itc()))
                        break;
@@ -222,7 +213,7 @@ skip_process_time_accounting:
                 * comfort, we increase the safety margin by
                 * intentionally dropping the next tick(s).  We do NOT
                 * update itm.next because that would force us to call
-                * do_timer() which in turn would let our clock run
+                * xtime_update() which in turn would let our clock run
                 * too fast (with the potentially devastating effect
                 * of losing monotony of time).
                 */
index c1c544513e8d0926a8a4bd879f0056240f067bb0..1f8244a78bee026948376340024c50c29f922c59 100644 (file)
@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
                run_posix_cpu_timers(p);
                delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
 
-               if (cpu == time_keeper_id) {
-                       write_seqlock(&xtime_lock);
-                       do_timer(stolen + blocked);
-                       local_cpu_data->itm_next = delta_itm + new_itm;
-                       write_sequnlock(&xtime_lock);
-               } else {
-                       local_cpu_data->itm_next = delta_itm + new_itm;
-               }
+               if (cpu == time_keeper_id)
+                       xtime_update(stolen + blocked);
+
+               local_cpu_data->itm_next = delta_itm + new_itm;
+
                per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
                per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
        }