From: Ingo Molnar
Date: Thu, 11 Jun 2009 15:55:42 +0000 (+0200)
Subject: Merge branch 'linus' into perfcounters/core
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=940010c5a314a7bd9b498593bc6ba1718ac5aec5;p=linux-beck.git

Merge branch 'linus' into perfcounters/core

Conflicts:
	arch/x86/kernel/irqinit.c
	arch/x86/kernel/irqinit_64.c
	arch/x86/kernel/traps.c
	arch/x86/mm/fault.c
	include/linux/sched.h
	kernel/exit.c
---

940010c5a314a7bd9b498593bc6ba1718ac5aec5
diff --cc arch/x86/kernel/irqinit.c
index 205bdd880d31,2e08b10ad51a..267c6624c77f
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@@ -154,7 -181,12 +181,17 @@@ static void __init apic_intr_init(void
 {
 	smp_intr_init();
- #ifdef CONFIG_X86_LOCAL_APIC
 -#ifdef CONFIG_X86_64
++#ifdef CONFIG_X86_THERMAL_VECTOR
 +	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
++#endif
++#ifdef CONFIG_X86_THRESHOLD
 +	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 +#endif
++#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)
++	alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
++#endif
 +
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
 	/* self generated IPI for local APIC timer */
 	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
@@@ -164,14 -196,20 +201,12 @@@
 	/* IPI vectors for APIC spurious and error interrupts */
 	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
 	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+
+	/* Performance monitoring interrupts: */
 # ifdef CONFIG_PERF_COUNTERS
-	alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
 	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
 # endif
- # ifdef CONFIG_X86_MCE_P4THERMAL
 -#endif
 -
 -#ifdef CONFIG_X86_32
 -#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
--	/* thermal monitor LVT interrupt */
--	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
- # endif
 -#endif
 #endif
 }
diff --cc arch/x86/mm/fault.c
index 5c6d816f30b4,5ec7ae366615..c6acc6326374
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@@ -3,41 -3,16 +3,17 @@@
  * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
  * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
  */
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
- #include
-
- #include
-
- #include
- #include
- #include
- #include
- #include
- #include
- #include
+ #include		/* STACK_END_MAGIC */
+ #include		/* test_thread_flag(), ... */
+ #include		/* oops_begin/end, ... */
+ #include		/* search_exception_table */
+ #include		/* max_low_pfn */
+ #include		/* __kprobes, ... */
+ #include		/* kmmio_handler, ... */
++#include		/* perf_swcounter_event */
+
+ #include		/* dotraplinkage, ... */
+ #include		/* pgd_*(), ... */
 /*
  * Page fault error code bits:
diff --cc include/linux/sched.h
index bc9326dcdde1,d1399660b776..28c774ff3cc7
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@@ -96,9 -97,8 +97,9 @@@ struct exec_domain
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
- struct bts_tracer;
 struct fs_struct;
+ struct bts_context;
+struct perf_counter_context;
 /*
  * List of flags we want to share for kernel threads,
@@@ -136,9 -137,8 +138,9 @@@ DECLARE_PER_CPU(unsigned long, process_
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
- extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+ extern void calc_global_load(void);
+extern u64 cpu_nr_migrations(int cpu);
 extern unsigned long get_parent_ip(unsigned long addr);
diff --cc kernel/Makefile
index e914ca992d70,a35eee3436de..90b53f6dc226
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@@ -93,9 -93,9 +93,10 @@@ obj-$(CONFIG_LATENCYTOP) += latencytop.
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
+ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra , the -fno-omit-frame-pointer is
diff --cc kernel/exit.c
index 99ad4063ee4a,cab535c427b8..49cdf6946f34
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@@ -48,8 -48,7 +48,8 @@@
 #include
 #include
 #include
+#include
- #include
+ #include
 #include
 #include
diff --cc kernel/fork.c
index f4466ca37ece,bb762b4dd217..4430eb1376f2
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@@ -61,9 -61,7 +61,8 @@@
 #include
 #include
 #include
- #include
 #include
+#include
 #include
 #include
diff --cc kernel/sched.c
index 8d43347a0c0d,14c447ae5d53..5b3f6ec1b0b3
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@@ -2885,30 -2912,74 +2941,83 @@@ unsigned long nr_iowait(void
 	return sum;
 }
- unsigned long nr_active(void)
+ /* Variables and functions for calc_load */
+ static atomic_long_t calc_load_tasks;
+ static unsigned long calc_load_update;
+ unsigned long avenrun[3];
+ EXPORT_SYMBOL(avenrun);
+
+ /**
+  * get_avenrun - get the load average array
+  * @loads:	pointer to dest load array
+  * @offset:	offset to add
+  * @shift:	shift count to shift the result left
+  *
+  * These values are estimates at best, so no need for locking.
+  */
+ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ {
+ 	loads[0] = (avenrun[0] + offset) << shift;
+ 	loads[1] = (avenrun[1] + offset) << shift;
+ 	loads[2] = (avenrun[2] + offset) << shift;
+ }
+
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
- 	unsigned long i, running = 0, uninterruptible = 0;
+ 	load *= exp;
+ 	load += active * (FIXED_1 - exp);
+ 	return load >> FSHIFT;
+ }
- 	for_each_online_cpu(i) {
- 		running += cpu_rq(i)->nr_running;
- 		uninterruptible += cpu_rq(i)->nr_uninterruptible;
- 	}
+ /*
+  * calc_load - update the avenrun load estimates 10 ticks after the
+  * CPUs have updated calc_load_tasks.
+  */
+ void calc_global_load(void)
+ {
+ 	unsigned long upd = calc_load_update + 10;
+ 	long active;
- 	if (unlikely((long)uninterruptible < 0))
- 		uninterruptible = 0;
+ 	if (time_before(jiffies, upd))
+ 		return;
- 	return running + uninterruptible;
+ 	active = atomic_long_read(&calc_load_tasks);
+ 	active = active > 0 ? active * FIXED_1 : 0;
+
+ 	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+ 	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+ 	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+ 	calc_load_update += LOAD_FREQ;
+ }
+
+ /*
+  * Either called from update_cpu_load() or from a cpu going idle
+  */
+ static void calc_load_account_active(struct rq *this_rq)
+ {
+ 	long nr_active, delta;
+
+ 	nr_active = this_rq->nr_running;
+ 	nr_active += (long) this_rq->nr_uninterruptible;
+
+ 	if (nr_active != this_rq->calc_load_active) {
+ 		delta = nr_active - this_rq->calc_load_active;
+ 		this_rq->calc_load_active = nr_active;
+ 		atomic_long_add(delta, &calc_load_tasks);
+ 	}
 }
+/*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+	return cpu_rq(cpu)->nr_migrations_in;
+}
+
 /*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
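
The scheduler side of this merge replaces the old nr_active() walk over every online CPU with sampled, fixed-point load averages: calc_load_account_active() folds each runqueue's running + uninterruptible count into calc_load_tasks, and calc_global_load() periodically decays that into avenrun[] as the classic 1/5/15-minute exponential moving average. As a rough user-space sketch of that arithmetic only (not kernel code): the FSHIFT/FIXED_1/EXP_* constants mirror the definitions in include/linux/sched.h, while the 5-second cadence and the sample task counts below are invented for illustration.

#include <stdio.h>

#define FSHIFT	11			/* nr of bits of fixed-point precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* 1/exp(5sec/1min) in fixed point */
#define EXP_5	2014			/* 1/exp(5sec/5min) */
#define EXP_15	2037			/* 1/exp(5sec/15min) */

/* Same arithmetic as the calc_load() added above: one EMA step in fixed point. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long avenrun[3] = { 0, 0, 0 };
	/* Hypothetical (running + uninterruptible) samples, one per 5s interval. */
	long samples[] = { 2, 2, 3, 1, 0, 0, 4, 4 };
	int i;

	for (i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++) {
		/* calc_global_load() scales the task count by FIXED_1 first. */
		unsigned long active = samples[i] * FIXED_1;

		avenrun[0] = calc_load(avenrun[0], EXP_1, active);
		avenrun[1] = calc_load(avenrun[1], EXP_5, active);
		avenrun[2] = calc_load(avenrun[2], EXP_15, active);

		/* Print like /proc/loadavg: integer part plus two decimals. */
		printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
		       avenrun[0] >> FSHIFT, ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT,
		       avenrun[1] >> FSHIFT, ((avenrun[1] & (FIXED_1 - 1)) * 100) >> FSHIFT,
		       avenrun[2] >> FSHIFT, ((avenrun[2] & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}

Each printed triple creeps toward the sampled task count at the three decay rates, which is the behaviour /proc/loadavg exposes; the merged code computes it from a single atomic counter updated per runqueue instead of re-walking all runqueues on every read, as nr_active() did.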