x86: Convert cpu_core_map to be a per cpu variable
author    Mike Travis <travis@sgi.com>
          Tue, 16 Oct 2007 08:24:04 +0000 (01:24 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Tue, 16 Oct 2007 16:42:50 +0000 (09:42 -0700)
This is from an earlier message by Christoph Lameter:

    cpu_core_map is currently an array sized by NR_CPUS. This means that we
    over-allocate, since we will rarely use the maximum number of configured
    cpus.

    If we put the cpu_core_map into the per cpu area then it will be allocated
    for each processor as it comes online.

    This means that the core map cannot be accessed until the per cpu area
    has been allocated. Xen does a weird thing here, looping over all
    processors and zeroing masks that are not yet allocated and that will be
    zeroed anyway when they are allocated. I commented that code out.
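
In isolation, the conversion looks like this (a minimal sketch, not part of
the commit; it assumes a kernel build where <linux/percpu.h> and
<linux/cpumask.h> provide the per cpu and cpumask primitives, and
core_sibling_count() is a hypothetical helper for illustration):

	#include <linux/percpu.h>	/* DEFINE_PER_CPU, per_cpu */
	#include <linux/cpumask.h>	/* cpumask_t, cpus_weight */

	/* Before: NR_CPUS masks allocated statically, whether the CPUs exist or not. */
	/* cpumask_t cpu_core_map[NR_CPUS] __read_mostly; */

	/* After: one mask in each CPU's per cpu area. */
	DEFINE_PER_CPU(cpumask_t, cpu_core_map);

	/* Array indexing becomes a per_cpu() lookup (hypothetical helper). */
	static int core_sibling_count(int cpu)
	{
		return cpus_weight(per_cpu(cpu_core_map, cpu));
	}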

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
12 files changed:
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/mce_amd_64.c
arch/x86/kernel/setup_64.c
arch/x86/kernel/smpboot_32.c
arch/x86/kernel/smpboot_64.c
arch/x86/xen/smp.c
include/asm-x86/smp_32.h
include/asm-x86/smp_64.h
include/asm-x86/topology_32.h
include/asm-x86/topology_64.h
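
The header and source hunks below follow the usual per cpu pattern: the
headers replace the extern array with a DECLARE_PER_CPU() declaration, each
smpboot file carries the single DEFINE_PER_CPU() definition for its
architecture, and EXPORT_PER_CPU_SYMBOL() keeps the variable visible to
modules such as powernow-k8. Condensed (names as in the commit; the layout
below is an editorial sketch, not an additional hunk):

	/* include/asm-x86/smp_32.h and smp_64.h: */
	DECLARE_PER_CPU(cpumask_t, cpu_core_map);

	/* arch/x86/kernel/smpboot_32.c and smpboot_64.c: */
	DEFINE_PER_CPU(cpumask_t, cpu_core_map);
	EXPORT_PER_CPU_SYMBOL(cpu_core_map);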

diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ffd01e5dcb52c2cacf61f8dee9cf2f450543b8a7..2ca43ba32bc0ea50e99c4f872dfac269bde7ee8f 100644
@@ -595,7 +595,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        dmi_check_system(sw_any_bug_dmi_table);
        if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-               policy->cpus = cpu_core_map[cpu];
+               policy->cpus = per_cpu(cpu_core_map, cpu);
        }
 #endif
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b273b69cfddf0532d18b4db6646fafd077cdc31e..c06ac680c9cace052135ed7e790118a7b4e71c86 100644
@@ -57,7 +57,7 @@ static struct powernow_k8_data *powernow_data[NR_CPUS];
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-static cpumask_t cpu_core_map[1];
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -667,7 +667,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
 
        dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
        data->powernow_table = powernow_table;
-       if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+       if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
                print_basics(data);
 
        for (j = 0; j < data->numps; j++)
@@ -821,7 +821,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
        /* fill in data */
        data->numps = data->acpi_data.state_count;
-       if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+       if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
                print_basics(data);
        powernow_k8_acpi_pst_values(data, 0);
 
@@ -1214,7 +1214,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        if (cpu_family == CPU_HW_PSTATE)
                pol->cpus = cpumask_of_cpu(pol->cpu);
        else
-               pol->cpus = cpu_core_map[pol->cpu];
+               pol->cpus = per_cpu(cpu_core_map, pol->cpu);
        data->available_cores = &(pol->cpus);
 
        /* Take a crude guess here.
@@ -1281,7 +1281,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
        cpumask_t oldmask = current->cpus_allowed;
        unsigned int khz = 0;
 
-       data = powernow_data[first_cpu(cpu_core_map[cpu])];
+       data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
 
        if (!data)
                return -EINVAL;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 1e31b6caffb1651def36e96acf4cd06e4df48d4e..879a0f789b1e223026ec820c873cca43a70dec89 100644
@@ -122,7 +122,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #ifdef CONFIG_X86_HT
        if (c->x86_max_cores * smp_num_siblings > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
+               seq_printf(m, "siblings\t: %d\n",
+                               cpus_weight(per_cpu(cpu_core_map, n)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 2f8a7f18b0fea31d9e9c90b829de6f1548b6b584..805b62b1e0dfa3c77adf0c68fb1cab083297379f 100644
@@ -472,7 +472,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
        if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {   /* symlink */
-               i = first_cpu(cpu_core_map[cpu]);
+               i = first_cpu(per_cpu(cpu_core_map, cpu));
 
                /* first core not up yet */
                if (cpu_data[i].cpu_core_id)
@@ -492,7 +492,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (err)
                        goto out;
 
-               b->cpus = cpu_core_map[cpu];
+               b->cpus = per_cpu(cpu_core_map, cpu);
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
@@ -509,7 +509,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        b->cpus = CPU_MASK_ALL;
 #else
-       b->cpus = cpu_core_map[cpu];
+       b->cpus = per_cpu(cpu_core_map, cpu);
 #endif
        err = kobject_register(&b->kobj);
        if (err)
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index b7da90e79c78d6660102a115ac38d7f6ea8b3831..85b5b6310accd1af4100b73054bcaf345619d086 100644
@@ -1070,7 +1070,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
+               seq_printf(m, "siblings\t: %d\n",
+                              cpus_weight(per_cpu(cpu_core_map, cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e4f61d1c6248d8116810e122651f3fb109b5b78e..4cbab48ba86548ebfeb788b5d096ad2e0a596508 100644
@@ -74,8 +74,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
@@ -300,7 +300,7 @@ cpumask_t cpu_coregroup_map(int cpu)
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
-               return cpu_core_map[cpu];
+               return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
 }
@@ -321,8 +321,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
-                               cpu_set(i, cpu_core_map[cpu]);
-                               cpu_set(cpu, cpu_core_map[i]);
+                               cpu_set(i, per_cpu(cpu_core_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
                                cpu_set(cpu, c[i].llc_shared_map);
                        }
@@ -334,7 +334,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        cpu_set(cpu, c[cpu].llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               cpu_core_map[cpu] = cpu_sibling_map[cpu];
+               per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
                c[cpu].booted_cores = 1;
                return;
        }
@@ -346,8 +346,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                        cpu_set(cpu, c[i].llc_shared_map);
                }
                if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         *  Does this new cpu bringup a new core?
                         */
@@ -984,7 +984,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                                           " Using dummy APIC emulation.\n");
                map_cpu_to_logical_apicid();
                cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1009,7 +1009,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1024,7 +1024,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1107,11 +1107,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
         */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+               cpus_clear(per_cpu(cpu_core_map, cpu));
        }
 
        cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       cpu_set(0, per_cpu(cpu_core_map, 0));
 
        smpboot_setup_io_apic();
 
@@ -1148,9 +1148,9 @@ void remove_siblinginfo(int cpu)
        int sibling;
        struct cpuinfo_x86 *c = cpu_data;
 
-       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-               cpu_clear(cpu, cpu_core_map[sibling]);
-               /*
+       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+               /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(cpu_sibling_map[cpu]) == 1)
@@ -1160,7 +1160,7 @@ void remove_siblinginfo(int cpu)
        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
                cpu_clear(cpu, cpu_sibling_map[sibling]);
        cpus_clear(cpu_sibling_map[cpu]);
-       cpus_clear(cpu_core_map[cpu]);
+       cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 720a7d1f8862be153b8e356e582c57727cbdaaef..6723c8622828829c3e18ef447b1ab9cb04d2d0ab 100644
@@ -95,8 +95,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /*
  * Trampoline 80x86 program as an array.
@@ -243,7 +243,7 @@ cpumask_t cpu_coregroup_map(int cpu)
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
-               return cpu_core_map[cpu];
+               return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
 }
@@ -264,8 +264,8 @@ static inline void set_cpu_sibling_map(int cpu)
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
-                               cpu_set(i, cpu_core_map[cpu]);
-                               cpu_set(cpu, cpu_core_map[i]);
+                               cpu_set(i, per_cpu(cpu_core_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
                                cpu_set(cpu, c[i].llc_shared_map);
                        }
@@ -277,7 +277,7 @@ static inline void set_cpu_sibling_map(int cpu)
        cpu_set(cpu, c[cpu].llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               cpu_core_map[cpu] = cpu_sibling_map[cpu];
+               per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
                c[cpu].booted_cores = 1;
                return;
        }
@@ -289,8 +289,8 @@ static inline void set_cpu_sibling_map(int cpu)
                        cpu_set(cpu, c[i].llc_shared_map);
                }
                if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         *  Does this new cpu bringup a new core?
                         */
@@ -736,7 +736,7 @@ static __init void disable_smp(void)
        else
                phys_cpu_present_map = physid_mask_of_physid(0);
        cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -971,8 +971,8 @@ static void remove_siblinginfo(int cpu)
        int sibling;
        struct cpuinfo_x86 *c = cpu_data;
 
-       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-               cpu_clear(cpu, cpu_core_map[sibling]);
+       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
@@ -983,7 +983,7 @@ static void remove_siblinginfo(int cpu)
        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
                cpu_clear(cpu, cpu_sibling_map[sibling]);
        cpus_clear(cpu_sibling_map[cpu]);
-       cpus_clear(cpu_core_map[cpu]);
+       cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 557b8e24706a94e3511b9a802f333dbb5a2f573f..539d42530fc49ebcdc9170d2abc99ea433e647f8 100644
@@ -148,7 +148,12 @@ void __init xen_smp_prepare_boot_cpu(void)
 
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+               /*
+                * cpu_core_map lives in a per cpu area that is cleared
+                * when the per cpu array is allocated.
+                *
+                * cpus_clear(per_cpu(cpu_core_map, cpu));
+                */
        }
 
        xen_setup_vcpu_info_placement();
@@ -160,7 +165,12 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+               /*
+                * cpu_core_map will be zeroed when the per
+                * cpu area is allocated.
+                *
+                * cpus_clear(per_cpu(cpu_core_map, cpu));
+                */
        }
 
        smp_store_cpu_info(0);
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 1f73bde165b166759b1a4f1331f3058673716738..01ab31bb262a57ce9518b4063a23dc0a5da78286 100644
@@ -31,7 +31,7 @@ extern void smp_alloc_memory(void);
 extern int pic_mode;
 extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
-extern cpumask_t cpu_core_map[];
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 3f303d2365ed68d9b207b43976bd1c3aa2282e51..65f8448644151ed13b2eac23df5b5988400b0cdf 100644
@@ -39,7 +39,12 @@ extern int smp_num_siblings;
 extern void smp_send_reschedule(int cpu);
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
+/*
+ * cpu_core_map lives in a per cpu area
+ *
+ * extern cpumask_t cpu_core_map[NR_CPUS];
+ */
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 extern u8 cpu_llc_id[NR_CPUS];
 
 #define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86/topology_32.h b/include/asm-x86/topology_32.h
index 19b2dafd0c81b1f822e8fa34d8b65c91cd0378c6..7b68dbcd0eb015fa13683821888ada8d85c74123 100644
@@ -30,7 +30,7 @@
 #ifdef CONFIG_X86_HT
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data[cpu].cpu_core_id)
-#define topology_core_siblings(cpu)            (cpu_core_map[cpu])
+#define topology_core_siblings(cpu)            (per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
 #endif
 
diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h
index 36e52fba796075bfd4c45b845af67fe49e08a7ef..b8590dff34c8d8957d0451dd604bb4910d794a8b 100644
@@ -58,7 +58,7 @@ extern int __node_distance(int, int);
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data[cpu].cpu_core_id)
-#define topology_core_siblings(cpu)            (cpu_core_map[cpu])
+#define topology_core_siblings(cpu)            (per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
 #define mc_capable()                   (boot_cpu_data.x86_max_cores > 1)
 #define smt_capable()                  (smp_num_siblings > 1)