git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
s390/topology: cleanup topology code
authorHeiko Carstens <heiko.carstens@de.ibm.com>
Mon, 12 Nov 2012 09:03:25 +0000 (10:03 +0100)
committerMartin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 12 Nov 2012 15:24:49 +0000 (16:24 +0100)
Mainly merge all different per-cpu arrays into a single array which
holds all topology information per logical cpu.
Also fix the broken core vs socket variable naming and simplify the
locking a bit.
When running in environments without topology information, also
invent book, socket and core ids, so that not all ids are zero.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/topology.h
arch/s390/kernel/topology.c

index 9935cbd6a46fdee89e8cdfd65c7f9acc0de474b9..05425b18c0aab7af89205a716f2ba05cec89cd0a 100644 (file)
@@ -8,32 +8,34 @@ struct cpu;
 
 #ifdef CONFIG_SCHED_BOOK
 
-extern unsigned char cpu_socket_id[NR_CPUS];
-#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+struct cpu_topology_s390 {
+       unsigned short core_id;
+       unsigned short socket_id;
+       unsigned short book_id;
+       cpumask_t core_mask;
+       cpumask_t book_mask;
+};
+
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)      (cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu)                  (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)             (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu)                  (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu)             (&cpu_topology[cpu].book_mask)
 
-extern unsigned char cpu_core_id[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
+#define mc_capable() 1
 
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       return &cpu_core_map[cpu];
+       return &cpu_topology[cpu].core_mask;
 }
 
-#define topology_core_id(cpu)          (cpu_core_id[cpu])
-#define topology_core_cpumask(cpu)     (&cpu_core_map[cpu])
-#define mc_capable()                   (1)
-
-extern unsigned char cpu_book_id[NR_CPUS];
-extern cpumask_t cpu_book_map[NR_CPUS];
-
 static inline const struct cpumask *cpu_book_mask(int cpu)
 {
-       return &cpu_book_map[cpu];
+       return &cpu_topology[cpu].book_mask;
 }
 
-#define topology_book_id(cpu)          (cpu_book_id[cpu])
-#define topology_book_cpumask(cpu)     (&cpu_book_map[cpu])
-
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
index dd55f7c2010448a5b842c741ac6d93fc673591da..b69ff24ce1fd50a3978c2ffe21e3405ddc235fe7 100644 (file)
@@ -29,48 +29,38 @@ struct mask_info {
        cpumask_t mask;
 };
 
-static int topology_enabled = 1;
+static void set_topology_timer(void);
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static void set_topology_timer(void);
-static DECLARE_WORK(topology_work, topology_work_fn);
-/* topology_lock protects the core linked list */
-static DEFINE_SPINLOCK(topology_lock);
 
-static struct mask_info core_info;
-cpumask_t cpu_core_map[NR_CPUS];
-unsigned char cpu_core_id[NR_CPUS];
-unsigned char cpu_socket_id[NR_CPUS];
+static int topology_enabled = 1;
+static DECLARE_WORK(topology_work, topology_work_fn);
 
+/* topology_lock protects the socket and book linked lists */
+static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info socket_info;
 static struct mask_info book_info;
-cpumask_t cpu_book_map[NR_CPUS];
-unsigned char cpu_book_id[NR_CPUS];
+
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
        cpumask_t mask;
 
-       cpumask_clear(&mask);
-       if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
-               cpumask_copy(&mask, cpumask_of(cpu));
+       cpumask_copy(&mask, cpumask_of(cpu));
+       if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
                return mask;
+       for (; info; info = info->next) {
+               if (cpumask_test_cpu(cpu, &info->mask))
+                       return info->mask;
        }
-       while (info) {
-               if (cpumask_test_cpu(cpu, &info->mask)) {
-                       mask = info->mask;
-                       break;
-               }
-               info = info->next;
-       }
-       if (cpumask_empty(&mask))
-               cpumask_copy(&mask, cpumask_of(cpu));
        return mask;
 }
 
 static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
                                          struct mask_info *book,
-                                         struct mask_info *core,
-                                         int one_core_per_cpu)
+                                         struct mask_info *socket,
+                                         int one_socket_per_cpu)
 {
        unsigned int cpu;
 
@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 
                rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
                lcpu = smp_find_processor_id(rcpu);
-               if (lcpu >= 0) {
-                       cpumask_set_cpu(lcpu, &book->mask);
-                       cpu_book_id[lcpu] = book->id;
-                       cpumask_set_cpu(lcpu, &core->mask);
-                       cpu_core_id[lcpu] = rcpu;
-                       if (one_core_per_cpu) {
-                               cpu_socket_id[lcpu] = rcpu;
-                               core = core->next;
-                       } else {
-                               cpu_socket_id[lcpu] = core->id;
-                       }
-                       smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+               if (lcpu < 0)
+                       continue;
+               cpumask_set_cpu(lcpu, &book->mask);
+               cpu_topology[lcpu].book_id = book->id;
+               cpumask_set_cpu(lcpu, &socket->mask);
+               cpu_topology[lcpu].core_id = rcpu;
+               if (one_socket_per_cpu) {
+                       cpu_topology[lcpu].socket_id = rcpu;
+                       socket = socket->next;
+               } else {
+                       cpu_topology[lcpu].socket_id = socket->id;
                }
+               smp_cpu_set_polarization(lcpu, tl_cpu->pp);
        }
-       return core;
+       return socket;
 }
 
 static void clear_masks(void)
 {
        struct mask_info *info;
 
-       info = &core_info;
+       info = &socket_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
        return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
-static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
 {
-       struct mask_info *core = &core_info;
+       struct mask_info *socket = &socket_info;
        struct mask_info *book = &book_info;
        union topology_entry *tle, *end;
 
@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
                        book->id = tle->container.id;
                        break;
                case 1:
-                       core = core->next;
-                       core->id = tle->container.id;
+                       socket = socket->next;
+                       socket->id = tle->container.id;
                        break;
                case 0:
-                       add_cpus_to_mask(&tle->cpu, book, core, 0);
+                       add_cpus_to_mask(&tle->cpu, book, socket, 0);
                        break;
                default:
                        clear_masks();
@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
        }
 }
 
-static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
 {
-       struct mask_info *core = &core_info;
+       struct mask_info *socket = &socket_info;
        struct mask_info *book = &book_info;
        union topology_entry *tle, *end;
 
@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
                        book->id = tle->container.id;
                        break;
                case 0:
-                       core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+                       socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
                        break;
                default:
                        clear_masks();
@@ -174,22 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
        }
 }
 
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void tl_to_masks(struct sysinfo_15_1_x *info)
 {
        struct cpuid cpu_id;
 
        get_cpu_id(&cpu_id);
-       spin_lock_irq(&topology_lock);
        clear_masks();
        switch (cpu_id.machine) {
        case 0x2097:
        case 0x2098:
-               __tl_to_cores_z10(info);
+               __tl_to_masks_z10(info);
                break;
        default:
-               __tl_to_cores_generic(info);
+               __tl_to_masks_generic(info);
        }
-       spin_unlock_irq(&topology_lock);
 }
 
 static void topology_update_polarization_simple(void)
@@ -232,17 +220,19 @@ int topology_set_cpu_management(int fc)
        return rc;
 }
 
-static void update_cpu_core_map(void)
+static void update_cpu_masks(void)
 {
-       unsigned long flags;
        int cpu;
 
-       spin_lock_irqsave(&topology_lock, flags);
        for_each_possible_cpu(cpu) {
-               cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-               cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+               cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
+               cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+               if (!MACHINE_HAS_TOPOLOGY) {
+                       cpu_topology[cpu].core_id = cpu;
+                       cpu_topology[cpu].socket_id = cpu;
+                       cpu_topology[cpu].book_id = cpu;
+               }
        }
-       spin_unlock_irqrestore(&topology_lock, flags);
 }
 
 void store_topology(struct sysinfo_15_1_x *info)
@@ -256,22 +246,30 @@ void store_topology(struct sysinfo_15_1_x *info)
 int arch_update_cpu_topology(void)
 {
        struct sysinfo_15_1_x *info = tl_info;
+       unsigned long flags;
        struct device *dev;
+       int changed = 0;
        int cpu;
 
+       spin_lock_irqsave(&topology_lock, flags);
        if (!MACHINE_HAS_TOPOLOGY) {
-               update_cpu_core_map();
+               update_cpu_masks();
                topology_update_polarization_simple();
-               return 0;
+               goto out;
        }
+       changed = 1;
        store_topology(info);
-       tl_to_cores(info);
-       update_cpu_core_map();
-       for_each_online_cpu(cpu) {
-               dev = get_cpu_device(cpu);
-               kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+       tl_to_masks(info);
+       update_cpu_masks();
+out:
+       spin_unlock_irqrestore(&topology_lock, flags);
+       if (changed) {
+               for_each_online_cpu(cpu) {
+                       dev = get_cpu_device(cpu);
+                       kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+               }
        }
-       return 1;
+       return changed;
 }
 
 static void topology_work_fn(struct work_struct *work)
@@ -355,7 +353,7 @@ void __init s390_init_cpu_topology(void)
        for (i = 0; i < TOPOLOGY_NR_MAG; i++)
                printk(KERN_CONT " %d", info->mag[i]);
        printk(KERN_CONT " / %d\n", info->mnest);
-       alloc_masks(info, &core_info, 1);
+       alloc_masks(info, &socket_info, 1);
        alloc_masks(info, &book_info, 2);
 }
 
@@ -448,13 +446,17 @@ int topology_cpu_init(struct cpu *cpu)
 
 static int __init topology_init(void)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&topology_lock, flags);
        if (!MACHINE_HAS_TOPOLOGY) {
                topology_update_polarization_simple();
                goto out;
        }
        set_topology_timer();
 out:
-       update_cpu_core_map();
+       update_cpu_masks();
+       spin_unlock_irqrestore(&topology_lock, flags);
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);