/*
 *    Copyright IBM Corp. 2007
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

#define CPU_BITS	64	/* CPUs covered by one topology-list CPU entry */
#define NR_MAG		6	/* number of Mag fields in the information block */

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
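
/*
 * Layout of the CPU topology information block returned by
 * stsi 15.1.2: a header followed by a list of container and
 * CPU entries.
 */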
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;	/* polarization */
	unsigned char reserved1;
	unsigned short origin;
	unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

struct tl_container {
	unsigned char reserved[8];
};

union tl_entry {
	unsigned char nl;	/* nesting level */
	struct tl_cpu cpu;
	struct tl_container container;
};

struct tl_info {
	unsigned char reserved0[2];
	unsigned short length;
	unsigned char mag[NR_MAG];
	unsigned char reserved1;
	unsigned char mnest;
	unsigned char reserved2[4];
	union tl_entry tle[0];
};

struct core_info {
	struct core_info *next;
	cpumask_t mask;
};

static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

cpumask_t cpu_core_map[NR_CPUS];
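
/*
 * Return the mask of CPUs that share a core with @cpu.  Without
 * topology information all present CPUs are reported; a CPU that is
 * not found in any core falls back to a single-CPU mask.
 */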
cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	struct core_info *core = &core_info;
	unsigned long flags;
	cpumask_t mask;

	cpus_clear(mask);
	if (!machine_has_topology)
		return cpu_present_map;
	spin_lock_irqsave(&topology_lock, flags);
	while (core) {
		if (cpu_isset(cpu, core->mask)) {
			mask = core->mask;
			break;
		}
		core = core->next;
	}
	spin_unlock_irqrestore(&topology_lock, flags);
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}

const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
{
	return &cpu_core_map[cpu];
}
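
/*
 * Mark all logical CPUs described by one topology-list CPU entry in
 * @core and record their polarization.  Each bit in the entry's mask
 * stands for one physical CPU address relative to the entry's origin.
 */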
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (__cpu_logical_map[lcpu] == rcpu) {
				cpu_set(lcpu, core->mask);
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}

static void clear_cores(void)
{
	struct core_info *core = &core_info;

	while (core) {
		cpus_clear(core->mask);
		core = core->next;
	}
}
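
/*
 * Container and CPU entries have different sizes; the nesting level
 * tells which one we are looking at.
 */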
static union tl_entry *next_tle(union tl_entry *tle)
{
	if (tle->nl)
		return (union tl_entry *)((struct tl_container *)tle + 1);
	else
		return (union tl_entry *)((struct tl_cpu *)tle + 1);
}
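
/*
 * Rebuild the core_info list from a topology information block.
 * An unknown nesting level disables topology support altogether.
 */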
static void tl_to_cores(struct tl_info *info)
{
	union tl_entry *tle, *end;
	struct core_info *core = &core_info;

	spin_lock_irq(&topology_lock);
	clear_cores();
	tle = info->tle;
	end = (union tl_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 5:
		case 4:
		case 3:
		case 2:
			break;
		case 1:
			core = core->next;
			break;
		case 0:
			add_cpus_to_core(&tle->cpu, core);
			break;
		default:
			clear_cores();
			machine_has_topology = 0;
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}
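
/* Without topology data all present CPUs count as horizontally polarized. */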
static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_present_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}
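
/* Issue the perform-topology-function (PTF) instruction with function code @fc. */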
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}
int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_present_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

static void update_cpu_core_map(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}
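
/*
 * Re-read the topology information, update the core map and notify
 * user space with a change uevent for every online CPU.
 */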
void arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return;
	}
	stsi(info, 15, 1, 2);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
}

static void topology_work_fn(struct work_struct *work)
{
	arch_reinit_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
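
/*
 * Machines without a topology-change interrupt are polled once a
 * minute: check for a pending change and rearm the timer.
 */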
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

static void topology_interrupt(__u16 code)
{
	schedule_work(&topology_work);
}
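
/*
 * Set up change notification: use the external interrupt (code 0x2005)
 * when the machine provides it, otherwise fall back to the polling timer.
 */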
static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!machine_has_topology) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	if (machine_has_topology_irq) {
		rc = register_external_interrupt(0x2005, topology_interrupt);
		if (rc)
			goto out;
		ctl_set_bit(0, 8);
	}
	else
		set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);
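
/*
 * Early boot setup: probe the facility bits for topology support, read
 * the topology information block once and preallocate the core_info
 * list from bootmem.
 */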
void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	if (stfle(&facility_bits, 1) <= 0)
		return;
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	if (facility_bits & (1ULL << 51))
		machine_has_topology_irq = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	stsi(info, 15, 1, 2);

	nr_cores = info->mag[NR_MAG - 2];
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	printk(KERN_INFO "CPU topology:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
	machine_has_topology_irq = 0;
}