/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>
/*
 * Function codes for the PTF (Perform Topology Function) instruction:
 * switch the machine to horizontal or vertical CPU polarization, or
 * check whether a topology-change report is pending.
 */
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
31 unsigned char reserved0[4];
34 unsigned char reserved1;
35 unsigned short origin;
36 unsigned long mask[CPU_BITS / BITS_PER_LONG];
40 unsigned char reserved[7];
47 struct tl_container container;
51 unsigned char reserved0[2];
52 unsigned short length;
53 unsigned char mag[NR_MAG];
54 unsigned char reserved1;
56 unsigned char reserved2[4];
57 union tl_entry tle[0];
61 struct mask_info *next;
66 static int topology_enabled = 1;
67 static void topology_work_fn(struct work_struct *work);
68 static struct tl_info *tl_info;
69 static struct timer_list topology_timer;
70 static void set_topology_timer(void);
71 static DECLARE_WORK(topology_work, topology_work_fn);
72 /* topology_lock protects the core linked list */
73 static DEFINE_SPINLOCK(topology_lock);
75 static struct mask_info core_info;
76 cpumask_t cpu_core_map[NR_CPUS];
77 unsigned char cpu_core_id[NR_CPUS];
#ifdef CONFIG_SCHED_BOOK
/* Head of the per-book mask list and the exported per-cpu book maps/ids. */
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
#endif /* CONFIG_SCHED_BOOK */
85 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
90 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
91 return cpu_possible_map;
93 if (cpu_isset(cpu, info->mask)) {
100 mask = cpumask_of_cpu(cpu);
104 static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
105 struct mask_info *core)
109 for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
111 cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
113 unsigned int rcpu, lcpu;
115 rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
116 for_each_present_cpu(lcpu) {
117 if (cpu_logical_map(lcpu) != rcpu)
119 #ifdef CONFIG_SCHED_BOOK
120 cpu_set(lcpu, book->mask);
121 cpu_book_id[lcpu] = book->id;
123 cpu_set(lcpu, core->mask);
124 cpu_core_id[lcpu] = core->id;
125 smp_cpu_polarization[lcpu] = tl_cpu->pp;
130 static void clear_masks(void)
132 struct mask_info *info;
136 cpus_clear(info->mask);
139 #ifdef CONFIG_SCHED_BOOK
142 cpus_clear(info->mask);
148 static union tl_entry *next_tle(union tl_entry *tle)
151 return (union tl_entry *)((struct tl_container *)tle + 1);
153 return (union tl_entry *)((struct tl_cpu *)tle + 1);
156 static void tl_to_cores(struct tl_info *info)
158 #ifdef CONFIG_SCHED_BOOK
159 struct mask_info *book = &book_info;
161 struct mask_info *book = NULL;
163 struct mask_info *core = &core_info;
164 union tl_entry *tle, *end;
167 spin_lock_irq(&topology_lock);
170 end = (union tl_entry *)((unsigned long)info + info->length);
173 #ifdef CONFIG_SCHED_BOOK
176 book->id = tle->container.id;
181 core->id = tle->container.id;
184 add_cpus_to_mask(&tle->cpu, book, core);
193 spin_unlock_irq(&topology_lock);
196 static void topology_update_polarization_simple(void)
200 mutex_lock(&smp_cpu_state_mutex);
201 for_each_possible_cpu(cpu)
202 smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
203 mutex_unlock(&smp_cpu_state_mutex);
206 static int ptf(unsigned long fc)
211 " .insn rre,0xb9a20000,%1,%1\n"
219 int topology_set_cpu_management(int fc)
224 if (!MACHINE_HAS_TOPOLOGY)
227 rc = ptf(PTF_VERTICAL);
229 rc = ptf(PTF_HORIZONTAL);
232 for_each_possible_cpu(cpu)
233 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
237 static void update_cpu_core_map(void)
242 spin_lock_irqsave(&topology_lock, flags);
243 for_each_possible_cpu(cpu) {
244 cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
245 #ifdef CONFIG_SCHED_BOOK
246 cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
249 spin_unlock_irqrestore(&topology_lock, flags);
/*
 * Read the machine topology into @info via STSI 15.1.x: try the
 * three-level block (books) first and fall back to the two-level block
 * when the machine does not support it.
 */
static void store_topology(struct tl_info *info)
{
#ifdef CONFIG_SCHED_BOOK
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
#endif
	stsi(info, 15, 1, 2);
}
264 int arch_update_cpu_topology(void)
266 struct tl_info *info = tl_info;
267 struct sys_device *sysdev;
270 if (!MACHINE_HAS_TOPOLOGY) {
271 update_cpu_core_map();
272 topology_update_polarization_simple();
275 store_topology(info);
277 update_cpu_core_map();
278 for_each_online_cpu(cpu) {
279 sysdev = get_cpu_sysdev(cpu);
280 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
/* Deferred work: rebuild the scheduler domains after a topology change. */
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
290 void topology_schedule_update(void)
292 schedule_work(&topology_work);
295 static void topology_timer_fn(unsigned long ignored)
298 topology_schedule_update();
299 set_topology_timer();
302 static void set_topology_timer(void)
304 topology_timer.function = topology_timer_fn;
305 topology_timer.data = 0;
306 topology_timer.expires = jiffies + 60 * HZ;
307 add_timer(&topology_timer);
310 static int __init early_parse_topology(char *p)
312 if (strncmp(p, "off", 3))
314 topology_enabled = 0;
317 early_param("topology", early_parse_topology);
319 static int __init init_topology_update(void)
324 if (!MACHINE_HAS_TOPOLOGY) {
325 topology_update_polarization_simple();
328 init_timer_deferrable(&topology_timer);
329 set_topology_timer();
331 update_cpu_core_map();
334 __initcall(init_topology_update);
336 static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
340 nr_masks = info->mag[NR_MAG - offset];
341 for (i = 0; i < info->mnest - offset; i++)
342 nr_masks *= info->mag[NR_MAG - offset - 1 - i];
343 nr_masks = max(nr_masks, 1);
344 for (i = 0; i < nr_masks; i++) {
345 mask->next = alloc_bootmem(sizeof(struct mask_info));
350 void __init s390_init_cpu_topology(void)
352 struct tl_info *info;
355 if (!MACHINE_HAS_TOPOLOGY)
357 tl_info = alloc_bootmem_pages(PAGE_SIZE);
359 store_topology(info);
360 pr_info("The CPU configuration topology of the machine is:");
361 for (i = 0; i < NR_MAG; i++)
362 printk(" %d", info->mag[i]);
363 printk(" / %d\n", info->mnest);
364 alloc_masks(info, &core_info, 2);
365 #ifdef CONFIG_SCHED_BOOK
366 alloc_masks(info, &book_info, 3);