/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpufreq.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

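/*
 * Arch hook used by the scheduler to obtain the current capacity of a CPU,
 * expressed relative to SCHED_CAPACITY_SCALE (1024) for the most capable
 * CPU in the system.
 */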
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
        return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

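/*
 * Show/store handlers backing the per-CPU "cpu_capacity" sysfs attribute.
 * Reads return the current capacity; writes validate the value against
 * SCHED_CAPACITY_SCALE and then apply it to the target CPU and all of its
 * core siblings, serialised by cpu_scale_mutex.
 */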
static ssize_t cpu_capacity_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%lu\n",
                        arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int this_cpu = cpu->dev.id, i;
        unsigned long new_capacity;
        ssize_t ret;

        if (count) {
                ret = kstrtoul(buf, 0, &new_capacity);
                if (ret)
                        return ret;
                if (new_capacity > SCHED_CAPACITY_SCALE)
                        return -EINVAL;

                mutex_lock(&cpu_scale_mutex);
                for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
                        set_capacity_scale(i, new_capacity);
                mutex_unlock(&cpu_scale_mutex);
        }

        return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

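/*
 * Create the "cpu_capacity" attribute for every possible CPU once the CPU
 * devices have been registered (hence subsys_initcall).
 */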
static int register_cpu_capacity_sysctl(void)
{
        int i;
        struct device *cpu;

        for_each_possible_cpu(i) {
                cpu = get_cpu_device(i);
                if (!cpu) {
                        pr_err("%s: too early to get CPU%d device!\n",
                               __func__, i);
                        continue;
                }
                device_create_file(cpu, &dev_attr_cpu_capacity);
        }

        return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;

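/*
 * Parse the optional "capacity-dmips-mhz" property of a CPU node into
 * raw_capacity[], tracking the largest value seen in capacity_scale so the
 * raw values can later be normalised. If any CPU lacks the property while
 * others have it, the partial data is discarded and cap_parsing_failed is
 * set, since capacities are only meaningful relative to one another.
 */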
static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
        int ret;
        u32 cpu_capacity;

        if (cap_parsing_failed)
                return;

        ret = of_property_read_u32(cpu_node,
                                   "capacity-dmips-mhz",
                                   &cpu_capacity);
        if (!ret) {
                if (!raw_capacity) {
                        raw_capacity = kcalloc(num_possible_cpus(),
                                               sizeof(*raw_capacity),
                                               GFP_KERNEL);
                        if (!raw_capacity) {
                                pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
                                cap_parsing_failed = true;
                                return;
                        }
                }
                capacity_scale = max(cpu_capacity, capacity_scale);
                raw_capacity[cpu] = cpu_capacity;
                pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
                        cpu_node->full_name, raw_capacity[cpu]);
        } else {
                if (raw_capacity) {
                        pr_err("cpu_capacity: missing %s raw capacity\n",
                                cpu_node->full_name);
                        pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
                }
                cap_parsing_failed = true;
                kfree(raw_capacity);
                /* Clear the stale pointer so a later kfree() cannot double-free. */
                raw_capacity = NULL;
        }
}

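/*
 * Scale every raw capacity so that the largest one maps to
 * SCHED_CAPACITY_SCALE, i.e. cpu_scale = raw * 1024 / capacity_scale.
 */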
static void normalize_cpu_capacity(void)
{
        u64 capacity;
        int cpu;

        if (!raw_capacity || cap_parsing_failed)
                return;

        pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
        mutex_lock(&cpu_scale_mutex);
        for_each_possible_cpu(cpu) {
                pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
                         cpu, raw_capacity[cpu]);
                capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
                        / capacity_scale;
                set_capacity_scale(cpu, capacity);
                pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                        cpu, arch_scale_cpu_capacity(NULL, cpu));
        }
        mutex_unlock(&cpu_scale_mutex);
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

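/*
 * cpufreq policy notifier: once a policy is known for a set of CPUs,
 * multiply their current capacities by the policy's maximum frequency
 * (cpuinfo.max_freq is in kHz, hence the divide by 1000) so that capacity
 * reflects both per-clock performance and clock rate. Once every possible
 * CPU has been visited, re-normalise, free the raw table, and schedule the
 * unregistration work item (the notifier cannot unregister itself from
 * within its own callback).
 */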
static int
init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
{
        struct cpufreq_policy *policy = data;
        int cpu;

        if (cap_parsing_failed || cap_parsing_done)
                return 0;

        switch (val) {
        case CPUFREQ_NOTIFY:
                pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
                                cpumask_pr_args(policy->related_cpus),
                                cpumask_pr_args(cpus_to_visit));
                cpumask_andnot(cpus_to_visit,
                               cpus_to_visit,
                               policy->related_cpus);
                for_each_cpu(cpu, policy->related_cpus) {
                        raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
                                            policy->cpuinfo.max_freq / 1000UL;
                        capacity_scale = max(raw_capacity[cpu], capacity_scale);
                }
                if (cpumask_empty(cpus_to_visit)) {
                        normalize_cpu_capacity();
                        kfree(raw_capacity);
                        pr_debug("cpu_capacity: parsing done\n");
                        cap_parsing_done = true;
                        schedule_work(&parsing_done_work);
                }
        }
        return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
        /*
         * On ACPI-based systems we need to use the default cpu capacity
         * until we have the necessary code to parse the cpu capacity, so
         * skip registering the cpufreq notifier there.
         */
        if (!acpi_disabled || cap_parsing_failed)
                return -EINVAL;

        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
                pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
                return -ENOMEM;
        }
        cpumask_copy(cpus_to_visit, cpu_possible_mask);

        return cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                         CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                    CPUFREQ_POLICY_NOTIFIER);
}

#else
static int __init free_raw_capacity(void)
{
        kfree(raw_capacity);

        return 0;
}
core_initcall(free_raw_capacity);
#endif

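/*
 * Resolve the "cpu" phandle of a cpu-map node to a logical CPU number,
 * parsing that CPU's capacity along the way. Returns -1 if the phandle is
 * missing or does not match any possible CPU.
 */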
static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        for_each_possible_cpu(cpu) {
                if (of_get_cpu_node(cpu, NULL) == cpu_node) {
                        parse_cpu_capacity(cpu_node, cpu);
                        of_node_put(cpu_node);
                        return cpu;
                }
        }

        pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

        of_node_put(cpu_node);
        return -1;
}

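/*
 * Parse a core node of the cpu-map: either a leaf with a direct "cpu"
 * phandle, or a container of thread%d subnodes on SMT systems. A core that
 * has both threads and its own "cpu" phandle is rejected.
 */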
static int __init parse_core(struct device_node *core, int cluster_id,
                             int core_id)
{
        char name[10];
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].cluster_id = cluster_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else {
                                pr_err("%s: Can't get CPU for thread\n",
                                       t->full_name);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%s: Core has both threads and CPU\n",
                               core->full_name);
                        return -EINVAL;
                }

                cpu_topology[cpu].cluster_id = cluster_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf) {
                pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
                return -EINVAL;
        }

        return 0;
}

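/*
 * Recursively parse a cluster node. Nested cluster%d children are walked
 * first; only leaf clusters may contain core%d nodes, and each leaf cluster
 * is assigned the next sequential cluster_id.
 */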
static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[10];
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int cluster_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%s: cpu-map children should be clusters\n",
                                       c->full_name);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, cluster_id, core_id++);
                        } else {
                                pr_err("%s: Non-leaf cluster with core %s\n",
                                       cluster->full_name, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%s: empty cluster\n", cluster->full_name);

        if (leaf)
                cluster_id++;

        return 0;
}

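/*
 * Walk /cpus/cpu-map and populate cpu_topology[] from it. Returns an error
 * if the map is malformed or omits a possible CPU, in which case the caller
 * discards everything parsed so far.
 */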
static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map) {
                cap_parsing_failed = true;
                goto out;
        }

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        normalize_cpu_capacity();

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu)
                if (cpu_topology[cpu].cluster_id == -1)
                        ret = -EINVAL;

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

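/*
 * Rebuild the sibling masks of @cpuid against every possible CPU: CPUs in
 * the same cluster are core siblings, and CPUs that additionally share a
 * core_id are thread siblings.
 */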
static void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}

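/*
 * Derive the topology of @cpuid from its MPIDR_EL1 affinity fields when no
 * DT description has populated it: with the MT bit set, Aff0 is the thread
 * and Aff1 the core; otherwise Aff0 is the core. The remaining affinity
 * levels are folded into cluster_id.
 */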
void store_cpu_topology(unsigned int cpuid)
{
        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        u64 mpidr;

        if (cpuid_topo->cluster_id != -1)
                goto topology_populated;

        mpidr = read_cpuid_mpidr();

        /* Uniprocessor systems can rely on default topology values */
        if (mpidr & MPIDR_UP_BITMASK)
                return;

        /* Create cpu topology mapping based on MPIDR. */
        if (mpidr & MPIDR_MT_BITMASK) {
                /* Multiprocessor system: multiple threads per core */
                cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
        } else {
                /* Multiprocessor system: single thread per core */
                cpuid_topo->thread_id  = -1;
                cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
        }

        pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
                 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
                 cpuid_topo->thread_id, mpidr);

topology_populated:
        update_siblings_masks(cpuid);
}

static void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = 0;
                cpu_topo->cluster_id = -1;

                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
        }
}

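/*
 * Early entry point: reset all entries to defaults, then try the DT
 * cpu-map; on failure fall back to the clean defaults, leaving each CPU to
 * be filled in from MPIDR by store_cpu_topology() at boot.
 */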
void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
}