/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

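/*
 * Illustrative note (hypothetical command line): booting with
 * "numa=fake=512M,1G" makes fake_numa_create_new_node() split memory
 * into fake nodes at the listed boundaries, so that (roughly, at the
 * granularity of the scanned memory ranges) [0, 512M) lands in node 0,
 * [512M, 1G) in node 1 and everything above 1G in node 2.
 */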
static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify node id, iff we started creating NUMA nodes
         * We want to continue from where we left off the last time
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}

static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}

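/*
 * Worked example with made-up values: assume form 1 affinity and
 * distance_ref_points_depth == 2. Two nodes whose reference-point
 * values match at the first level are LOCAL_DISTANCE (10) apart;
 * nodes that differ at the first level but match at the second get
 * one doubling (20); nodes that differ at both levels get two
 * doublings (40).
 */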
int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
EXPORT_SYMBOL(__node_distance);

static void initialize_distance_lookup_table(int nid,
                const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
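/*
 * Illustrative example (hypothetical property): an ibm,associativity
 * value of <4 0 0 0 1> has a length cell of 4 followed by four
 * entries; with min_common_depth == 4 the node id is read from the
 * fourth entry, i.e. nid == 1.
 */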
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 &&
                of_read_number(associativity, 1) >= distance_ref_points_depth) {
                /*
                 * Skip the length field and send start of associativity array
                 */
                initialize_distance_lookup_table(nid, associativity + 1);
        }

out:
        return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
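        /*
         * Hypothetical example: with form 1 affinity, an
         * ibm,associativity-reference-points value of <4 2> means the
         * fourth entry of each ibm,associativity array marks the most
         * significant NUMA boundary, so min_common_depth is taken
         * from that first integer (4).
         */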
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}
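
/*
 * Example (hypothetical cells): with n == 2, reading the cells
 * <0x1 0x0> yields 0x100000000; the two 32-bit big-endian cells are
 * concatenated into one 64-bit value and *buf is advanced past them.
 */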

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
        const __be32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = of_read_number(cp, 1);
        drmem->reserved = of_read_number(&cp[1], 1);
        drmem->aa_index = of_read_number(&cp[2], 1);
        drmem->flags = of_read_number(&cp[3], 1);

        *cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N followed by
 * N memblock list entries.  Each memblock list entry contains information
 * as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
        const __be32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = of_read_number(prop++, 1);

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}
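
/*
 * Sketch of the layout (hypothetical, with n_mem_addr_cells == 2):
 * <2  B0hi B0lo drc0 res0 aa0 fl0  B1hi B1lo drc1 res1 aa1 fl1>
 * describes two LMBs; each entry is a 64-bit base address followed
 * by the drc-index, reserved, aa-index and flags cells read by
 * read_drconf_cell() above.
 */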

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
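/*
 * For instance (hypothetical property): <2 3  0 0 1  0 0 2> encodes
 * N == 2 arrays of M == 3 entries each, namely {0, 0, 1} and
 * {0, 0, 2}.
 */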
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
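/*
 * Index arithmetic, by example (hypothetical values): with
 * min_common_depth == 4 and aa->array_sz == 5, an LMB whose
 * aa_index is 1 reads its node id from aa->arrays[1 * 5 + 3],
 * i.e. the fourth entry of the second lookup array.
 */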
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = of_read_number(&aa->arrays[index], 1);

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;

                if (nid > 0) {
                        index = drmem->aa_index * aa->array_sz;
                        initialize_distance_lookup_table(nid,
                                                        &aa->arrays[index]);
                }
        }

        return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid = -1;
        struct device_node *cpu;

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         */
        if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                if (cpu_present(lcpu))
                        goto out_present;
                else
                        goto out;
        }

        nid = of_node_to_nid_single(cpu);

out_present:
        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        map_cpu_to_node(lcpu, nid);
        of_node_put(cpu);
out:
        return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong"
                                " to the same node!\n", cpu, sibling);
                        break;
                }
        }
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE, nid;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                nid = numa_setup_cpu(lcpu);
                verify_cpu_node_mapping((int)lcpu, nid);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
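
/*
 * Example (hypothetical limit): if memblock_end_of_DRAM() is 4G, a
 * region starting at 3G with size 2G is truncated to 1G, while a
 * region starting at 5G returns 0 and is discarded.
 */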

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory, the corresponding
         * entry in the linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) duples.
         * Read the counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}
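
/*
 * Hypothetical layout: an entry of <1 BASE SIZE> means one usable
 * (base, size) duple for the LMB, while a counter of 0 means none
 * of the LMB is usable and the caller skips it.
 */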

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa = { .arrays = NULL };

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) duples */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
                                                  &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz,
                                                  &memblock.memory, nid);
                } while (--ranges);
        }
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* number of (addr, size) ranges in this property */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
                parse_drconf_memory(memory);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, nid);
                node_set_online(nid);
        }
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}

static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

static struct notifier_block ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
        u64 spanned_pages = end_pfn - start_pfn;
        const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
        u64 nd_pa;
        void *nd;
        int tnid;

        if (spanned_pages)
                pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
                        nid, start_pfn << PAGE_SHIFT,
                        (end_pfn << PAGE_SHIFT) - 1);
        else
                pr_info("Initmem setup node %d\n", nid);

        nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
        nd = __va(nd_pa);

        /* report and initialize */
        pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
                nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

void __init initmem_init(void)
{
        int nid, cpu;

        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        memblock_dump_all();

        /*
         * Reduce the possible NUMA nodes to the online NUMA nodes,
         * since we do not support node hotplug. This ensures that we
         * lower the maximum NUMA node ID to what is actually present.
         */
        nodes_and(node_possible_map, node_possible_map, node_online_map);

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
                setup_node_data(nid, start_pfn, end_pfn);
                sparse_memory_present_with_active_regions(nid);
        }

        sparse_init();

        setup_node_to_cpumask_map();

        reset_numa_cpu_lookup_table();
        register_cpu_notifier(&ppc64_numa_nb);
        /*
         * We need the numa_cpu_lookup_table to be accurate for all CPUs,
         * even before we online them, so that we can use cpu_to_{node,mem}
         * early in boot, cf. smp_prepare_cpus().
         */
        for_each_present_cpu(cpu) {
                numa_setup_cpu((unsigned long)cpu);
        }
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
        if (!p)
                return 0;

        if (!strcmp(p, "off")) {
                pr_info("Disabling topology updates\n");
                topology_updates_enabled = false;
        }

        return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* number of (addr, size) ranges in this property */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid, found = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        found = 1;
                        break;
                }
        }

        BUG_ON(!found);
        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const __be32 *dm = NULL;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);
                of_node_put(memory);
        }
        return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Store the current values of the associativity change counters
 * provided by the hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
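/*
 * For example (hypothetical counters): if the level-1 counter for
 * cpu 4 in its VPA advances from 3 to 4, the cached copy below is
 * refreshed and all thread siblings of cpu 4 are flagged in
 * cpu_associativity_changes_mask for a topology update.
 */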
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}

static long vphn_get_associativity(unsigned long cpu,
                                        __be32 *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
        }

        return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
        struct topology_update_data *update;
        unsigned long cpu;

        if (!data)
                return -EINVAL;

        cpu = smp_processor_id();

        for (update = data; update; update = update->next) {
                int new_nid = update->new_nid;
                if (cpu != update->cpu)
                        continue;

                unmap_cpu_from_node(cpu);
                map_cpu_to_node(cpu, new_nid);
                set_cpu_numa_node(cpu, new_nid);
                set_cpu_numa_mem(cpu, local_memory_node(new_nid));
                vdso_getcpu_init();
        }

        return 0;
}

static int update_lookup_table(void *data)
{
        struct topology_update_data *update;

        if (!data)
                return -EINVAL;

        /*
         * Upon topology update, the numa-cpu lookup table needs to be updated
         * for all threads in the core, including offline CPUs, to ensure that
         * future hotplug operations respect the cpu-to-node associativity
         * properly.
         */
        for (update = data; update; update = update->next) {
                int nid, base, j;

                nid = update->new_nid;
                base = cpu_first_thread_sibling(update->cpu);

                for (j = 0; j < threads_per_core; j++) {
                        update_numa_cpu_lookup_table(base + j, nid);
                }
        }

        return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;

        if (!prrn_enabled && !vphn_enabled)
                return 0;

        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
                return 0;

        updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
        if (!updates)
                return 0;

        cpumask_clear(&updated_cpus);

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If siblings aren't flagged for changes, the updates list
                 * will be too short. Skip this update now and flag the
                 * siblings so they are handled on the next update.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                /* Use associativity from first thread for all siblings */
                vphn_get_associativity(cpu, associativity);
                new_nid = associativity_to_nid(associativity);
                if (new_nid < 0 || !node_online(new_nid))
                        new_nid = first_online_node;

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                        if (i < weight)
                                ud->next = &updates[i];
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        pr_debug("Topology update for the following CPUs:\n");
        if (cpumask_weight(&updated_cpus)) {
                for (ud = &updates[0]; ud; ud = ud->next) {
                        pr_debug("cpu %d moving from node %d "
                                          "to %d\n", ud->cpu,
                                          ud->old_nid, ud->new_nid);
                }
        }

        /*
         * In cases where we have nothing to update (because the updates list
         * is too short or because the new topology is the same as the old
         * one), skip invoking update_cpu_topology() via stop-machine(). This
         * is necessary (and not just a fast-path optimization) since
         * stop-machine can end up electing a random CPU to run
         * update_cpu_topology(), and thus trick us into setting up incorrect
         * cpu-node mappings (since 'updates' is kzalloc()'ed).
         *
         * For the same reason, we also skip all the subsequent updating.
         */
        if (!cpumask_weight(&updated_cpus))
                goto out;

        stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        /*
         * Update the numa-cpu lookup table with the new mappings, even for
         * offline CPUs. It is best to perform this update from the stop-
         * machine context.
         */
        stop_machine(update_lookup_table, &updates[0],
                                        cpumask_of(raw_smp_processor_id()));

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

out:
        kfree(updates);
        return changed;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_reconfig_data *update = data;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
                        vphn_enabled = 0;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
                   lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        init_timer_deferrable(&topology_timer);
                        reset_topology_timer();
                }
        }

        return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        } else if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EINVAL;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}

static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};

static int topology_update_init(void)
{
        /* Do not poll for changes if disabled at boot */
        if (topology_updates_enabled)
                start_topology_update();

        if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
                return -ENOMEM;

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */