/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

static inline int ABS(int x) { return x >= 0 ? x : -x; }
/* Chip information */
char chip_model[64] __write_once;

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* We only create bootmem data on node 0. */
static bootmem_data_t __initdata node0_bdata;

/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
#endif

/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);
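
/*
 * Editor's note, an illustrative sketch (not used by this code): with
 * 16MB huge pages (HPAGE_SHIFT == 24), converting a lowmem VA to a PA
 * via pbase_map is a single lookup plus an offset:
 *
 *	unsigned long pfn = pbase_map[va >> HPAGE_SHIFT];
 *	phys_addr_t pa = ((phys_addr_t)pfn << PAGE_SHIFT)
 *		+ (va & (HPAGE_SIZE - 1));
 *
 * vbase_map answers the reverse question at 4GB granularity: given the
 * high bits of a PA, where is that controller's lowmem mapped?  The
 * real conversions live in the architecture's __pa()/__va()
 * implementation; this only motivates the array indexing.
 */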
static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;

#ifdef CONFIG_PCI
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif
static int __init setup_maxmem(char *str)
{
	long maxmem_mb;
	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
	    maxmem_mb == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);
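
/*
 * Worked example (editor's note): the shifts above round maxmem down
 * to a whole number of huge pages before converting to small pages.
 * With 16MB huge pages and 4KB small pages (HPAGE_SHIFT 24,
 * PAGE_SHIFT 12), "maxmem=1000" yields (1000 >> 4) << 12 = 62 huge
 * pages = 253952 small pages, and the pr_info() reports 992MB.
 */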
static int __init setup_maxnodemem(char *str)
{
	char *endp;
	long maxnodemem_mb, node;

	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':' ||
	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0 ||
	    maxnodemem_mb == 0)
		return -EINVAL;

	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);
static int __init setup_isolnodes(char *str)
{
	char buf[MAX_NUMNODES * 5];
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
	pr_info("Set isolnodes value to '%s'\n", buf);
	return 0;
}
early_param("isolnodes", setup_isolnodes);
#ifdef CONFIG_PCI
static int __init setup_pci_reserve(char *str)
{
	unsigned long mb;

	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
	    mb > 3 * 1024)
		return -EINVAL;

	pci_reserve_mb = mb;
	pr_info("Reserving %dMB for PCIE root complex mappings\n",
		pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
	return 0;
}
early_param("vmalloc", parse_vmalloc);
#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how much of
 * it is mapped there.  On controller zero, the first few megabytes are
 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
 * start our data mappings higher up, but for now we don't bother, to avoid
 * additional confusion.
 *
 * One question is whether, on systems with more than 768 MB and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?)  For now we use a proportionate
 * solution, as in the latter example.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for LOWMEM purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonlynodes controllers. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this controller as having no lowmem. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */
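
/*
 * Worked example of the proportionate scaling above (editor's note,
 * hypothetical sizes): suppose MAXMEM_PFN is 768MB worth of pages and
 * three controllers hold 256MB, 1GB, and 256MB, so mappable_physpages
 * is 1.5GB worth.  After the first node, curr_pages is 256MB worth,
 * giving vaddr_end = PAGE_OFFSET + 256 * 768 / 1536 = +128MB; the
 * later nodes end at +640MB and +768MB, i.e. 128MB/512MB/128MB of
 * lowmem per controller, each rounded to whole 16MB huge pages by the
 * loop above.
 */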
/*
 * Register our most important memory mappings with the debug stub.
 *
 * This is up to 4 mappings for lowmem, one mapping per memory
 * controller, plus one for our text segment.
 */
static void __cpuinit store_permanent_mappings(void)
{
	int i;

	for_each_online_node(i) {
		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif

		unsigned long pages = high_mapped_pa - node_start_pfn[i];
		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
	}

	hv_store_mapping((HV_VirtAddr)_stext,
			 (uint32_t)(_einittext - _stext), 0);
}
/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing suitable sanity-checking.
 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 */
static void __init setup_memory(void)
{
	int i, j;
	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
	long highmem_pages;
#endif
#ifndef __tilegx__
	int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
	long lowmem_pages;
#endif

	/* We are using a char to hold the cpu_2_node[] mapping */
	BUILD_BUG_ON(MAX_NUMNODES > 127);

	/* Discover the ranges of memory available to us */
	for (i = 0; ; ++i) {
		unsigned long start, size, end, highbits;
		HV_PhysAddrRange range = hv_inquire_physical(i);
		if (range.size == 0)
			break;
#ifdef CONFIG_FLATMEM
		if (i > 0) {
			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
			       range.size, range.start + range.size);
			continue;
		}
#endif
#ifndef __tilegx__
		if ((unsigned long)range.start) {
			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
		    (range.size & (HPAGE_SIZE-1)) != 0) {
			unsigned long long start_pa = range.start;
			unsigned long long orig_size = range.size;
			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
			range.size -= (range.start - start_pa);
			range.size &= HPAGE_MASK;
			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
			       " now %#llx-%#llx\n",
			       start_pa, start_pa + orig_size,
			       range.start, range.start + range.size);
		}
		highbits = __pa_to_highbits(range.start);
		if (highbits >= NR_PA_HIGHBIT_VALUES) {
			pr_err("PA high bits too high: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if (highbits_seen[highbits]) {
			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		highbits_seen[highbits] = 1;
		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
			int max_size = maxnodemem_pfn[i];
			if (max_size > 0) {
				pr_err("Maxnodemem reduced node %d to"
				       " %d pages\n", i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxnodemem disabled node %d\n", i);
				continue;
			}
		}
		if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
			int max_size = maxmem_pfn - num_physpages;
			if (max_size > 0) {
				pr_err("Maxmem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxmem disabled node %d\n", i);
				continue;
			}
		}
		if (i >= MAX_NUMNODES) {
			pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
			       i, range.size, range.size + range.start);
			continue;
		}

		start = range.start >> PAGE_SHIFT;
		size = range.size >> PAGE_SHIFT;
		end = start + size;

#ifndef __tilegx__
		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
		    (range.start + range.size)) {
			pr_err("PAs too high to represent: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#ifdef CONFIG_PCI
		/*
		 * Blocks that overlap the pci reserved region must
		 * have enough space to hold the maximum percpu data
		 * region at the top of the range.  If there isn't
		 * enough space above the reserved region, just
		 * truncate the node.
		 */
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			unsigned int per_cpu_size =
				__per_cpu_end - __per_cpu_start;
			unsigned int percpu_pages =
				NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
			if (end < pci_reserve_end_pfn + percpu_pages) {
				end = pci_reserve_start_pfn;
				pr_err("PCI mapping region reduced node %d to"
				       " %ld pages\n", i, end - start);
			}
		}
#endif

		for (j = __pfn_to_highbits(start);
		     j <= __pfn_to_highbits(end - 1); j++)
			highbits_to_node[j] = i;

		node_start_pfn[i] = start;
		node_end_pfn[i] = end;
		node_controller[i] = range.controller;
		num_physpages += size;
		max_pfn = end;

		/* Mark node as online */
		node_set(i, node_online_map);
		node_set(i, node_possible_map);
	}

#ifndef __tilegx__
	/*
	 * For 4KB pages, mem_map "struct page" data is 1% of the size
	 * of the physical memory, so can be quite big (640 MB for
	 * four 16G zones).  These structures must be mapped in
	 * lowmem, and since we currently cap out at about 768 MB,
	 * it's impractical to try to use this much address space.
	 * For now, arbitrarily cap the amount of physical memory
	 * we're willing to use at 8 million pages (32GB of 4KB pages).
	 */
	cap = 8 * 1024 * 1024;	/* 8 million pages */
	if (num_physpages > cap) {
		int num_nodes = num_online_nodes();
		int cap_each = cap / num_nodes;
		unsigned long dropped_pages = 0;
		for (i = 0; i < num_nodes; ++i) {
			int size = node_end_pfn[i] - node_start_pfn[i];
			if (size > cap_each) {
				dropped_pages += (size - cap_each);
				node_end_pfn[i] = node_start_pfn[i] + cap_each;
			}
		}
		num_physpages -= dropped_pages;
		pr_warning("Only using %ldMB memory;"
			   " ignoring %ldMB.\n",
			   num_physpages >> (20 - PAGE_SHIFT),
			   dropped_pages >> (20 - PAGE_SHIFT));
		pr_warning("Consider using a larger page size.\n");
	}
#endif
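
	/*
	 * Worked example (editor's note, hypothetical sizes): four
	 * 16GB controllers give num_physpages = 16M 4KB pages.  With
	 * cap = 8M and num_nodes = 4, cap_each = 2M pages (8GB), so
	 * each node is trimmed from 4M to 2M pages and the warnings
	 * report "Only using 32768MB memory; ignoring 32768MB."
	 */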
	/* Heap starts just above the last loaded address. */
	min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
	/* Find where we map lowmem from each controller. */
	high_memory = setup_pa_va_mapping();

	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_lowmem_end_pfn[0];

	lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
		MAXMEM_PFN : mappable_physpages;
	highmem_pages = (long) (num_physpages - lowmem_pages);

	pr_notice("%ldMB HIGHMEM available.\n",
		  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
	pr_notice("%ldMB LOWMEM available.\n",
		  pages_to_mb(lowmem_pages));
#else
	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
	if (node_end_pfn[0] > MAXMEM_PFN) {
		pr_warning("Only using %ldMB LOWMEM.\n",
			   MAXMEM >> 20);
		pr_warning("Use a HIGHMEM enabled kernel.\n");
		max_low_pfn = MAXMEM_PFN;
		max_pfn = MAXMEM_PFN;
		num_physpages = MAXMEM_PFN;
		node_end_pfn[0] = MAXMEM_PFN;
	} else {
		pr_notice("%ldMB memory available.\n",
			  pages_to_mb(node_end_pfn[0]));
	}
	for (i = 1; i < MAX_NUMNODES; ++i) {
		node_start_pfn[i] = 0;
		node_end_pfn[i] = 0;
	}
	high_memory = pfn_to_kaddr(node_end_pfn[0]);
#else
	lowmem_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		int pages = node_end_pfn[i] - node_start_pfn[i];
		lowmem_pages += pages;
		if (pages)
			high_memory = pfn_to_kaddr(node_end_pfn[i]);
	}
	pr_notice("%ldMB memory available.\n",
		  pages_to_mb(lowmem_pages));
#endif
#endif
}
static void __init setup_bootmem_allocator(void)
{
	unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;

	/* Provide a node 0 bdata. */
	NODE_DATA(0)->bdata = &node0_bdata;

#ifdef CONFIG_PCI
	/* Don't let boot memory alias the PCI region. */
	last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
#else
	last_alloc_pfn = max_low_pfn;
#endif

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 * The first argument says where to put the bitmap, and the
	 * second says where the end of allocatable memory is.
	 */
	bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);

	/*
	 * Let the bootmem allocator use all the space we've given it
	 * except for its own bitmap.
	 */
	first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
	if (first_alloc_pfn >= last_alloc_pfn)
		early_panic("Not enough memory on controller 0 for bootmem\n");

	free_bootmem(PFN_PHYS(first_alloc_pfn),
		     PFN_PHYS(last_alloc_pfn - first_alloc_pfn));

#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1, 0);
#endif
}
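
/*
 * Editor's note: init_bootmem() places a bitmap with one bit per page
 * at min_low_pfn and returns its size in bytes, which is why the code
 * above skips PFN_UP(bootmap_size) pages before handing the rest to
 * free_bootmem().  E.g. tracking 768MB of 4KB pages (196608 bits)
 * takes a 24KB bitmap, i.e. 6 pages.
 */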
void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}
static int __init percpu_size(void)
{
	int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
#ifdef CONFIG_MODULES
	if (size < PERCPU_ENOUGH_ROOM)
		size = PERCPU_ENOUGH_ROOM;
#endif
	/* In several places we assume the per-cpu data fits on a huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}

static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
{
	void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	unsigned long node_percpu[MAX_NUMNODES] = { 0 };
	int size = percpu_size();
	int num_cpus = smp_height * smp_width;
	int i;

	for (i = 0; i < num_cpus; ++i)
		node_percpu[cpu_to_node(i)] += size;

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
		unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
		unsigned long lowmem_end = end;
#endif
		int memmap_size = (end - start) * sizeof(struct page);
		node_free_pfn[i] = start;

		/*
		 * Set aside pages for per-cpu data and the mem_map array.
		 *
		 * Since the per-cpu data requires special homecaching,
		 * if we are in kdata_huge mode, we put it at the end of
		 * the lowmem region.  If we're not in kdata_huge mode,
		 * we take the per-cpu pages from the bottom of the
		 * controller, since that avoids fragmenting a huge page
		 * that users might want.  We always take the memmap
		 * from the bottom of the controller, since with
		 * kdata_huge that lets it be under a huge TLB entry.
		 *
		 * If the user has requested isolnodes for a controller,
		 * though, there'll be no lowmem, so we just alloc_bootmem
		 * the memmap.  There will be no percpu memory either.
		 */
		if (__pfn_to_highbits(start) == 0) {
			/* In low PAs, allocate via bootmem. */
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(memmap_size, goal);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
					alloc_bootmem_pfn(node_percpu[i], goal);
		} else if (node_isset(i, isolnodes)) {
			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else {
			/* In high PAs, just reserve some pages. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		/*
		 * Everyone shares node 0's bootmem allocator, but
		 * we use alloc_remap(), above, to put the actual
		 * struct page array on the individual controllers,
		 * which is most of the data that we actually care about.
		 * We can't place bootmem allocators on the other
		 * controllers since the bootmem allocator can only
		 * operate on 32-bit physical addresses.
		 */
		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;

		free_area_init_node(i, zones_size, start, NULL);
		printk(KERN_DEBUG "  DMA zone: %ld per-cpu pages\n",
		       PFN_UP(node_percpu[i]));

		/* Track the type of memory on each node */
		if (zones_size[ZONE_NORMAL])
			node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
		if (end != start)
			node_set_state(i, N_HIGH_MEMORY);
#endif

		node_set_online(i);
	}
}
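
/*
 * Editor's note, summarizing the placement policy above for one
 * hypothetical high-PA controller with kdata_huge set: the memmap is
 * reserved at the bottom of the controller (node_free_pfn[i]), where
 * a single huge TLB entry can cover it, while the percpu pages are
 * carved from lowmem_end downward, leaving the middle of the
 * controller as an unfragmented run of huge pages for users.
 */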
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
static int __init cpu_to_bound_node(int cpu, struct cpumask *unbound_cpus)
{
	if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
		return -1;
	else
		return cpu_to_node(cpu);
}

/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
static int __init node_neighbors(int node, int cpu,
				 struct cpumask *unbound_cpus)
{
	int neighbors = 0;
	int w = smp_width;
	int h = smp_height;
	int x = cpu % w;
	int y = cpu / w;
	if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
		++neighbors;
	if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
		++neighbors;
	if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
		++neighbors;
	if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
		++neighbors;
	return neighbors;
}
static void __init setup_numa_mapping(void)
{
	int distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get set of nodes we will use for defaults */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				cpu_2_node[cpu] = -1;
				continue;
			}
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the NUMA nodes until all the cpus are
	 * assigned.  We could be more clever here (e.g. create four
	 * sorted linked lists on the same set of cpu nodes, and pull
	 * off them in round-robin sequence, removing from all four
	 * lists each time) but given the relatively small numbers
	 * involved, O(n^2) seems OK for a one-time cost.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Compute metric, which is how much
				 * closer the cpu is to this memory
				 * controller than the others, shifted
				 * up, and then the number of
				 * neighbors already in the node as an
				 * epsilon adjustment to try to keep
				 * the nodes compact.
				 */
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					if (i != node)
						d -= distance[i][cpu];
				}
				d *= 8;  /* allow space for epsilon */
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		node = next_node(node, default_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}
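
/*
 * Worked example of the affinity metric above (editor's note,
 * hypothetical distances): with two nodes and a cpu at distance 2
 * from node 0 and 6 from node 1, node 0 scores
 *
 *	d = 2 * 2 - 6 = -2, then d = -2 * 8 = -16,
 *
 * minus one per already-assigned neighbor in node 0.  Lower is
 * better, so cpus much closer to this controller than to the others
 * are claimed first, with neighbor count as a small tie-breaker.
 */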
static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for (i = 0; i < smp_height * smp_width; ++i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */
/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	arch_local_irq_unmask(INT_DMATLB_MISS);
	arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
	arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 & 1 to the corresponding
	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
	 * SPRs, as well as the interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif
}
static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs.cpio.gz";

static int __init setup_initramfs_file(char *str)
{
	if (str == NULL)
		return -EINVAL;
	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
	set_initramfs_file = 1;

	return 0;
}
early_param("initramfs_file", setup_initramfs_file);

/*
 * We look for an additional "initramfs.cpio.gz" file in the hvfs.
 * If there is one, we allocate some memory for it and it will be
 * unpacked to the initramfs after any built-in initramfs_data.
 */
static void __init load_hv_initrd(void)
{
	HV_FS_StatInfo stat;
	int fd, rc;
	void *initrd;

	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
	if (fd == HV_ENOENT) {
		if (set_initramfs_file)
			pr_warning("No such hvfs initramfs file '%s'\n",
				   initramfs_file);
		return;
	}
	BUG_ON(fd < 0);
	stat = hv_fs_fstat(fd);
	BUG_ON(stat.size < 0);
	if (stat.flags & HV_FS_ISDIR) {
		pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
			   initramfs_file);
		return;
	}
	initrd = alloc_bootmem_pages(stat.size);
	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
	if (rc != stat.size) {
		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
		       stat.size, initramfs_file, rc);
		free_initrd_mem((unsigned long) initrd, stat.size);
		return;
	}
	initrd_start = (unsigned long) initrd;
	initrd_end = initrd_start + stat.size;
}

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
	free_bootmem(__pa(begin), end - begin);
}
static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warning("Warning: booting UP kernel on %dx%d grid;"
			   " will ignore all but first tile.\n",
			   topology.width, topology.height);
	}
#endif

	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * Some hypervisor APIs take a pointer to a bitmap array
	 * whose size is at least the number of cpus on the chip.
	 * We use a struct cpumask for this, so it must be big enough.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux"
			    " NR_CPUS %d\n", smp_height, smp_width,
			    nr_cpu_ids);
#endif

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	__get_cpu_var(current_asid) = min_asid = asid_range.start;
	max_asid = asid_range.start + asid_range.size - 1;

	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}
static void __init validate_va(void)
{
#ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
	/*
	 * Similarly, make sure we're only using allowed VAs.
	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
	 * and 0 .. KERNEL_HIGH_VADDR.
	 * In addition, make sure we CAN'T use the end of memory, since
	 * we use the last chunk of each pgd for the pgd_list.
	 */
	int i, user_kernel_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

	for (i = 0; ; ++i) {
		HV_VirtAddrRange range = hv_inquire_virtual(i);
		if (range.size == 0)
			break;
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_INTRPT)
			user_kernel_ok = 1;
		if (range.start == 0)
			max_va = range.size;
		BUG_ON(range.start + range.size > list_va);
	}
	if (!user_kernel_ok)
		early_panic("Hypervisor not configured for user/kernel VAs\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)
		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
			    max_va, KERNEL_HIGH_VADDR);

	/* Kernel PCs must have their high bit set; see intvec.S. */
	if ((long)VMALLOC_START >= 0)
		early_panic(
			"Linux VMALLOC region below the 2GB line (%#lx)!\n"
			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
			"or smaller VMALLOC_RESERVE.\n",
			VMALLOC_START);
#endif
}
/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on.  Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);
#endif

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf.  It is set to cpu_possible_map OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() to flush all caches.  Note that if there are
 * dedicated hypervisor driver tiles that have authorized use of their
 * cache, those tiles will only appear in cpu_lotar_map, NOT in
 * cpu_cacheable_map, as they are a special case.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);
static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
	int boot_cpu = smp_processor_id();

	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
		return -EINVAL;
	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
		cpumask_clear_cpu(boot_cpu, &disabled_map);
	}
	return 0;
}

early_param("disabled_cpus", disabled_cpus);

void __init print_disabled_cpus(void)
{
	if (!cpumask_empty(&disabled_map)) {
		char buf[100];
		cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
		pr_info("CPUs not available for Linux: %s\n", buf);
	}
}
static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hvconfig file. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  But don't mark
	 * as disabled the cpus that are outside of our initial rectangle,
	 * since that turns out to be confusing.
	 */
	cpus = 1;			       /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = cpu_possible_map;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
#else
	cpu_cacheable_map = cpu_possible_map;
#endif
}
static int __init dataplane(char *str)
{
	pr_warning("WARNING: dataplane support disabled in this kernel\n");
	return 0;
}

early_param("dataplane", dataplane);

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
			   boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();

#ifdef CONFIG_PCI
	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));
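
	/*
	 * Worked example (editor's note): with 4KB pages,
	 * pci_reserve_end_pfn = 1 << 20 = 0x100000, the 4GB boundary.
	 * The default 64MB window then puts pci_reserve_start_pfn at
	 * 0x100000 - (64 << 8) = 0xfc000, i.e. PAs 4032MB..4096MB.
	 */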
#endif

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}
/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };

/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}
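
/*
 * Editor's note: pcpu_page_first_chunk() invokes this hook one page
 * at a time for each cpu, so successive calls for a cpu walk linearly
 * up from node_percpu_pfn[nid] via pfn_offset[nid].  percpu_pfn[cpu]
 * latches each cpu's first page so setup_per_cpu_areas() below can
 * re-home exactly those pages.
 */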
/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
		      " try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}
void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			pte_t *ptep =
				virt_to_pte(NULL, (unsigned long)ptr + i);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte(ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_pte(NULL, lowmem_va);
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page"
				       " at %#lx\n", lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_pte(NULL, lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte(ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * We reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB; the standard allows that for some devices but
 * the probing code truncates values to 32 bits.
 */
#ifdef CONFIG_PCI
static struct resource* __init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
#endif

static struct resource* __init
insert_ram_resource(u64 start_pfn, u64 end_pfn)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "System RAM";
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}

/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
	int i;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

	iomem_resource.end = -1LL;
#ifdef CONFIG_PCI
	insert_non_bus_resource();
#endif

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#ifdef CONFIG_PCI
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);