2 * Procedures for interfacing to Open Firmware.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/init.h>
22 #include <linux/threads.h>
23 #include <linux/spinlock.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/proc_fs.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
34 #include <asm/processor.h>
39 #include <asm/pgtable.h>
41 #include <asm/iommu.h>
42 #include <asm/btext.h>
43 #include <asm/sections.h>
44 #include <asm/machdep.h>
47 #include <linux/linux_logo.h>
/* Size of chunks claimed from OF while building the flattened device tree. */
50 * Eventually bump that one up
52 #define DEVTREE_CHUNK_SIZE 0x100000
55 * This is the size of the local memory reserve map that gets copied
56 * into the boot params passed to the kernel. That size is totally
57 * flexible as the kernel just reads the list until it encounters an
58 * entry with size 0, so it can be changed without breaking binary
61 #define MEM_RESERVE_MAP_SIZE 8
64 * prom_init() is called very early on, before the kernel text
65 * and data have been mapped to KERNELBASE. At this point the code
66 * is running at whatever address it has been loaded at.
67 * On ppc32 we compile with -mrelocatable, which means that references
68 * to extern and static variables get relocated automatically.
69 * ppc64 objects are always relocatable, we just need to relocate the
72 * Because OF may have mapped I/O devices into the area starting at
73 * KERNELBASE, particularly on CHRP machines, we can't safely call
74 * OF once the kernel has been mapped to KERNELBASE. Therefore all
75 * OF calls must be done within prom_init().
77 * ADDR is used in calls to call_prom. The 4th and following
78 * arguments to call_prom should be 32-bit values.
79 * On ppc64, 64 bit values are truncated to 32 bits (and
80 * fortunately don't get interpreted as two arguments).
82 #define ADDR(x) (u32)(unsigned long)(x)
/* NOTE(review): two OF_WORKAROUNDS definitions are visible here; the
 * original #ifdef CONFIG_PPC64/#else separating them was lost in
 * extraction. ppc64 hard-wires 0, ppc32 uses the of_workarounds flag. */
85 #define OF_WORKAROUNDS 0
87 #define OF_WORKAROUNDS of_workarounds
91 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
92 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
/* Report a BUG from prom context and trap via an illegal instruction:
 * the normal BUG() machinery is not usable this early in boot. */
94 #define PROM_BUG() do { \
95 prom_printf("kernel BUG at %s line 0x%x!\n", \
96 __FILE__, __LINE__); \
97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
/* prom_debug() expands to prom_printf() only when DEBUG_PROM is set;
 * the surrounding #ifdef/#else pair is missing from this extract. */
101 #define prom_debug(x...) prom_printf(x)
103 #define prom_debug(x...)
/* Argument cell passed to the OF client interface: 32-bit value. */
107 typedef u32 prom_arg_t;
/* One entry of the memory reserve map handed to the kernel
 * (member declarations lost in extraction; presumably base/size). */
125 struct mem_map_entry {
/* Device-tree property cells are 32-bit big-endian. */
130 typedef __be32 cell_t;
132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 unsigned long r6, unsigned long r7, unsigned long r8,
/* NOTE(review): both an extern (asm trampoline, ppc64) and an inline
 * direct-call (ppc32) enter_prom are visible; the #ifdef separating
 * them was lost in extraction. */
137 extern int enter_prom(struct prom_args *args, unsigned long entry);
139 static inline int enter_prom(struct prom_args *args, unsigned long entry)
141 return ((int (*)(struct prom_args *))entry)(args);
145 extern void copy_and_flush(unsigned long dest, unsigned long src,
146 unsigned long size, unsigned long offset);
/* Global prom state; everything here is __initdata and discarded
 * once the kernel proper is running. */
149 static struct prom_t __initdata prom;
151 static unsigned long prom_entry __initdata;
153 #define PROM_SCRATCH_SIZE 256
155 static char __initdata of_stdout_device[256];
156 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
/* Bounds of the flattened device tree being constructed. */
158 static unsigned long __initdata dt_header_start;
159 static unsigned long __initdata dt_struct_start, dt_struct_end;
160 static unsigned long __initdata dt_string_start, dt_string_end;
161
162 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
/* iommu= command-line overrides and TCE table placement hints. */
165 static int __initdata prom_iommu_force_on;
166 static int __initdata prom_iommu_off;
167 static unsigned long __initdata prom_tce_alloc_start;
168 static unsigned long __initdata prom_tce_alloc_end;
171 /* Platforms codes are now obsolete in the kernel. Now only used within this
172 * file and ultimately gone too. Feel free to change them if you need, they
173 * are not shared with anything outside of this file anymore
175 #define PLATFORM_PSERIES 0x0100
176 #define PLATFORM_PSERIES_LPAR 0x0101
177 #define PLATFORM_LPAR 0x0001
178 #define PLATFORM_POWERMAC 0x0400
179 #define PLATFORM_GENERIC 0x0500
180 #define PLATFORM_OPAL 0x0600
182 static int __initdata of_platform;
184 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
/* mem= limit and the claim-allocator watermarks used by
 * alloc_up()/alloc_down() below. */
186 static unsigned long __initdata prom_memory_limit;
188 static unsigned long __initdata alloc_top;
189 static unsigned long __initdata alloc_top_high;
190 static unsigned long __initdata alloc_bottom;
191 static unsigned long __initdata rmo_top;
192 static unsigned long __initdata ram_top;
194 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
195 static int __initdata mem_reserve_cnt;
/* Scratch buffer for reading "reg" properties of memory nodes. */
197 static cell_t __initdata regbuf[1024];
199 static bool rtas_has_query_cpu_stopped;
203 * Error results ... some OF calls will return "-1" on error, some
204 * will return 0, some will return either. To simplify, here are
205 * macros to use with any ihandle or phandle return value to check if
209 #define PROM_ERROR (-1u)
210 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
211 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
/*
 * call_prom() - invoke an Open Firmware client-interface service.
 * Marshals @nargs variadic arguments (as 32-bit BE cells) plus @nret
 * zeroed return slots into a prom_args block and enters firmware.
 * Returns the first return cell (CPU-endian), or 0 when nret == 0.
 * NOTE(review): several lines (va_end, the PROM_ERROR early return,
 * closing braces) are missing from this extract.
 */
214 /* This is the one and *ONLY* place where we actually call open
218 static int __init call_prom(const char *service, int nargs, int nret, ...)
221 struct prom_args args;
224 args.service = cpu_to_be32(ADDR(service));
225 args.nargs = cpu_to_be32(nargs);
226 args.nret = cpu_to_be32(nret);
228 va_start(list, nret);
229 for (i = 0; i < nargs; i++)
230 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
/* Clear the return slots so firmware outputs start from zero. */
233 for (i = 0; i < nret; i++)
234 args.args[nargs+i] = 0;
236 if (enter_prom(&args, prom_entry) < 0)
239 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
/*
 * call_prom_ret() - like call_prom(), but also copies return cells
 * 1..nret-1 (CPU-endian) into @rets for services that produce more
 * than one result. Returns the first return cell, or 0 if nret == 0.
 * NOTE(review): the "if (rets != NULL)" guard around the copy loop and
 * other lines are missing from this extract.
 */
242 static int __init call_prom_ret(const char *service, int nargs, int nret,
243 prom_arg_t *rets, ...)
246 struct prom_args args;
249 args.service = cpu_to_be32(ADDR(service));
250 args.nargs = cpu_to_be32(nargs);
251 args.nret = cpu_to_be32(nret);
253 va_start(list, rets);
254 for (i = 0; i < nargs; i++)
255 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
/* Clear the return slots so firmware outputs start from zero. */
258 for (i = 0; i < nret; i++)
259 args.args[nargs+i] = 0;
261 if (enter_prom(&args, prom_entry) < 0)
/* Copy secondary return values out to the caller's buffer. */
265 for (i = 1; i < nret; ++i)
266 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
268 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
/*
 * prom_print() - write a NUL-terminated string to the OF stdout,
 * translating each '\n' into "\r\n" as firmware consoles expect.
 * Silently returns if stdout has not been set up yet.
 */
272 static void __init prom_print(const char *msg)
276 if (prom.stdout == 0)
/* Emit one '\n'-free segment per "write" call. */
279 for (p = msg; *p != 0; p = q) {
280 for (q = p; *q != 0 && *q != '\n'; ++q)
283 call_prom("write", 3, 1, prom.stdout, p, q - p);
287 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
/*
 * prom_print_hex() - print @val as fixed-width lowercase hex
 * (2 nibbles per byte of unsigned long), without a "0x" prefix.
 */
292 static void __init prom_print_hex(unsigned long val)
294 int i, nibbles = sizeof(val)*2;
295 char buf[sizeof(val)*2+1];
297 for (i = nibbles-1; i >= 0; i--) {
298 buf[i] = (val & 0xf) + '0';
/* Adjust digits above '9' into 'a'..'f' (the guarding "if" line is
 * missing from this extract). */
300 buf[i] += ('a'-'0'-10);
304 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
/*
 * prom_print_dec() - print @val in decimal, building the digits
 * right-to-left in a UL_DIGITS-wide buffer and writing only the
 * used suffix.
 */
307 /* max number of decimal digits in an unsigned long */
309 static void __init prom_print_dec(unsigned long val)
312 char buf[UL_DIGITS+1];
314 for (i = UL_DIGITS-1; i >= 0; i--) {
315 buf[i] = (val % 10) + '0';
/* After the loop, i indexes just before the most significant digit;
 * the val /= 10 and early-break lines are missing from this extract. */
320 /* shift stuff down */
321 size = UL_DIGITS - i;
322 call_prom("write", 3, 1, prom.stdout, buf+i, size);
/*
 * prom_printf() - minimal printf for prom context. Supports %s, %x,
 * %d/%u and the 'l'/'ll' length modifiers, plus '\n' -> "\r\n"
 * translation. No width/precision handling; a hand-rolled parser is
 * used because the real vsnprintf is not usable this early.
 * NOTE(review): large parts of the format dispatch are missing from
 * this extract.
 */
325 static void __init prom_printf(const char *format, ...)
327 const char *p, *q, *s;
332 va_start(args, format);
333 for (p = format; *p != 0; p = q) {
/* Emit the literal run up to the next '\n' or '%'. */
334 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
337 call_prom("write", 3, 1, prom.stdout, p, q - p);
342 call_prom("write", 3, 1, prom.stdout,
352 s = va_arg(args, const char *);
357 v = va_arg(args, unsigned long);
362 vs = va_arg(args, int);
/* 'l'-prefixed conversions take long/unsigned long arguments. */
373 else if (*q == 'x') {
375 v = va_arg(args, unsigned long);
377 } else if (*q == 'u') { /* '%lu' */
379 v = va_arg(args, unsigned long);
381 } else if (*q == 'd') { /* %ld */
383 vs = va_arg(args, long);
/*
 * prom_claim() - claim @size bytes at @virt from Open Firmware.
 * On buggy old firmware (OF_WA_CLAIM) physical and virtual memory must
 * be claimed separately via the memory/mmu packages and then mapped
 * explicitly; otherwise a single "claim" service call is used.
 * Returns the claimed address, or PROM_ERROR on failure.
 */
396 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
400 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
402 * Old OF requires we claim physical and virtual separately
403 * and then map explicitly (assuming virtual mode)
408 ret = call_prom_ret("call-method", 5, 2, &result,
409 ADDR("claim"), prom.memory,
411 if (ret != 0 || result == -1)
413 ret = call_prom_ret("call-method", 5, 2, &result,
414 ADDR("claim"), prom.mmumap,
/* Virtual claim failed: release the physical claim again. */
417 call_prom("call-method", 4, 1, ADDR("release"),
418 prom.memory, size, virt);
421 /* the 0x12 is M (coherence) + PP == read/write */
422 call_prom("call-method", 6, 1,
423 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
/* Normal firmware: one "claim" call does it all. */
426 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
/*
 * prom_panic() - print @reason and stop. PowerMac takes a different
 * path (missing from this extract) because "exit" clears the screen
 * and can double-fault; other platforms call the OF "exit" service,
 * then spin forever as a last resort.
 */
430 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
433 /* Do not call exit because it clears the screen on pmac
434 * it also causes some sort of double-fault on early pmacs */
435 if (of_platform == PLATFORM_POWERMAC)
438 /* ToDo: should put up an SRC here on pSeries */
439 call_prom("exit", 0, 0);
441 for (;;) /* should never get here */
/*
 * prom_next_node() - advance *nodep to the next node in a depth-first
 * walk of the OF device tree (child first, then peer, then climb to a
 * parent's peer). Returns nonzero while nodes remain (return
 * statements are missing from this extract).
 */
446 static int __init prom_next_node(phandle *nodep)
450 if ((node = *nodep) != 0
451 && (*nodep = call_prom("child", 1, 1, node)) != 0)
453 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
/* No child or peer: walk up until a parent has a peer. */
456 if ((node = call_prom("parent", 1, 1, node)) == 0)
458 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
/*
 * prom_getprop() - read property @pname of @node into @value
 * (at most @valuelen bytes). Returns the OF "getprop" result:
 * actual length, or -1 if the property does not exist.
 */
463 static int inline prom_getprop(phandle node, const char *pname,
464 void *value, size_t valuelen)
466 return call_prom("getprop", 4, 1, node, ADDR(pname),
467 (u32)(unsigned long) value, (u32) valuelen);
/* prom_getproplen() - return the length of property @pname, -1 if absent. */
470 static int inline prom_getproplen(phandle node, const char *pname)
472 return call_prom("getproplen", 2, 1, node, ADDR(pname));
/* add_string() - append @q plus a space at *str, advancing the cursor
 * (body missing from this extract). */
475 static void add_string(char **str, const char *q)
/*
 * tohex() - format @x as hex into a static buffer; returns a pointer
 * into that buffer. Not reentrant: the next call overwrites the result.
 */
485 static char *tohex(unsigned int x)
487 static char digits[] = "0123456789abcdef";
488 static char result[9];
/* Build digits right-to-left; loop header missing from this extract. */
495 result[i] = digits[x & 0xf];
497 } while (x != 0 && i > 0);
/*
 * prom_setprop() - set property @pname of @node. Normally a direct
 * "setprop" client call; on CHRP Longtrail firmware (OF_WA_LONGTRAIL)
 * setprop is broken, so a Forth command is assembled into a buffer and
 * executed via the "interpret" service instead.
 */
501 static int __init prom_setprop(phandle node, const char *nodename,
502 const char *pname, void *value, size_t valuelen)
506 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
507 return call_prom("setprop", 4, 1, node, ADDR(pname),
508 (u32)(unsigned long) value, (u32) valuelen);
510 /* gah... setprop doesn't work on longtrail, have to use interpret */
/* Build: "dev <node> <value-addr> <len> <name-addr> <name-len> property" */
512 add_string(&p, "dev");
513 add_string(&p, nodename);
514 add_string(&p, tohex((u32)(unsigned long) value));
515 add_string(&p, tohex(valuelen));
516 add_string(&p, tohex(ADDR(pname)));
517 add_string(&p, tohex(strlen(pname)));
518 add_string(&p, "property");
520 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
/* Local ctype macros: the libc/kernel versions use lookup tables whose
 * addresses would need relocation this early, so open-code them.
 * NOTE(review): these evaluate their argument more than once - do not
 * pass expressions with side effects. */
523 /* We can't use the standard versions because of relocation headaches. */
524 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
525 || ('a' <= (c) && (c) <= 'f') \
526 || ('A' <= (c) && (c) <= 'F'))
528 #define isdigit(c) ('0' <= (c) && (c) <= '9')
529 #define islower(c) ('a' <= (c) && (c) <= 'z')
530 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
/*
 * prom_strtoul() - parse an unsigned long from @cp, honoring a "0x"
 * hex prefix (base detection lines partly missing from this extract);
 * *endp is left pointing after the last digit consumed.
 */
532 static unsigned long prom_strtoul(const char *cp, const char **endp)
534 unsigned long result = 0, base = 10, value;
539 if (toupper(*cp) == 'X') {
545 while (isxdigit(*cp) &&
546 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
547 result = result * base + value;
/*
 * prom_memparse() - parse a size string such as "512M": a number via
 * prom_strtoul() followed by an optional G/M/K suffix (the shift lines
 * applying each suffix are missing from this extract).
 */
557 static unsigned long prom_memparse(const char *ptr, const char **retptr)
559 unsigned long ret = prom_strtoul(ptr, retptr);
563 * We can't use a switch here because GCC *may* generate a
564 * jump table which won't work, because we're not running at
565 * the address we're linked at.
567 if ('G' == **retptr || 'g' == **retptr)
570 if ('M' == **retptr || 'm' == **retptr)
573 if ('K' == **retptr || 'k' == **retptr)
585 * Early parsing of the command line passed to the kernel, used for
586 * "mem=x" and the options that affect the iommu
588 static void __init early_cmdline_parse(void)
595 prom_cmd_line[0] = 0;
/* Fetch /chosen "bootargs"; fall back to CONFIG_CMDLINE if empty. */
597 if ((long)prom.chosen > 0)
598 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
599 #ifdef CONFIG_CMDLINE
600 if (l <= 0 || p[0] == '\0') /* dbl check */
601 strlcpy(prom_cmd_line,
602 CONFIG_CMDLINE, sizeof(prom_cmd_line));
603 #endif /* CONFIG_CMDLINE */
604 prom_printf("command line: %s\n", prom_cmd_line);
/* iommu=off / iommu=force override the default TCE setup. */
607 opt = strstr(prom_cmd_line, "iommu=");
609 prom_printf("iommu opt is: %s\n", opt);
611 while (*opt && *opt == ' ')
613 if (!strncmp(opt, "off", 3))
615 else if (!strncmp(opt, "force", 5))
616 prom_iommu_force_on = 1;
/* mem=N caps usable RAM; parsed and 16MB-aligned below. */
619 opt = strstr(prom_cmd_line, "mem=");
622 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
624 /* Align to 16 MB == size of ppc64 large page */
625 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
630 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
632 * The architecture vector has an array of PVR mask/value pairs,
633 * followed by # option vectors - 1, followed by the option vectors.
635 * See prom.h for the definition of the bits specified in the
636 * architecture vector.
638 * Because the description vector contains a mix of byte and word
639 * values, we declare it as an unsigned char array, and use this
640 * macro to put word values in.
642 #define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
643 ((x) >> 8) & 0xff, (x) & 0xff
/* Passed to firmware via ibm,client-architecture-support to declare
 * which CPUs and PAPR features this kernel supports. Non-static:
 * prom_send_capabilities() patches the core count in place. */
645 unsigned char ibm_architecture_vec[] = {
/* PVR mask/value pairs identifying supported processors. */
646 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
647 W(0xffff0000), W(0x003e0000), /* POWER6 */
648 W(0xffff0000), W(0x003f0000), /* POWER7 */
649 W(0xffff0000), W(0x004b0000), /* POWER8E */
650 W(0xffff0000), W(0x004d0000), /* POWER8 */
651 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
652 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
653 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
654 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
655 6 - 1, /* 6 option vectors */
657 /* option vector 1: processor architectures supported */
659 0, /* don't ignore, don't halt */
660 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
661 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
663 /* option vector 2: Open Firmware options supported */
/* 0xffffffff in the base/size fields below means "no preference". */
667 W(0xffffffff), /* real_base */
668 W(0xffffffff), /* real_size */
669 W(0xffffffff), /* virt_base */
670 W(0xffffffff), /* virt_size */
671 W(0xffffffff), /* load_base */
672 W(256), /* 256MB min RMA */
673 W(0xffffffff), /* full client load */
674 0, /* min RMA percentage of total RAM */
675 48, /* max log_2(hash table size) */
677 /* option vector 3: processor options supported */
679 0, /* don't ignore, don't halt */
680 OV3_FP | OV3_VMX | OV3_DFP,
682 /* option vector 4: IBM PAPR implementation */
685 OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
687 /* option vector 5: PAPR/OF options */
689 0, /* don't ignore, don't halt */
690 OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
691 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
692 #ifdef CONFIG_PCI_MSI
693 /* PCIe/MSI support. Without MSI full PCIe is not supported */
699 #ifdef CONFIG_PPC_SMLPAR
700 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
704 OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
708 /* WARNING: The offset of the "number of cores" field below
709 * must match by the macro below. Update the definition if
710 * the structure layout changes.
712 #define IBM_ARCH_VEC_NRCORES_OFFSET 125
713 W(NR_CPUS), /* number of cores supported */
718 OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
719 OV5_FEAT(OV5_PFO_HW_842),
720 OV5_FEAT(OV5_SUB_PROCESSORS),
721 /* option vector 6: IBM PAPR hints */
/*
 * fake_elf: a synthetic 32-bit big-endian ELF executable containing
 * only PT_NOTE segments (a CHRP note and an RPA client-config note).
 * It is fed to the firmware elf-loader's "process-elf-header" method
 * as the pre-CAS way of communicating client capabilities.
 * NOTE(review): most struct member declarations and initializers are
 * missing from this extract.
 */
729 /* Old method - ELF header with PT_NOTE sections only works on BE */
730 #ifdef __BIG_ENDIAN__
731 static struct fake_elf {
738 char name[8]; /* "PowerPC" */
752 char name[24]; /* "IBM,RPA-Client-Config" */
766 .e_ident = { 0x7f, 'E', 'L', 'F',
767 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
768 .e_type = ET_EXEC, /* yeah right */
770 .e_version = EV_CURRENT,
771 .e_phoff = offsetof(struct fake_elf, phdr),
772 .e_phentsize = sizeof(Elf32_Phdr),
/* Two program headers, one per embedded note. */
778 .p_offset = offsetof(struct fake_elf, chrpnote),
779 .p_filesz = sizeof(struct chrpnote)
782 .p_offset = offsetof(struct fake_elf, rpanote),
783 .p_filesz = sizeof(struct rpanote)
787 .namesz = sizeof("PowerPC"),
788 .descsz = sizeof(struct chrpdesc),
792 .real_mode = ~0U, /* ~0 means "don't care" */
801 .namesz = sizeof("IBM,RPA-Client-Config"),
802 .descsz = sizeof(struct rpadesc),
804 .name = "IBM,RPA-Client-Config",
807 .min_rmo_size = 64, /* in megabytes */
808 .min_rmo_percent = 0,
809 .max_pft_size = 48, /* 2^48 bytes max PFT size */
816 #endif /* __BIG_ENDIAN__ */
/*
 * prom_count_smt_threads() - return the number of SMT threads per
 * core, derived from the length of the first CPU node's
 * "ibm,ppc-interrupt-server#s" property (one 4-byte entry per thread).
 * Falls back to 1 when nothing sensible is found.
 */
818 static int __init prom_count_smt_threads(void)
824 /* Pick up the first CPU node we can find */
825 for (node = 0; prom_next_node(&node); ) {
827 prom_getprop(node, "device_type", type, sizeof(type));
829 if (strcmp(type, "cpu"))
832 * There is an entry for each smt thread, each entry being
833 * 4 bytes long. All cpus should have the same number of
834 * smt threads, so return after finding the first.
836 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
837 if (plen == PROM_ERROR)
840 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
/* Sanity-check: clamp absurd values (the divide-by-4 line converting
 * bytes to a thread count is missing from this extract). */
843 if (plen < 1 || plen > 64) {
844 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
845 (unsigned long)plen);
850 prom_debug("No threads found, assuming 1 per core\n");
/*
 * prom_send_capabilities() - tell firmware what this kernel supports.
 * Patches the core count into ibm_architecture_vec, then tries the
 * ibm,client-architecture-support (CAS) method on the root node; if
 * that is not implemented, falls back to feeding the fake_elf notes
 * to the firmware elf-loader (big-endian only).
 */
857 static void __init prom_send_capabilities(void)
863 root = call_prom("open", 1, 1, ADDR("/"));
865 /* We need to tell the FW about the number of cores we support.
867 * To do that, we count the number of threads on the first core
868 * (we assume this is the same for all cores) and use it to
871 cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
/* Guard against the offset macro drifting out of sync with the vector. */
872 if (be32_to_cpup(cores) != NR_CPUS) {
873 prom_printf("WARNING ! "
874 "ibm_architecture_vec structure inconsistent: %lu!\n",
875 be32_to_cpup(cores));
877 *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
878 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
879 be32_to_cpup(cores), NR_CPUS);
882 /* try calling the ibm,client-architecture-support method */
883 prom_printf("Calling ibm,client-architecture-support...");
884 if (call_prom_ret("call-method", 3, 2, &ret,
885 ADDR("ibm,client-architecture-support"),
887 ADDR(ibm_architecture_vec)) == 0) {
888 /* the call exists... */
890 prom_printf("\nWARNING: ibm,client-architecture"
891 "-support call FAILED!\n");
892 call_prom("close", 1, 0, root);
893 prom_printf(" done\n");
896 call_prom("close", 1, 0, root);
897 prom_printf(" not implemented\n");
900 #ifdef __BIG_ENDIAN__
904 /* no ibm,client-architecture-support call, try the old way */
905 elfloader = call_prom("open", 1, 1,
906 ADDR("/packages/elf-loader"));
907 if (elfloader == 0) {
908 prom_printf("couldn't open /packages/elf-loader\n");
911 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
912 elfloader, ADDR(&fake_elf));
913 call_prom("close", 1, 0, elfloader);
915 #endif /* __BIG_ENDIAN__ */
917 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
920 * Memory allocation strategy... our layout is normally:
922 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
923 * rare cases, initrd might end up being before the kernel though.
924 * We assume this won't override the final kernel at 0, we have no
925 * provision to handle that in this version, but it should hopefully
928 * alloc_top is set to the top of RMO, eventually shrink down if the
931 * alloc_bottom is set to the top of kernel/initrd
933 * from there, allocations are done this way : rtas is allocated
934 * topmost, and the device-tree is allocated from the bottom. We try
935 * to grow the device-tree allocation as we progress. If we can't,
936 * then we fail, we don't currently have a facility to restart
937 * elsewhere, but that shouldn't be necessary.
939 * Note that calls to reserve_mem have to be done explicitly, memory
940 * allocated with either alloc_up or alloc_down isn't automatically
946 * Allocates memory in the RMO upward from the kernel/initrd
948 * When align is 0, this is a special case, it means to allocate in place
949 * at the current location of alloc_bottom or fail (that is basically
950 * extending the previous allocation). Used for the device-tree flattening
952 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
954 unsigned long base = alloc_bottom;
955 unsigned long addr = 0;
958 base = _ALIGN_UP(base, align);
959 prom_debug("alloc_up(%x, %x)\n", size, align);
961 prom_panic("alloc_up() called with mem not initialized\n");
964 base = _ALIGN_UP(alloc_bottom, align);
/* Scan upward in 1MB steps until a claim succeeds or we hit alloc_top. */
968 for(; (base + size) <= alloc_top;
969 base = _ALIGN_UP(base + 0x100000, align)) {
970 prom_debug(" trying: 0x%x\n\r", base);
971 addr = (unsigned long)prom_claim(base, size, 0);
972 if (addr != PROM_ERROR && addr != 0)
/* Success: bump the low-water mark past this allocation. */
980 alloc_bottom = addr + size;
982 prom_debug(" -> %x\n", addr);
983 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
984 prom_debug(" alloc_top : %x\n", alloc_top);
985 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
986 prom_debug(" rmo_top : %x\n", rmo_top);
987 prom_debug(" ram_top : %x\n", ram_top);
993 * Allocates memory downward, either from top of RMO, or if highmem
994 * is set, from the top of RAM. Note that this one doesn't handle
995 * failures. It does claim memory if highmem is not set.
997 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1000 unsigned long base, addr = 0;
1002 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1003 highmem ? "(high)" : "(low)");
1005 prom_panic("alloc_down() called with mem not initialized\n");
/* highmem path: bookkeeping only, no OF claim is performed. */
1008 /* Carve out storage for the TCE table. */
1009 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1010 if (addr <= alloc_bottom)
1012 /* Will we bump into the RMO ? If yes, check out that we
1013 * didn't overlap existing allocations there, if we did,
1014 * we are dead, we must be the first in town !
1016 if (addr < rmo_top) {
1017 /* Good, we are first */
1018 if (alloc_top == rmo_top)
1019 alloc_top = rmo_top = addr;
1023 alloc_top_high = addr;
/* Low path: scan downward in 1MB steps, claiming from firmware. */
1027 base = _ALIGN_DOWN(alloc_top - size, align);
1028 for (; base > alloc_bottom;
1029 base = _ALIGN_DOWN(base - 0x100000, align)) {
1030 prom_debug(" trying: 0x%x\n\r", base);
1031 addr = (unsigned long)prom_claim(base, size, 0);
1032 if (addr != PROM_ERROR && addr != 0)
1041 prom_debug(" -> %x\n", addr);
1042 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1043 prom_debug(" alloc_top : %x\n", alloc_top);
1044 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1045 prom_debug(" rmo_top : %x\n", rmo_top);
1046 prom_debug(" ram_top : %x\n", ram_top);
1052 * Parse a "reg" cell
/*
 * prom_next_cell() - read an @s-cell big-endian value from *cellp and
 * advance the pointer. Cells beyond what fits in an unsigned long are
 * skipped (only the low 1-2 cells are kept).
 */
1054 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1057 unsigned long r = 0;
1059 /* Ignore more than 2 cells */
1060 while (s > sizeof(unsigned long) / 4) {
1064 r = be32_to_cpu(*p++);
/* On 64-bit, fold the second 32-bit cell into the low word. */
1068 r |= be32_to_cpu(*(p++));
1076 * Very dumb function for adding to the memory reserve list, but
1077 * we don't need anything smarter at this point
1079 * XXX Eventually check for collisions. They should NEVER happen.
1080 * If problems seem to show up, it would be a good start to track
1083 static void __init reserve_mem(u64 base, u64 size)
1085 u64 top = base + size;
1086 unsigned long cnt = mem_reserve_cnt;
1091 /* We need to always keep one empty entry so that we
1092 * have our terminator with "size" set to 0 since we are
1093 * dumb and just copy this entire array to the boot params
/* Round the region out to whole pages before recording it. */
1095 base = _ALIGN_DOWN(base, PAGE_SIZE);
1096 top = _ALIGN_UP(top, PAGE_SIZE);
1099 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1100 prom_panic("Memory reserve map exhausted !\n");
1101 mem_reserve_map[cnt].base = cpu_to_be64(base);
1102 mem_reserve_map[cnt].size = cpu_to_be64(size);
1103 mem_reserve_cnt = cnt + 1;
1107 * Initialize memory allocation mechanism, parse "memory" nodes and
1108 * obtain that way the top of memory and RMO to setup out local allocator
1110 static void __init prom_init_mem(void)
1113 char *path, type[64];
1120 * We iterate the memory nodes to find
1121 * 1) top of RMO (first node)
/* Read root #address-cells / #size-cells, defaulting to 2 and 1. */
1124 val = cpu_to_be32(2);
1125 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1126 rac = be32_to_cpu(val);
1127 val = cpu_to_be32(1);
/* FIX: pass sizeof(val) for the buffer actually being filled; the old
 * sizeof(rsc) only worked because both happen to be 4 bytes. */
1128 prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1129 rsc = be32_to_cpu(val);
1130 prom_debug("root_addr_cells: %x\n", rac);
1131 prom_debug("root_size_cells: %x\n", rsc);
1133 prom_debug("scanning memory:\n");
1134 path = prom_scratch;
1136 for (node = 0; prom_next_node(&node); ) {
1138 prom_getprop(node, "device_type", type, sizeof(type));
1142 * CHRP Longtrail machines have no device_type
1143 * on the memory node, so check the name instead...
1145 prom_getprop(node, "name", type, sizeof(type));
1147 if (strcmp(type, "memory"))
/* Pull the node's "reg" property, clamped to our scratch buffer. */
1150 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1151 if (plen > sizeof(regbuf)) {
1152 prom_printf("memory node too large for buffer !\n");
1153 plen = sizeof(regbuf);
1156 endp = p + (plen / sizeof(cell_t));
1159 memset(path, 0, PROM_SCRATCH_SIZE);
1160 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1161 prom_debug(" node %s :\n", path);
1162 #endif /* DEBUG_PROM */
/* Walk the (base, size) pairs, tracking the overall top of RAM. */
1164 while ((endp - p) >= (rac + rsc)) {
1165 unsigned long base, size;
1167 base = prom_next_cell(rac, &p);
1168 size = prom_next_cell(rsc, &p);
1172 prom_debug(" %x %x\n", base, size);
1173 if (base == 0 && (of_platform & PLATFORM_LPAR))
1175 if ((base + size) > ram_top)
1176 ram_top = base + size;
/* Allocator starts just above the kernel image (plus slack). */
1180 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1183 * If prom_memory_limit is set we reduce the upper limits *except* for
1184 * alloc_top_high. This must be the real top of RAM so we can put
1188 alloc_top_high = ram_top;
1190 if (prom_memory_limit) {
1191 if (prom_memory_limit <= alloc_bottom) {
1192 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1194 prom_memory_limit = 0;
1195 } else if (prom_memory_limit >= ram_top) {
1196 prom_printf("Ignoring mem=%x >= ram_top.\n",
1198 prom_memory_limit = 0;
1200 ram_top = prom_memory_limit;
1201 rmo_top = min(rmo_top, prom_memory_limit);
1206 * Setup our top alloc point, that is top of RMO or top of
1207 * segment 0 when running non-LPAR.
1208 * Some RS64 machines have buggy firmware where claims up at
1209 * 1GB fail. Cap at 768MB as a workaround.
1210 * Since 768MB is plenty of room, and we need to cap to something
1211 * reasonable on 32-bit, cap at 768MB on all machines.
1215 rmo_top = min(0x30000000ul, rmo_top);
1216 alloc_top = rmo_top;
1217 alloc_top_high = ram_top;
1220 * Check if we have an initrd after the kernel but still inside
1221 * the RMO. If we do move our bottom point to after it.
1223 if (prom_initrd_start &&
1224 prom_initrd_start < rmo_top &&
1225 prom_initrd_end > alloc_bottom)
1226 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1228 prom_printf("memory layout at init:\n");
1229 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1230 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1231 prom_printf(" alloc_top : %x\n", alloc_top);
1232 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1233 prom_printf(" rmo_top : %x\n", rmo_top);
1234 prom_printf(" ram_top : %x\n", ram_top);
/*
 * prom_close_stdin() - look up the /chosen "stdin" ihandle and close
 * it, so firmware stops owning the console input device.
 */
1237 static void __init prom_close_stdin(void)
1242 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1243 stdin = be32_to_cpu(val);
1244 call_prom("close", 1, 0, stdin);
1248 #ifdef CONFIG_PPC_POWERNV
1250 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1251 static u64 __initdata prom_opal_base;
1252 static u64 __initdata prom_opal_entry;
1255 #ifdef __BIG_ENDIAN__
1256 /* XXX Don't change this structure without updating opal-takeover.S */
1257 static struct opal_secondary_data {
1260 struct opal_takeover_args args; /* 16 */
1261 } opal_secondary_data;
/* OPAL takeover parameters discovered by prom_query_opal(). */
1263 static u64 __initdata prom_opal_align;
1264 static u64 __initdata prom_opal_size;
1265 static int __initdata prom_rtas_start_cpu;
1266 static u64 __initdata prom_rtas_data;
1267 static u64 __initdata prom_rtas_entry;
1269 extern char opal_secondary_entry;
/*
 * prom_query_opal() - probe whether an OPAL takeover is possible,
 * recording the HAL size/alignment and switching of_platform to
 * PLATFORM_OPAL on success.
 */
1271 static void __init prom_query_opal(void)
1275 /* We must not query for OPAL presence on a machine that
1276 * supports TNK takeover (970 blades), as this uses the same
1277 * h-call with different arguments and will crash
1279 if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1280 ADDR("/tnk-memory-map")))) {
1281 prom_printf("TNK takeover detected, skipping OPAL check\n");
1285 prom_printf("Querying for OPAL presence... ");
1287 rc = opal_query_takeover(&prom_opal_size,
1289 prom_debug("(rc = %ld) ", rc);
1291 prom_printf("not there.\n");
1294 of_platform = PLATFORM_OPAL;
1295 prom_printf(" there !\n");
1296 prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
1297 prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
/* Enforce a minimum 64KB alignment for the HAL image. */
1298 if (prom_opal_align < 0x10000)
1299 prom_opal_align = 0x10000;
/*
 * prom_rtas_call() - invoke an RTAS call (by @token) through OPAL's
 * enter-RTAS path. Variadic inputs fill args[]; up to nret-1 secondary
 * results are copied into @outputs. Returns rets[0] (the RTAS status),
 * or 0 when nret == 0.
 */
1302 static int __init prom_rtas_call(int token, int nargs, int nret,
1305 struct rtas_args rtas_args;
1309 rtas_args.token = token;
1310 rtas_args.nargs = nargs;
1311 rtas_args.nret = nret;
/* rets points just past the input arguments inside args[]. */
1312 rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]);
1313 va_start(list, outputs);
1314 for (i = 0; i < nargs; ++i)
1315 rtas_args.args[i] = va_arg(list, rtas_arg_t);
1318 for (i = 0; i < nret; ++i)
1319 rtas_args.rets[i] = 0;
1321 opal_enter_rtas(&rtas_args, prom_rtas_data,
1324 if (nret > 1 && outputs != NULL)
1325 for (i = 0; i < nret-1; ++i)
1326 outputs[i] = rtas_args.rets[i+1];
1327 return (nret > 0)? rtas_args.rets[0]: 0;
/*
 * prom_opal_hold_cpus() - kick every secondary CPU out of Open
 * Firmware and park it at opal_secondary_entry via the RTAS start-cpu
 * call, then busy-wait for each one's acknowledgement in
 * opal_secondary_data.ack.
 */
1330 static void __init prom_opal_hold_cpus(void)
1332 int i, cnt, cpu, rc;
1337 void *entry = (unsigned long *)&opal_secondary_entry;
1338 struct opal_secondary_data *data = &opal_secondary_data;
1340 prom_debug("prom_opal_hold_cpus: start...\n");
1341 prom_debug(" - entry = 0x%x\n", entry);
1342 prom_debug(" - data = 0x%x\n", data);
/* Walk the device tree looking for usable cpu nodes. */
1348 for (node = 0; prom_next_node(&node); ) {
1350 prom_getprop(node, "device_type", type, sizeof(type));
1351 if (strcmp(type, "cpu") != 0)
1354 /* Skip non-configured cpus. */
1355 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1356 if (strcmp(type, "okay") != 0)
1359 cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1361 if (cnt == PROM_ERROR)
/* One hardware thread per interrupt-server entry. */
1364 for (i = 0; i < cnt; i++) {
1366 prom_debug("CPU %d ... ", cpu);
1367 if (cpu == prom.cpu) {
1368 prom_debug("booted !\n");
1371 prom_debug("starting ... ");
1373 /* Init the acknowledge var which will be reset by
1374 * the secondary cpu when it awakens from its OF
1378 rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1379 NULL, cpu, entry, data);
1380 prom_debug("rtas rc=%d ...", rc);
/* Spin-wait (bounded) for the secondary to signal arrival. */
1382 for (j = 0; j < 100000000 && data->ack == -1; j++) {
1387 if (data->ack != -1)
1388 prom_debug("done, PIR=0x%x\n", data->ack);
1390 prom_debug("timeout !\n");
1393 prom_debug("prom_opal_hold_cpus: end...\n");
/*
 * prom_opal_takeover() - build the opal_takeover_args block (kernel
 * image location/size, optional initrd placement, HAL load address),
 * copy the command line into boot_command_line, and perform the OPAL
 * takeover. Does not return on success.
 */
1396 static void __init prom_opal_takeover(void)
1398 struct opal_secondary_data *data = &opal_secondary_data;
1399 struct opal_takeover_args *args = &data->args;
1400 u64 align = prom_opal_align;
1401 u64 top_addr, opal_addr;
1403 args->k_image = (u64)_stext;
1404 args->k_size = _end - _stext;
1406 args->k_entry2 = 0x60;
1408 top_addr = _ALIGN_UP(args->k_size, align);
/* Place the initrd (if any) immediately above the kernel image. */
1410 if (prom_initrd_start != 0) {
1411 args->rd_image = prom_initrd_start;
1412 args->rd_size = prom_initrd_end - args->rd_image;
1413 args->rd_loc = top_addr;
1414 top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1417 /* Pickup an address for the HAL. We want to go really high
1418 * up to avoid problem with future kexecs. On the other hand
1419 * we don't want to be all over the TCEs on P5IOC2 machines
1420 * which are going to be up there too. We assume the machine
1421 * has plenty of memory, and we ask for the HAL for now to
1422 * be just below the 1G point, or above the initrd
1424 opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1425 if (opal_addr < top_addr)
1426 opal_addr = top_addr;
1427 args->hal_addr = opal_addr;
1429 /* Copy the command line to the kernel image */
1430 strlcpy(boot_command_line, prom_cmd_line,
1433 prom_debug(" k_image = 0x%lx\n", args->k_image);
1434 prom_debug(" k_size = 0x%lx\n", args->k_size);
1435 prom_debug(" k_entry = 0x%lx\n", args->k_entry);
1436 prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2);
1437 prom_debug(" hal_addr = 0x%lx\n", args->hal_addr);
1438 prom_debug(" rd_image = 0x%lx\n", args->rd_image);
1439 prom_debug(" rd_size = 0x%lx\n", args->rd_size);
1440 prom_debug(" rd_loc = 0x%lx\n", args->rd_loc);
1441 prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
1446 opal_do_takeover(args);
1448 #endif /* __BIG_ENDIAN__ */
1448 #endif /* __BIG_ENDIAN__ */
1451 * Allocate room for and instantiate OPAL
/*
 * Allocate memory for the OPAL runtime, ask Open Firmware to load it
 * there via the "load-opal-runtime" method, and publish the base and
 * entry addresses in the device tree for the kernel to pick up.
 * NOTE(review): some source lines are elided in this listing.
 */
1453 static void __init prom_instantiate_opal(void)
1458 u64 size = 0, align = 0x10000;
1462 prom_debug("prom_instantiate_opal: start...\n");
1464 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1465 prom_debug("opal_node: %x\n", opal_node);
/* No /ibm,opal node: nothing to instantiate */
1466 if (!PHANDLE_VALID(opal_node))
/* Size/alignment requirements are read from the node (big-endian) */
1470 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1471 size = be64_to_cpu(val64);
1475 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1476 align = be64_to_cpu(val64);
1478 base = alloc_down(size, align, 0);
1480 prom_printf("OPAL allocation failed !\n");
1484 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1485 if (!IHANDLE_VALID(opal_inst)) {
1486 prom_printf("opening opal package failed (%x)\n", opal_inst);
1490 prom_printf("instantiating opal at 0x%x...", base);
/* The 64-bit entry point comes back split across rets[0]/rets[1] */
1492 if (call_prom_ret("call-method", 4, 3, rets,
1493 ADDR("load-opal-runtime"),
1495 base >> 32, base & 0xffffffff) != 0
1496 || (rets[0] == 0 && rets[1] == 0)) {
1497 prom_printf(" failed\n");
1500 entry = (((u64)rets[0]) << 32) | rets[1];
1502 prom_printf(" done\n");
/* Keep the OPAL image out of the kernel's usable memory */
1504 reserve_mem(base, size);
1506 prom_debug("opal base = 0x%x\n", base);
1507 prom_debug("opal align = 0x%x\n", align);
1508 prom_debug("opal entry = 0x%x\n", entry);
1509 prom_debug("opal size = 0x%x\n", (long)size);
/* Export base/entry so the kernel can call into OPAL later */
1511 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1512 &base, sizeof(base));
1513 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1514 &entry, sizeof(entry));
1516 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1517 prom_opal_base = base;
1518 prom_opal_entry = entry;
1520 prom_debug("prom_instantiate_opal: end...\n");
1523 #endif /* CONFIG_PPC_POWERNV */
1526 * Allocate room for and instantiate RTAS
/*
 * Allocate memory for RTAS and ask Open Firmware to instantiate it
 * there ("instantiate-rtas"); record base/entry in the device tree and
 * note whether the "query-cpu-stopped-state" RTAS call is available
 * (used later by prom_hold_cpus() to skip the holding pattern).
 * NOTE(review): some source lines are elided in this listing.
 */
1528 static void __init prom_instantiate_rtas(void)
1532 u32 base, entry = 0;
1536 prom_debug("prom_instantiate_rtas: start...\n");
1538 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1539 prom_debug("rtas_node: %x\n", rtas_node);
1540 if (!PHANDLE_VALID(rtas_node))
/* "rtas-size" is big-endian in the device tree.
 * NOTE(review): length argument is sizeof(size) while the buffer is
 * &val — harmless if both are u32, but worth confirming. */
1544 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1545 size = be32_to_cpu(val);
1549 base = alloc_down(size, PAGE_SIZE, 0);
1551 prom_panic("Could not allocate memory for RTAS\n");
1553 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1554 if (!IHANDLE_VALID(rtas_inst)) {
1555 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1559 prom_printf("instantiating rtas at 0x%x...", base);
1561 if (call_prom_ret("call-method", 3, 2, &entry,
1562 ADDR("instantiate-rtas"),
1563 rtas_inst, base) != 0
1565 prom_printf(" failed\n");
1568 prom_printf(" done\n");
/* Protect the RTAS image from later kernel allocations */
1570 reserve_mem(base, size);
1572 val = cpu_to_be32(base);
1573 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1575 val = cpu_to_be32(entry);
1576 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1579 /* Check if it supports "query-cpu-stopped-state" */
1580 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1581 &val, sizeof(val)) != PROM_ERROR)
1582 rtas_has_query_cpu_stopped = true;
1584 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1585 /* PowerVM takeover hack: stash RTAS details for the OPAL takeover path */
1586 prom_rtas_data = base;
1587 prom_rtas_entry = entry;
1588 prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1590 prom_debug("rtas base = 0x%x\n", base);
1591 prom_debug("rtas entry = 0x%x\n", entry);
1592 prom_debug("rtas size = 0x%x\n", (long)size);
1594 prom_debug("prom_instantiate_rtas: end...\n");
1599 * Allocate room for and instantiate Stored Measurement Log (SML)
/*
 * Retrieve the vTPM's Stored Measurement Log (SML): query the handover
 * size, allocate room, have firmware copy the log in ("sml-handover"),
 * and publish base/size under the /ibm,vtpm node.
 * NOTE(review): some source lines are elided in this listing.
 */
1601 static void __init prom_instantiate_sml(void)
1603 phandle ibmvtpm_node;
1604 ihandle ibmvtpm_inst;
1605 u32 entry = 0, size = 0;
1608 prom_debug("prom_instantiate_sml: start...\n");
1610 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
1611 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
/* No vTPM device: nothing to do */
1612 if (!PHANDLE_VALID(ibmvtpm_node))
1615 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
1616 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1617 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
/* Ask the firmware how big the handover buffer must be */
1621 if (call_prom_ret("call-method", 2, 2, &size,
1622 ADDR("sml-get-handover-size"),
1623 ibmvtpm_inst) != 0 || size == 0) {
1624 prom_printf("SML get handover size failed\n");
1628 base = alloc_down(size, PAGE_SIZE, 0);
1630 prom_panic("Could not allocate memory for sml\n");
1632 prom_printf("instantiating sml at 0x%x...", base);
1634 if (call_prom_ret("call-method", 4, 2, &entry,
1635 ADDR("sml-handover"),
1636 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1637 prom_printf("SML handover failed\n");
1640 prom_printf(" done\n");
/* Keep the log safe from later kernel allocations */
1642 reserve_mem(base, size);
1644 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
1645 &base, sizeof(base));
1646 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
1647 &size, sizeof(size));
1649 prom_debug("sml base = 0x%x\n", base);
1650 prom_debug("sml size = 0x%x\n", (long)size);
1652 prom_debug("prom_instantiate_sml: end...\n");
1656 * Allocate room for and initialize TCE tables
1658 #ifdef __BIG_ENDIAN__
/*
 * Find PCI host bridges (python/Speedwagon/Winnipeg) in the device
 * tree, allocate and identity-map a TCE table for each, tell OF to
 * enable 64-bit addressing through it, and record the table location
 * as linux,tce-base/linux,tce-size properties.
 * NOTE(review): some source lines are elided in this listing.
 */
1659 static void __init prom_initialize_tce_table(void)
1663 char compatible[64], type[64], model[64];
1664 char *path = prom_scratch;
1666 u32 minalign, minsize;
1667 u64 tce_entry, *tce_entryp;
1668 u64 local_alloc_top, local_alloc_bottom;
1674 prom_debug("starting prom_initialize_tce_table\n");
1676 /* Cache current top of allocs so we reserve a single block */
1677 local_alloc_top = alloc_top_high;
1678 local_alloc_bottom = local_alloc_top;
1680 /* Search all nodes looking for PHBs. */
1681 for (node = 0; prom_next_node(&node); ) {
1685 prom_getprop(node, "compatible",
1686 compatible, sizeof(compatible));
1687 prom_getprop(node, "device_type", type, sizeof(type));
1688 prom_getprop(node, "model", model, sizeof(model));
/* Only nodes whose device_type mentions "pci" are candidates */
1690 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1693 /* Keep the old logic intact to avoid regression. */
1694 if (compatible[0] != 0) {
1695 if ((strstr(compatible, "python") == NULL) &&
1696 (strstr(compatible, "Speedwagon") == NULL) &&
1697 (strstr(compatible, "Winnipeg") == NULL))
/* Fall back to "model"; substrings drop the first letter so both
 * capitalizations ("Python"/"python" etc.) match. */
1699 } else if (model[0] != 0) {
1700 if ((strstr(model, "ython") == NULL) &&
1701 (strstr(model, "peedwagon") == NULL) &&
1702 (strstr(model, "innipeg") == NULL))
1706 if (prom_getprop(node, "tce-table-minalign", &minalign,
1707 sizeof(minalign)) == PROM_ERROR)
1709 if (prom_getprop(node, "tce-table-minsize", &minsize,
1710 sizeof(minsize)) == PROM_ERROR)
1711 minsize = 4UL << 20;
1714 * Even though we read what OF wants, we just set the table
1715 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1716 * By doing this, we avoid the pitfalls of trying to DMA to
1717 * MMIO space and the DMA alias hole.
1719 * On POWER4, firmware sets the TCE region by assuming
1720 * each TCE table is 8MB. Using this memory for anything
1721 * else will impact performance, so we always allocate 8MB.
1724 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1725 minsize = 8UL << 20;
1727 minsize = 4UL << 20;
1729 /* Align to the greater of the align or size */
1730 align = max(minalign, minsize);
1731 base = alloc_down(minsize, align, 1);
1733 prom_panic("ERROR, cannot find space for TCE table.\n");
/* Track the lowest table so one reserve_mem() covers them all */
1734 if (base < local_alloc_bottom)
1735 local_alloc_bottom = base;
1737 /* It seems OF doesn't null-terminate the path :-( */
1738 memset(path, 0, PROM_SCRATCH_SIZE);
1739 /* Call OF to setup the TCE hardware */
1740 if (call_prom("package-to-path", 3, 1, node,
1741 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1742 prom_printf("package-to-path failed\n");
1745 /* Save away the TCE table attributes for later use. */
1746 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1747 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1749 prom_debug("TCE table: %s\n", path);
1750 prom_debug("\tnode = 0x%x\n", node);
1751 prom_debug("\tbase = 0x%x\n", base);
1752 prom_debug("\tsize = 0x%x\n", minsize);
1754 /* Initialize the table to have a one-to-one mapping
1755 * over the allocated size.
1757 tce_entryp = (u64 *)base;
1758 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1759 tce_entry = (i << PAGE_SHIFT);
1761 *tce_entryp = tce_entry;
1764 prom_printf("opening PHB %s", path);
1765 phb_node = call_prom("open", 1, 1, path);
1767 prom_printf("... failed\n");
1769 prom_printf("... done\n");
/* Point the bridge hardware at the new table */
1771 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1772 phb_node, -1, minsize,
1773 (u32) base, (u32) (base >> 32));
1774 call_prom("close", 1, 0, phb_node);
1777 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1779 /* These are only really needed if there is a memory limit in
1780 * effect, but we don't know so export them always. */
1781 prom_tce_alloc_start = local_alloc_bottom;
1782 prom_tce_alloc_end = local_alloc_top;
1784 /* Flag the first invalid entry */
1785 prom_debug("ending prom_initialize_tce_table\n");
1787 #endif /* __BIG_ENDIAN__ */
1788 #endif /* CONFIG_PPC64 */
1791 * With CHRP SMP we need to use the OF to start the other processors.
1792 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1793 * so we have to put the processors into a holding pattern controlled
1794 * by the kernel (not OF) before we destroy the OF.
1796 * This uses a chunk of low memory, puts some holding pattern
1797 * code there and sends the other processors off to there until
1798 * smp_boot_cpus tells them to do something. The holding pattern
1799 * checks that address until its cpu # is there, when it is that
1800 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1801 * of setting those values.
1803 * We also use physical address 0x4 here to tell when a cpu
1804 * is in its holding pattern code.
1809 * We want to reference the copy of __secondary_hold_* in the
1810 * 0 - 0x100 address range
1812 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
/*
 * Park every secondary CPU in the low-memory holding pattern
 * (__secondary_hold) so the kernel can start them itself later —
 * necessary because Open Firmware is trashed once the kernel is moved.
 * Skipped on pseries when RTAS provides "query-cpu-stopped-state".
 *
 * Fix: the "reg" property read below had a mis-encoded argument —
 * "&reg" had been corrupted into the single character "®" (HTML
 * entity damage); restored to "&reg".
 * NOTE(review): some source lines are elided in this listing.
 */
1814 static void __init prom_hold_cpus(void)
1819 unsigned long *spinloop
1820 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1821 unsigned long *acknowledge
1822 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1823 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1826 * On pseries, if RTAS supports "query-cpu-stopped-state",
1827 * we skip this stage, the CPUs will be started by the
1828 * kernel using RTAS.
1830 if ((of_platform == PLATFORM_PSERIES ||
1831 of_platform == PLATFORM_PSERIES_LPAR) &&
1832 rtas_has_query_cpu_stopped) {
1833 prom_printf("prom_hold_cpus: skipped\n");
1837 prom_debug("prom_hold_cpus: start...\n");
1838 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1839 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1840 prom_debug(" 1) acknowledge = 0x%x\n",
1841 (unsigned long)acknowledge);
1842 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1843 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1845 /* Set the common spinloop variable, so all of the secondary cpus
1846 * will block when they are awakened from their OF spinloop.
1847 * This must occur for both SMP and non SMP kernels, since OF will
1848 * be trashed when we move the kernel.
1853 for (node = 0; prom_next_node(&node); ) {
1854 unsigned int cpu_no;
1858 prom_getprop(node, "device_type", type, sizeof(type));
1859 if (strcmp(type, "cpu") != 0)
1862 /* Skip non-configured cpus. */
1863 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1864 if (strcmp(type, "okay") != 0)
1867 reg = cpu_to_be32(-1); /* make sparse happy */
1868 prom_getprop(node, "reg", &reg, sizeof(reg));
1869 cpu_no = be32_to_cpu(reg);
1871 prom_debug("cpu hw idx = %lu\n", cpu_no);
1873 /* Init the acknowledge var which will be reset by
1874 * the secondary cpu when it awakens from its OF
1877 *acknowledge = (unsigned long)-1;
1879 if (cpu_no != prom.cpu) {
1880 /* Primary Thread of non-boot cpu or any thread */
1881 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1882 call_prom("start-cpu", 3, 0, node,
1883 secondary_hold, cpu_no);
/* Busy-wait for the secondary to write its ack into low memory */
1885 for (i = 0; (i < 100000000) &&
1886 (*acknowledge == ((unsigned long)-1)); i++ )
1889 if (*acknowledge == cpu_no)
1890 prom_printf("done\n");
1892 prom_printf("failed: %x\n", *acknowledge);
1896 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1897 #endif /* CONFIG_SMP */
1900 prom_debug("prom_hold_cpus: end...\n");
/*
 * Very early Open Firmware client-interface setup: cache the /chosen
 * and / (root) phandles that the rest of prom_init relies on. Panics
 * if either lookup fails — silently, since stdout is not yet set up.
 * NOTE(review): head/tail lines of this function are elided here; the
 * @pp argument's use is not visible in this listing.
 */
1904 static void __init prom_init_client_services(unsigned long pp)
1906 /* Get a handle to the prom entry point before anything else */
1909 /* get a handle for the stdout device */
1910 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1911 if (!PHANDLE_VALID(prom.chosen))
1912 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1914 /* get device tree root */
1915 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1916 if (!PHANDLE_VALID(prom.root))
1917 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1924 * For really old powermacs, we need to map things we claim.
1925 * For that, we need the ihandle of the mmu.
1926 * Also, on the longtrail, we need to work around other bugs.
/*
 * Detect old Open Firmware versions that need claim/map workarounds
 * (OF 1.0.5 PowerMacs, FirmWorks 3.x Longtrail) by inspecting the
 * /openprom "model" string, and grab the /memory and mmu ihandles
 * needed to perform explicit claims.
 * NOTE(review): some source lines are elided in this listing.
 */
1928 static void __init prom_find_mmu(void)
1933 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1934 if (!PHANDLE_VALID(oprom))
1936 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
/* Force NUL termination — getprop need not terminate the string */
1938 version[sizeof(version) - 1] = 0;
1939 /* XXX might need to add other versions here */
1940 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1941 of_workarounds = OF_WA_CLAIM;
1942 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1943 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
/* Longtrail-specific Forth hack to make /memory reclaimable */
1944 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1947 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1948 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1949 sizeof(prom.mmumap));
1950 prom.mmumap = be32_to_cpu(prom.mmumap);
/* Without valid memory/mmu handles we cannot do explicit claims */
1951 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1952 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1955 #define prom_find_mmu()
/*
 * Resolve the firmware's stdout device: cache its ihandle, record its
 * full OF path and package phandle under /chosen for the kernel, and
 * tag it as the boot display if it is one.
 * NOTE(review): some source lines are elided in this listing.
 */
1958 static void __init prom_init_stdout(void)
1960 char *path = of_stdout_device;
1962 phandle stdout_node;
1965 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1966 prom_panic("cannot find stdout");
1968 prom.stdout = be32_to_cpu(val);
1970 /* Get the full OF pathname of the stdout device */
1971 memset(path, 0, 256);
1972 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1973 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1974 val = cpu_to_be32(stdout_node);
1975 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1977 prom_printf("OF stdout device is: %s\n", of_stdout_device);
1978 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1979 path, strlen(path) + 1);
1981 /* If it's a display, note it */
1982 memset(type, 0, sizeof(type));
1983 prom_getprop(stdout_node, "device_type", type, sizeof(type));
1984 if (strcmp(type, "display") == 0)
1985 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
/*
 * Classify the platform from the device tree root: PowerMac (via
 * "compatible"), Cell blades (forced GENERIC), OPAL (PowerNV), or a
 * CHRP/PAPR machine — the latter refined into PSERIES vs PSERIES_LPAR
 * by the presence of ibm,hypertas-functions on /rtas.
 * Returns one of the PLATFORM_* constants.
 * NOTE(review): some source lines are elided in this listing.
 */
1988 static int __init prom_find_machine_type(void)
1997 /* Look for a PowerMac or a Cell */
1998 len = prom_getprop(prom.root, "compatible",
1999 compat, sizeof(compat)-1);
2003 char *p = &compat[i];
2007 if (strstr(p, "Power Macintosh") ||
2008 strstr(p, "MacRISC"))
2009 return PLATFORM_POWERMAC;
2011 /* We must make sure we don't detect the IBM Cell
2012 * blades as pSeries due to some firmware issues,
2015 if (strstr(p, "IBM,CBEA") ||
2016 strstr(p, "IBM,CPBW-1.0"))
2017 return PLATFORM_GENERIC;
2018 #endif /* CONFIG_PPC64 */
2023 /* Try to detect OPAL */
2024 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2025 return PLATFORM_OPAL;
2027 /* Try to figure out if it's an IBM pSeries or any other
2028 * PAPR compliant platform. We assume it is if :
2029 * - /device_type is "chrp" (please, do NOT use that for future
2033 len = prom_getprop(prom.root, "device_type",
2034 compat, sizeof(compat)-1);
2036 return PLATFORM_GENERIC;
/* Non-"chrp" device_type: not a PAPR platform */
2037 if (strcmp(compat, "chrp"))
2038 return PLATFORM_GENERIC;
2040 /* Default to pSeries. We need to know if we are running LPAR */
2041 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2042 if (!PHANDLE_VALID(rtas))
2043 return PLATFORM_GENERIC;
2044 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2045 if (x != PROM_ERROR) {
2046 prom_debug("Hypertas detected, assuming LPAR !\n");
2047 return PLATFORM_PSERIES_LPAR;
2049 return PLATFORM_PSERIES;
2051 return PLATFORM_GENERIC;
/* Set palette entry @i of display @ih to (r,g,b) via the OF "color!"
 * method. Note the method takes its color arguments in b, g, r order.
 * Returns the value reported by call_prom(). */
2055 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2057 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2061 * If we have a display that we don't know how to drive,
2062 * we will want to try to execute OF's open method for it
2063 * later. However, OF will probably fall over if we do that
2064 * after we've taken over the MMU.
2065 * So we check whether we will need to open the display,
2066 * and if so, open it now.
/*
 * Walk the device tree for "display" nodes, open each one now (OF may
 * not survive opening them after we take over the MMU), mark them
 * "linux,opened", install a usable color table, and — with early BOOTX
 * debug — point btext at the boot display's framebuffer.
 * NOTE(review): some source lines are elided in this listing.
 */
2068 static void __init prom_check_displays(void)
2070 char type[16], *path;
2075 static unsigned char default_colors[] = {
2093 const unsigned char *clut;
2095 prom_debug("Looking for displays\n");
2096 for (node = 0; prom_next_node(&node); ) {
2097 memset(type, 0, sizeof(type));
2098 prom_getprop(node, "device_type", type, sizeof(type));
2099 if (strcmp(type, "display") != 0)
2102 /* It seems OF doesn't null-terminate the path :-( */
2103 path = prom_scratch;
2104 memset(path, 0, PROM_SCRATCH_SIZE);
2107 * leave some room at the end of the path for appending extra
2110 if (call_prom("package-to-path", 3, 1, node, path,
2111 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2113 prom_printf("found display : %s, opening... ", path);
2115 ih = call_prom("open", 1, 1, path);
2117 prom_printf("failed\n");
2122 prom_printf("done\n");
/* Tell the kernel this display was opened by the prom */
2123 prom_setprop(node, path, "linux,opened", NULL, 0);
2125 /* Setup a usable color table when the appropriate
2126 * method is available. Should update this to set-colors */
2127 clut = default_colors;
2128 for (i = 0; i < 16; i++, clut += 3)
2129 if (prom_set_color(ih, i, clut[0], clut[1],
2133 #ifdef CONFIG_LOGO_LINUX_CLUT224
/* Also program the boot-logo palette above the standard entries */
2134 clut = PTRRELOC(logo_linux_clut224.clut);
2135 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2136 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2139 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2141 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2142 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2144 u32 width, height, pitch, addr;
2146 prom_printf("Setting btext !\n");
2147 prom_getprop(node, "width", &width, 4);
2148 prom_getprop(node, "height", &height, 4);
2149 prom_getprop(node, "linebytes", &pitch, 4);
2150 prom_getprop(node, "address", &addr, 4);
2151 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2152 width, height, pitch, addr);
2153 btext_setup_display(width, height, 8, pitch, addr);
2155 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2160 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
/*
 * Bump-allocate @needed bytes (aligned to @align) from the region
 * [*mem_start, *mem_end), claiming additional chunks via alloc_up()
 * when the current region is exhausted. Panics on failure; returns a
 * pointer to the reserved space and advances *mem_start past it.
 * NOTE(review): some source lines are elided in this listing.
 */
2161 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2162 unsigned long needed, unsigned long align)
2166 *mem_start = _ALIGN(*mem_start, align);
2167 while ((*mem_start + needed) > *mem_end) {
2168 unsigned long room, chunk;
2170 prom_debug("Chunk exhausted, claiming more at %x...\n",
/* Claim at most DEVTREE_CHUNK_SIZE, and never less than a page */
2172 room = alloc_top - alloc_bottom;
2173 if (room > DEVTREE_CHUNK_SIZE)
2174 room = DEVTREE_CHUNK_SIZE;
2175 if (room < PAGE_SIZE)
2176 prom_panic("No memory for flatten_device_tree "
2178 chunk = alloc_up(room, 0);
2180 prom_panic("No memory for flatten_device_tree "
2181 "(claim failed)\n");
2182 *mem_end = chunk + room;
2185 ret = (void *)*mem_start;
2186 *mem_start += needed;
/* Append one 32-bit big-endian token to the flattened device tree,
 * growing the buffer through make_room() (4-byte size and alignment). */
2191 #define dt_push_token(token, mem_start, mem_end) do { \
2192 void *room = make_room(mem_start, mem_end, 4, 4); \
2193 *(__be32 *)room = cpu_to_be32(token); \
/*
 * Search the string table built between dt_string_start and
 * dt_string_end for @str; used to de-duplicate property names in the
 * flattened tree.
 * NOTE(review): the tail of this function (offset computation /
 * not-found return) is elided in this listing.
 */
2196 static unsigned long __init dt_find_string(char *str)
2200 s = os = (char *)dt_string_start;
2202 while (s < (char *)dt_string_end) {
2203 if (strcmp(s, str) == 0)
2211 * The Open Firmware 1275 specification states properties must be 31 bytes or
2212 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2214 #define MAX_PROPERTY_NAME 64
/*
 * First flattening pass: recursively collect every property name of
 * @node and its children into the de-duplicated string table growing
 * at *mem_start (the names are stored once; scan_dt_build_struct()
 * later references them by offset).
 * NOTE(review): some source lines are elided in this listing.
 */
2216 static void __init scan_dt_build_strings(phandle node,
2217 unsigned long *mem_start,
2218 unsigned long *mem_end)
2220 char *prev_name, *namep, *sstart;
2224 sstart = (char *)dt_string_start;
2226 /* get and store all property names */
2229 /* 64 is max len of name including nul. */
2230 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2231 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2232 /* No more properties: unwind alloc */
2233 *mem_start = (unsigned long)namep;
/* "name" is encoded in the structure block itself, skip it here */
2238 if (strcmp(namep, "name") == 0) {
2239 *mem_start = (unsigned long)namep;
2243 /* get/create string entry */
2244 soff = dt_find_string(namep);
/* Already in the table: release the scratch copy, reuse the entry */
2246 *mem_start = (unsigned long)namep;
2247 namep = sstart + soff;
2249 /* Trim off some if we can */
2250 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2251 dt_string_end = *mem_start;
2256 /* do all our children */
2257 child = call_prom("child", 1, 1, node);
2258 while (child != 0) {
2259 scan_dt_build_strings(child, mem_start, mem_end);
2260 child = call_prom("peer", 1, 1, child);
/*
 * Second flattening pass: emit the FDT structure block for @node —
 * BEGIN_NODE + unit name, one PROP record per property (name encoded
 * as an offset into the string table from pass one), a synthesized
 * "linux,phandle" when the node lacks a "phandle", then recurse into
 * children and close with END_NODE.
 * NOTE(review): some source lines are elided in this listing.
 */
2264 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2265 unsigned long *mem_end)
2268 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2270 unsigned char *valp;
2271 static char pname[MAX_PROPERTY_NAME];
2272 int l, room, has_phandle = 0;
2274 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2276 /* get the node's full name */
2277 namep = (char *)*mem_start;
2278 room = *mem_end - *mem_start;
2281 l = call_prom("package-to-path", 3, 1, node, namep, room);
2283 /* Didn't fit? Get more room. */
2285 if (l >= *mem_end - *mem_start)
2286 namep = make_room(mem_start, mem_end, l+1, 1);
2287 call_prom("package-to-path", 3, 1, node, namep, l);
2291 /* Fixup an Apple bug where they have bogus \0 chars in the
2292 * middle of the path in some properties, and extract
2293 * the unit name (everything after the last '/').
2295 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2302 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2305 /* get it again for debugging */
2306 path = prom_scratch;
2307 memset(path, 0, PROM_SCRATCH_SIZE);
2308 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2310 /* get and store all properties */
2312 sstart = (char *)dt_string_start;
2314 if (call_prom("nextprop", 3, 1, node, prev_name,
/* "name" is derived from the unit name; don't emit it as a PROP */
2319 if (strcmp(pname, "name") == 0) {
2324 /* find string offset */
2325 soff = dt_find_string(pname);
2327 prom_printf("WARNING: Can't find string index for"
2328 " <%s>, node %s\n", pname, path);
2331 prev_name = sstart + soff;
2334 l = call_prom("getproplen", 2, 1, node, pname);
2337 if (l == PROM_ERROR)
2340 /* push property head */
2341 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2342 dt_push_token(l, mem_start, mem_end);
2343 dt_push_token(soff, mem_start, mem_end);
2345 /* push property content */
2346 valp = make_room(mem_start, mem_end, l, 4);
2347 call_prom("getprop", 4, 1, node, pname, valp, l);
2348 *mem_start = _ALIGN(*mem_start, 4);
2350 if (!strcmp(pname, "phandle"))
2354 /* Add a "linux,phandle" property if no "phandle" property already
2355 * existed (can happen with OPAL)
2358 soff = dt_find_string("linux,phandle");
2360 prom_printf("WARNING: Can't find string index for"
2361 " <linux-phandle> node %s\n", path);
2363 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2364 dt_push_token(4, mem_start, mem_end);
2365 dt_push_token(soff, mem_start, mem_end);
2366 valp = make_room(mem_start, mem_end, 4, 4);
2367 *(__be32 *)valp = cpu_to_be32(node);
2371 /* do all our children */
2372 child = call_prom("child", 1, 1, node);
2373 while (child != 0) {
2374 scan_dt_build_struct(child, mem_start, mem_end);
2375 child = call_prom("peer", 1, 1, child);
2378 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
/*
 * Build the flattened device tree blob the kernel will consume:
 * claim an initial memory chunk, run the two scan passes (strings,
 * then structure), fill in the boot_param_header, and copy in the
 * memory reserve map. After this, further reservations are refused.
 * NOTE(review): some source lines are elided in this listing.
 */
2381 static void __init flatten_device_tree(void)
2384 unsigned long mem_start, mem_end, room;
2385 struct boot_param_header *hdr;
2390 * Check how much room we have between alloc top & bottom (+/- a
2391 * few pages), crop to 1MB, as this is our "chunk" size
2393 room = alloc_top - alloc_bottom - 0x4000;
2394 if (room > DEVTREE_CHUNK_SIZE)
2395 room = DEVTREE_CHUNK_SIZE;
2396 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2398 /* Now try to claim that */
2399 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2401 prom_panic("Can't allocate initial device-tree chunk\n");
2402 mem_end = mem_start + room;
2404 /* Get root of tree */
2405 root = call_prom("peer", 1, 1, (phandle)0);
2406 if (root == (phandle)0)
2407 prom_panic ("couldn't get device tree root\n");
2409 /* Build header and make room for mem rsv map */
2410 mem_start = _ALIGN(mem_start, 4);
2411 hdr = make_room(&mem_start, &mem_end,
2412 sizeof(struct boot_param_header), 4);
2413 dt_header_start = (unsigned long)hdr;
2414 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2416 /* Start of strings */
2417 mem_start = PAGE_ALIGN(mem_start);
2418 dt_string_start = mem_start;
2419 mem_start += 4; /* hole */
2421 /* Add "linux,phandle" in there, we'll need it */
2422 namep = make_room(&mem_start, &mem_end, 16, 1);
2423 strcpy(namep, "linux,phandle");
2424 mem_start = (unsigned long)namep + strlen(namep) + 1;
2426 /* Build string array */
2427 prom_printf("Building dt strings...\n");
2428 scan_dt_build_strings(root, &mem_start, &mem_end);
2429 dt_string_end = mem_start;
2431 /* Build structure */
2432 mem_start = PAGE_ALIGN(mem_start);
2433 dt_struct_start = mem_start;
2434 prom_printf("Building dt structure...\n");
2435 scan_dt_build_struct(root, &mem_start, &mem_end);
2436 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2437 dt_struct_end = PAGE_ALIGN(mem_start);
/* All header fields are big-endian per the FDT format */
2440 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2441 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2442 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2443 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2444 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2445 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2446 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2447 hdr->version = cpu_to_be32(OF_DT_VERSION);
2448 /* Version 16 is not backward compatible */
2449 hdr->last_comp_version = cpu_to_be32(0x10);
2451 /* Copy the reserve map in */
2452 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2457 prom_printf("reserved memory map:\n");
2458 for (i = 0; i < mem_reserve_cnt; i++)
2459 prom_printf(" %x - %x\n",
2460 be64_to_cpu(mem_reserve_map[i].base),
2461 be64_to_cpu(mem_reserve_map[i].size));
2464 /* Bump mem_reserve_cnt to cause further reservations to fail
2465 * since it's too late.
2467 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2469 prom_printf("Device tree strings 0x%x -> 0x%x\n",
2470 dt_string_start, dt_string_end);
2471 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2472 dt_struct_start, dt_struct_end);
2475 #ifdef CONFIG_PPC_MAPLE
2476 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2477 * The values are bad, and it doesn't even have the right number of cells. */
/*
 * Maple/Apache quirk: rewrite the known-bad 3-cell /ht/isa "ranges"
 * property (PIBS 1.05.0000) into a correct 6-cell ISA I/O range whose
 * PCI address encodes the right device number.
 * NOTE(review): some source lines are elided in this listing.
 */
2478 static void __init fixup_device_tree_maple(void)
2481 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2485 name = "/ht@0/isa@4";
2486 isa = call_prom("finddevice", 1, 1, ADDR(name));
/* ISA bridge may sit at device 6 instead of 4 */
2487 if (!PHANDLE_VALID(isa)) {
2488 name = "/ht@0/isa@6";
2489 isa = call_prom("finddevice", 1, 1, ADDR(name));
2490 rloc = 0x01003000; /* IO space; PCI device = 6 */
2492 if (!PHANDLE_VALID(isa))
/* Only touch the property when it matches the known-bad layout */
2495 if (prom_getproplen(isa, "ranges") != 12)
2497 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2501 if (isa_ranges[0] != 0x1 ||
2502 isa_ranges[1] != 0xf4000000 ||
2503 isa_ranges[2] != 0x00010000)
2506 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2508 isa_ranges[0] = 0x1;
2509 isa_ranges[1] = 0x0;
2510 isa_ranges[2] = rloc;
2511 isa_ranges[3] = 0x0;
2512 isa_ranges[4] = 0x0;
2513 isa_ranges[5] = 0x00010000;
2514 prom_setprop(isa, name, "ranges",
2515 isa_ranges, sizeof(isa_ranges));
2518 #define CPC925_MC_START 0xf8000000
2519 #define CPC925_MC_LENGTH 0x1000000
2520 /* The values for memory-controller don't have right number of cells */
/*
 * Maple quirk: the CPC925 hostbridge "reg" property has only 2 cells
 * while the root declares #address-cells = #size-cells = 2; expand it
 * to the proper 4-cell (addr64, size64) form.
 * NOTE(review): some source lines are elided in this listing.
 */
2521 static void __init fixup_device_tree_maple_memory_controller(void)
2525 char *name = "/hostbridge@f8000000";
2528 mc = call_prom("finddevice", 1, 1, ADDR(name));
2529 if (!PHANDLE_VALID(mc))
/* Only fix the known-bad 8-byte (2-cell) form */
2532 if (prom_getproplen(mc, "reg") != 8)
2535 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2536 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2537 if ((ac != 2) || (sc != 2))
2540 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2543 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2546 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2549 mc_reg[1] = CPC925_MC_START;
2551 mc_reg[3] = CPC925_MC_LENGTH;
2552 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2555 #define fixup_device_tree_maple()
2556 #define fixup_device_tree_maple_memory_controller()
2559 #ifdef CONFIG_PPC_CHRP
2561 * Pegasos and BriQ lack the "ranges" property in the isa node
2562 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2563 * Pegasos has the IDE configured in legacy mode, but advertised as native
/*
 * Pegasos/BriQ quirks: add the missing ISA "ranges" property, force
 * the IDE controller's decimal IRQs, and correct its class-code
 * (advertised native but actually wired in legacy mode).
 * NOTE(review): some source lines are elided in this listing.
 */
2565 static void __init fixup_device_tree_chrp(void)
2569 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2573 name = "/pci@80000000/isa@c";
2574 ph = call_prom("finddevice", 1, 1, ADDR(name));
/* Alternate location of the ISA bridge on some boards */
2575 if (!PHANDLE_VALID(ph)) {
2576 name = "/pci@ff500000/isa@6";
2577 ph = call_prom("finddevice", 1, 1, ADDR(name));
2578 rloc = 0x01003000; /* IO space; PCI device = 6 */
2580 if (PHANDLE_VALID(ph)) {
2581 rc = prom_getproplen(ph, "ranges");
2582 if (rc == 0 || rc == PROM_ERROR) {
2583 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2590 prop[5] = 0x00010000;
2591 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2595 name = "/pci@80000000/ide@C,1";
2596 ph = call_prom("finddevice", 1, 1, ADDR(name));
2597 if (PHANDLE_VALID(ph)) {
2598 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2601 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2602 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2603 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2604 if (rc == sizeof(u32)) {
2606 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2611 #define fixup_device_tree_chrp()
2614 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
/*
 * G5 PowerMac quirk: on U3 revisions 0x35-0x39, the i2c node lacks an
 * interrupt definition; install "interrupts" and point
 * "interrupt-parent" at the MPIC.
 * NOTE(review): some source lines are elided in this listing.
 */
2615 static void __init fixup_device_tree_pmac(void)
2617 phandle u3, i2c, mpic;
2622 /* Some G5s have a missing interrupt definition, fix it up here */
2623 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2624 if (!PHANDLE_VALID(u3))
2626 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2627 if (!PHANDLE_VALID(i2c))
2629 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2630 if (!PHANDLE_VALID(mpic))
2633 /* check if proper rev of u3 */
2634 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2637 if (u3_rev < 0x35 || u3_rev > 0x39)
2639 /* does it need fixup ? */
2640 if (prom_getproplen(i2c, "interrupts") > 0)
2643 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2645 /* interrupt on this revision of u3 is number 0 and level */
2648 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2649 &interrupts, sizeof(interrupts));
2651 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2652 &parent, sizeof(parent));
2655 #define fixup_device_tree_pmac()
2658 #ifdef CONFIG_PPC_EFIKA
2660 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2661 * to talk to the phy. If the phy-handle property is missing, then this
2662 * function is called to add the appropriate nodes and link it to the
2665 static void __init fixup_device_tree_efika_add_phy(void)
2671 /* Check if /builtin/ethernet exists - bail if it doesn't */
2672 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2673 if (!PHANDLE_VALID(node))
2676 /* Check if the phy-handle property exists - bail if it does */
2677 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2682 * At this point the ethernet device doesn't have a phy described.
2683 * Now we need to add the missing phy node and linkage
2686 /* Check for an MDIO bus node - if missing then create one */
2687 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2688 if (!PHANDLE_VALID(node)) {
2689 prom_printf("Adding Ethernet MDIO node\n");
/* Build the mdio node by feeding Forth source to the OF interpreter */
2690 call_prom("interpret", 1, 1,
2691 " s\" /builtin\" find-device"
2693 " 1 encode-int s\" #address-cells\" property"
2694 " 0 encode-int s\" #size-cells\" property"
2695 " s\" mdio\" device-name"
2696 " s\" fsl,mpc5200b-mdio\" encode-string"
2697 " s\" compatible\" property"
2698 " 0xf0003000 0x400 reg"
2700 " 0x5 encode-int encode+"
2701 " 0x3 encode-int encode+"
2702 " s\" interrupts\" property"
2706 /* Check for a PHY device node - if missing then create one and
2707 * give its phandle to the ethernet node */
2708 node = call_prom("finddevice", 1, 1,
2709 ADDR("/builtin/mdio/ethernet-phy"));
2710 if (!PHANDLE_VALID(node)) {
2711 prom_printf("Adding Ethernet PHY node\n");
2712 call_prom("interpret", 1, 1,
2713 " s\" /builtin/mdio\" find-device"
2715 " s\" ethernet-phy\" device-name"
2716 " 0x10 encode-int s\" reg\" property"
/* Then link the new phy into the ethernet node's phy-handle property */
2720 " s\" /builtin/ethernet\" find-device"
2722 " s\" phy-handle\" property"
/*
 * fixup_device_tree_efika() - device-tree fixes for the EFIKA 5K2.
 *
 * Runs only when the root node's "model" property is "EFIKA5K2".
 * Rewrites the "chrp" device_type, the CODEGEN,description string,
 * the bestcomm and sound "interrupts" properties, and ensures the
 * ethernet phy-handle linkage exists.
 *
 * NOTE(review): excerpt — some statements and closing braces between
 * the numbered lines are not visible here.
 */
2727 static void __init fixup_device_tree_efika(void)
2729 int sound_irq[3] = { 2, 2, 0 };
/* One 3-cell interrupt specifier per bestcomm task, 16 tasks total */
2730 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2731 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2732 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2733 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2738 /* Check if we're really running on a EFIKA */
2739 node = call_prom("finddevice", 1, 1, ADDR("/"));
2740 if (!PHANDLE_VALID(node))
2743 rv = prom_getprop(node, "model", prop, sizeof(prop));
2744 if (rv == PROM_ERROR)
2746 if (strcmp(prop, "EFIKA5K2"))
2749 prom_printf("Applying EFIKA device tree fixups\n");
2751 /* Claiming to be 'chrp' is death */
2752 node = call_prom("finddevice", 1, 1, ADDR("/"));
2753 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2754 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2755 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2757 /* CODEGEN,description is exposed in /proc/cpuinfo so
2759 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2760 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2761 prom_setprop(node, "/", "CODEGEN,description",
2762 "Efika 5200B PowerPC System",
2763 sizeof("Efika 5200B PowerPC System"));
2765 /* Fixup bestcomm interrupts property */
2766 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2767 if (PHANDLE_VALID(node)) {
2768 len = prom_getproplen(node, "interrupts");
2770 prom_printf("Fixing bestcomm interrupts property\n");
/* NOTE(review): the path string below says "bestcom" while the node was
 * looked up as "bestcomm".  The phandle argument is what selects the
 * target node, so this is presumably only a typo in the diagnostic
 * path string — confirm against prom_setprop() before changing it. */
2771 prom_setprop(node, "/builtin/bestcom", "interrupts",
2772 bcomm_irq, sizeof(bcomm_irq));
2776 /* Fixup sound interrupts property */
2777 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2778 if (PHANDLE_VALID(node)) {
/* Only add "interrupts" when the property is absent (read failed) */
2779 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2780 if (rv == PROM_ERROR) {
2781 prom_printf("Adding sound interrupts property\n");
2782 prom_setprop(node, "/builtin/sound", "interrupts",
2783 sound_irq, sizeof(sound_irq));
2787 /* Make sure ethernet phy-handle property exists */
2788 fixup_device_tree_efika_add_phy();
/* Non-EFIKA builds: the fixup compiles to a no-op */
2791 #define fixup_device_tree_efika()
/*
 * fixup_device_tree() - apply all known platform device-tree workarounds
 * before the tree is flattened.  Helpers for platforms that are not
 * configured compile to no-op macros (see the #define stubs above).
 */
2794 static void __init fixup_device_tree(void)
2796 fixup_device_tree_maple();
2797 fixup_device_tree_maple_memory_controller();
2798 fixup_device_tree_chrp();
2799 fixup_device_tree_pmac();
2800 fixup_device_tree_efika();
/*
 * prom_find_boot_cpu() - record the hardware index of the booting CPU.
 *
 * The /chosen "cpu" property holds the boot CPU's OF instance handle;
 * convert it to its package and read the "reg" property to obtain the
 * hardware CPU number, stored (converted from big-endian) in prom.cpu.
 */
2803 static void __init prom_find_boot_cpu(void)
2810 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2812 prom_cpu = be32_to_cpu(rval);
/* ihandle -> phandle so we can read properties of the CPU node */
2814 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2816 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2817 prom.cpu = be32_to_cpu(rval);
2819 prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
/*
 * prom_check_initrd() - record an initrd handed over in r3/r4, if any.
 *
 * r3 is the initrd address (translated to a physical address when it is
 * a kernel virtual address) and r4 its size; a value of 0xdeadbeef in r4
 * is treated as "no initrd".  The range is advertised to the kernel via
 * the /chosen linux,initrd-start/-end properties and reserved so it is
 * not handed out as free memory.
 */
2822 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2824 #ifdef CONFIG_BLK_DEV_INITRD
2825 if (r3 && r4 && r4 != 0xdeadbeef) {
2828 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2829 prom_initrd_end = prom_initrd_start + r4;
/* Property values are stored big-endian */
2831 val = cpu_to_be64(prom_initrd_start);
2832 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2834 val = cpu_to_be64(prom_initrd_end);
2835 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
/* Keep the initrd image out of the free-memory pool */
2838 reserve_mem(prom_initrd_start,
2839 prom_initrd_end - prom_initrd_start);
2841 prom_debug("initrd_start=0x%x\n", prom_initrd_start);
2842 prom_debug("initrd_end=0x%x\n", prom_initrd_end);
2844 #endif /* CONFIG_BLK_DEV_INITRD */
2848 #ifdef CONFIG_RELOCATABLE
/* NOTE(review): when the kernel is built relocatable it performs its own
 * relocation, so these TOC helpers are presumably empty stubs — their
 * bodies are elided in this excerpt; confirm in the full source. */
2849 static void reloc_toc(void)
2853 static void unreloc_toc(void)
/*
 * __reloc_toc() - add 'offset' to each of nr_entries consecutive TOC
 * slots, starting from the TOC base derived from register r2.
 */
2857 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2860 unsigned long *toc_entry;
2862 /* Get the start of the TOC by using r2 directly. */
/* r2 is biased 0x8000 past the TOC start, hence the -0x8000 here */
2863 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2865 for (i = 0; i < nr_entries; i++) {
2866 *toc_entry = *toc_entry + offset;
/*
 * reloc_toc() - slide the prom_init TOC entries by the current load
 * offset so they are usable while running away from the link address.
 */
2871 static void reloc_toc(void)
2873 unsigned long offset = reloc_offset();
/* Number of long-sized slots in the prom_init TOC section */
2874 unsigned long nr_entries =
2875 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2877 __reloc_toc(offset, nr_entries);
/*
 * unreloc_toc() - undo reloc_toc() by applying the negated load offset
 * to the same range of TOC entries.
 */
2882 static void unreloc_toc(void)
2884 unsigned long offset = reloc_offset();
2885 unsigned long nr_entries =
2886 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2890 __reloc_toc(-offset, nr_entries);
2896 * We enter here early on, when the Open Firmware prom is still
2897 * handling exceptions and the MMU hash table for us.
2900 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2902 unsigned long r6, unsigned long r7,
2903 unsigned long kbase)
2908 unsigned long offset = reloc_offset();
2915 * First zero the BSS
2917 memset(&__bss_start, 0, __bss_stop - __bss_start);
2920 * Init interface to Open Firmware, get some node references,
2923 prom_init_client_services(pp);
2926 * See if this OF is old enough that we need to do explicit maps
2927 * and other workarounds
2932 * Init prom stdout device
2936 prom_printf("Preparing to boot %s", linux_banner);
2939 * Get default machine type. At this point, we do not differentiate
2940 * between pSeries SMP and pSeries LPAR
2942 of_platform = prom_find_machine_type();
2943 prom_printf("Detected machine type: %x\n", of_platform);
2945 #ifndef CONFIG_NONSTATIC_KERNEL
2946 /* Bail if this is a kdump kernel. */
2947 if (PHYSICAL_START > 0)
2948 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2952 * Check for an initrd
2954 prom_check_initrd(r3, r4);
2956 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2958 * On pSeries, inform the firmware about our capabilities
2960 if (of_platform == PLATFORM_PSERIES ||
2961 of_platform == PLATFORM_PSERIES_LPAR)
2962 prom_send_capabilities();
2966 * Copy the CPU hold code
2968 if (of_platform != PLATFORM_POWERMAC)
2969 copy_and_flush(0, kbase, 0x100, 0);
2972 * Do early parsing of command line
2974 early_cmdline_parse();
2977 * Initialize memory management within prom_init
2982 * Determine which cpu is actually running right _now_
2984 prom_find_boot_cpu();
2987 * Initialize display devices
2989 prom_check_displays();
2991 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
2993 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2994 * that uses the allocator, we need to make sure we get the top of memory
2995 * available for us here...
2997 if (of_platform == PLATFORM_PSERIES)
2998 prom_initialize_tce_table();
3002 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3003 * have a usable RTAS implementation.
3005 if (of_platform != PLATFORM_POWERMAC &&
3006 of_platform != PLATFORM_OPAL)
3007 prom_instantiate_rtas();
3009 #ifdef CONFIG_PPC_POWERNV
3010 #ifdef __BIG_ENDIAN__
3011 /* Detect HAL and try instanciating it & doing takeover */
3012 if (of_platform == PLATFORM_PSERIES_LPAR) {
3014 if (of_platform == PLATFORM_OPAL) {
3015 prom_opal_hold_cpus();
3016 prom_opal_takeover();
3019 #endif /* __BIG_ENDIAN__ */
3020 if (of_platform == PLATFORM_OPAL)
3021 prom_instantiate_opal();
3022 #endif /* CONFIG_PPC_POWERNV */
3025 /* instantiate sml */
3026 prom_instantiate_sml();
3030 * On non-powermacs, put all CPUs in spin-loops.
3032 * PowerMacs use a different mechanism to spin CPUs
3034 * (This must be done after instanciating RTAS)
3036 if (of_platform != PLATFORM_POWERMAC &&
3037 of_platform != PLATFORM_OPAL)
3041 * Fill in some infos for use by the kernel later on
3043 if (prom_memory_limit) {
3044 __be64 val = cpu_to_be64(prom_memory_limit);
3045 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3050 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3053 if (prom_iommu_force_on)
3054 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3057 if (prom_tce_alloc_start) {
3058 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3059 &prom_tce_alloc_start,
3060 sizeof(prom_tce_alloc_start));
3061 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3062 &prom_tce_alloc_end,
3063 sizeof(prom_tce_alloc_end));
3068 * Fixup any known bugs in the device-tree
3070 fixup_device_tree();
3073 * Now finally create the flattened device-tree
3075 prom_printf("copying OF device tree...\n");
3076 flatten_device_tree();
3079 * in case stdin is USB and still active on IBM machines...
3080 * Unfortunately quiesce crashes on some powermacs if we have
3081 * closed stdin already (in particular the powerbook 101). It
3082 * appears that the OPAL version of OFW doesn't like it either.
3084 if (of_platform != PLATFORM_POWERMAC &&
3085 of_platform != PLATFORM_OPAL)
3089 * Call OF "quiesce" method to shut down pending DMA's from
3092 prom_printf("Calling quiesce...\n");
3093 call_prom("quiesce", 0, 0);
3096 * And finally, call the kernel passing it the flattened device
3097 * tree and NULL as r5, thus triggering the new entry point which
3098 * is common to us and kexec
3100 hdr = dt_header_start;
3102 /* Don't print anything after quiesce under OPAL, it crashes OFW */
3103 if (of_platform != PLATFORM_OPAL) {
3104 prom_printf("returning from prom_init\n");
3105 prom_debug("->dt_header_start=0x%x\n", hdr);
3109 reloc_got2(-offset);
3114 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3115 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3116 __start(hdr, kbase, 0, 0, 0,
3117 prom_opal_base, prom_opal_entry);
3119 __start(hdr, kbase, 0, 0, 0, 0, 0);