/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
/*
 * Manage page tables very early on.
 */
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt = 2;
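
/*
 * The tables themselves are defined in head_64.S.  next_early_pgt starts
 * at 2 because, as far as the startup code suggests, the first two
 * early_dynamic_pgts pages are already consumed by the identity mapping
 * built for the switchover there.
 */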

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	unsigned long i;

	for (i = 0; i < PTRS_PER_PGD-1; i++)
		early_level4_pgt[i].pgd = 0;

	next_early_pgt = 0;

	write_cr3(__pa(early_level4_pgt));
}
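
/*
 * Note: early_make_pgtable() below is expected to be reached from the
 * early #PF path (the early IDT handlers installed later in
 * x86_64_start_kernel, with the fault dispatch done in head_64.S) each
 * time a kernel address is touched that is not mapped yet.
 */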

/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	unsigned long i;
	pgdval_t pgd, *pgd_p;
	pudval_t *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt))
		return -1;

	i = (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	pgd_p = &early_level4_pgt[i].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd && next_early_pgt < EARLY_DYNAMIC_PAGE_TABLES) {
		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	} else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES-1)
			reset_early_page_tables();

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PUD; i++)
			pud_p[i] = 0;

		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	i = (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
	pud_p += i;

	pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
	pmd = (physaddr & PUD_MASK) + (__PAGE_KERNEL_LARGE & ~_PAGE_GLOBAL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_p[i] = pmd;
		pmd += PMD_SIZE;
	}

	*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;

	return 0;
}

/* Don't add a printk in there. printk relies on the PDA which is not initialized
   yet. */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}
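
/*
 * Copy the boot parameters (and the command line they point at) out of the
 * real-mode data area into kernel variables, after sanitizing them, so that
 * later code never has to dereference the low-memory copy.
 */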
static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;

	memcpy(&boot_params, real_mode_data, sizeof boot_params);
	sanitize_boot_params(&boot_params);
	if (boot_params.hdr.cmd_line_ptr) {
		command_line = __va(boot_params.hdr.cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}
}

void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
	BUILD_BUG_ON(MODULES_VADDR - KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	/* clear bss before set_intr_gate with early_idt_handler */
	clear_bss();

	/* XXX - this is wrong... we need to build page tables from scratch */
	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
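
	/*
	 * Install a minimal IDT: every exception vector points at the early
	 * handler(s), so that faults taken before trap_init() are at least
	 * reported -- and, presumably, so that an early #PF can reach
	 * early_make_pgtable() above.
	 */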
	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
#ifdef CONFIG_EARLY_PRINTK
		set_intr_gate(i, &early_idt_handlers[i]);
#else
		set_intr_gate(i, early_idt_handler);
#endif
	}
	load_idt((const struct desc_ptr *)&idt_descr);

	copy_bootdata(__va(real_mode_data));

	if (console_loglevel == 10)
		early_printk("Kernel alive\n");
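
	/*
	 * Hand over to init_level4_pgt: only the top PGD slot (the kernel-high
	 * mapping at __START_KERNEL_map) is carried across from the early page
	 * tables; everything else is presumably rebuilt later in setup_arch().
	 */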
	clear_page(init_level4_pgt);
	/* set init_level4_pgt kernel high mapping */
	init_level4_pgt[511] = early_level4_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always non-zero if boot_params has already been copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));
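
	/*
	 * Reserve the kernel image itself (_text through __bss_stop) so that
	 * memblock never hands that range out.
	 */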
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		/* Assume only end is not page aligned */
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
	}
#endif
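
	/*
	 * Keep the BIOS EBDA (Extended BIOS Data Area, just below 640K) out
	 * of the allocator as well.
	 */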
	reserve_ebda_region();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

	start_kernel();
}