/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/debugreg.h>

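/*
 * Map the single 2MB-aligned region containing @addr with an
 * executable large page, allocating the intermediate PUD/PMD tables
 * from the image's control pages as needed.
 */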
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;
	struct page *page;
	int result = -ENOMEM;

	addr &= PMD_MASK;
	pgd += pgd_index(addr);
	if (!pgd_present(*pgd)) {
		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			goto out;
		pud = (pud_t *)page_address(page);
		memset(pud, 0, PAGE_SIZE);
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud)) {
		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			goto out;
		pmd = (pmd_t *)page_address(page);
		memset(pmd, 0, PAGE_SIZE);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
	result = 0;
out:
	return result;
}

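/*
 * Fill one PMD-level table with executable 2MB identity mappings,
 * covering the PUD_SIZE (1GB) span that starts at @addr.
 */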
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

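/*
 * Populate one PUD-level table for the PGDIR_SIZE span at @addr: one
 * freshly allocated PMD table per gigabyte up to @last_addr, with the
 * remaining entries cleared.
 */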
static int init_level3_page(struct kimage *image, pud_t *level3p,
			    unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result = 0;

	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}

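/*
 * Build the top level of the identity mapping: one PUD table per
 * populated PGDIR_SIZE slot up to @last_addr, the rest cleared.
 */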
static int init_level4_page(struct kimage *image, pgd_t *level4p,
			    unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result = 0;

	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}

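/*
 * Free the transition page-table pages recorded in image->arch.
 * free_page() ignores a zero address, so this is safe even when some
 * of the levels were never allocated.
 */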
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

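/*
 * Map the kernel virtual address of relocate_kernel() to its physical
 * copy in the control page, so instruction fetch keeps working at the
 * moment the identity-mapped page tables are switched in.
 */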
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}

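/*
 * Build the full set of page tables used during relocation: an
 * identity map of physical memory, a mapping for image->start, and
 * the transition mapping for relocate_kernel().
 */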
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;
	int result;

	level4p = (pgd_t *)__va(start_pgtable);
	result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
	if (result)
		return result;
	/*
	 * image->start may be outside 0 ~ max_pfn, for example when
	 * jumping back to the original kernel from a kexeced kernel.
	 */
	result = init_one_level2_page(image, level4p, image->start);
	if (result)
		return result;
	return init_transition_pgtable(image, level4p);
}

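/*
 * set_idt()/set_gdt() point the CPU at a new descriptor table via
 * lidtq/lgdtq; machine_kexec() calls them with a zero-length table to
 * invalidate the IDT and GDT before handing control to the new kernel.
 */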
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}

static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

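/*
 * Force-reload the data segment registers from the current GDT so
 * their hidden descriptor caches remain valid after the GDT itself
 * is zapped.
 */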
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

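/*
 * Arch hook run at image-load time: build the identity-mapped page
 * tables inside the reserved control pages.
 */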
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel. kexec/kdump
		 * paths already have calls to disable_IO_APIC() in one
		 * form or another. The kexec jump path also needs one.
		 */
		disable_IO_APIC();
#endif
	}

	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
		(unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part. Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory. At no other time is the descriptor
	 * table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

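/*
 * Export the x86-64 specific symbols that dump tools (e.g.
 * makedumpfile, crash) need to translate addresses in a crash dump.
 */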
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}