4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can use.
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a pagetable.
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you load it into %cr3, and doesn't
37 * need to revalidate it.
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
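/*
 * Illustrative sketch (not compiled; the helper name is made up) of the
 * pfn <-> mfn conversion described above. Compare pte_pfn_to_mfn() and
 * pte_mfn_to_pfn() further down in this file:
 *
 *	pte_t example_mk_pte(unsigned long pfn, pgprot_t prot)
 *	{
 *		unsigned long mfn = pfn_to_mfn(pfn);	// p2m lookup
 *		// the value stored in the pagetable carries the mfn
 *		return __pte_ma(((pteval_t)mfn << PAGE_SHIFT) | pgprot_val(prot));
 *	}
 *
 * Reading the entry back with __pte_val goes the other way, via
 * mfn_to_pfn(), so the guest only ever sees pfns.
 */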
41 #include <linux/sched/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/export.h>
47 #include <linux/init.h>
48 #include <linux/gfp.h>
49 #include <linux/memblock.h>
50 #include <linux/seq_file.h>
51 #include <linux/crash_dump.h>
52 #ifdef CONFIG_KEXEC_CORE
53 #include <linux/kexec.h>
56 #include <trace/events/xen.h>
58 #include <asm/pgtable.h>
59 #include <asm/tlbflush.h>
60 #include <asm/fixmap.h>
61 #include <asm/mmu_context.h>
62 #include <asm/setup.h>
63 #include <asm/paravirt.h>
64 #include <asm/e820/api.h>
65 #include <asm/linkage.h>
71 #include <asm/xen/hypercall.h>
72 #include <asm/xen/hypervisor.h>
76 #include <xen/interface/xen.h>
77 #include <xen/interface/hvm/hvm_op.h>
78 #include <xen/interface/version.h>
79 #include <xen/interface/memory.h>
80 #include <xen/hvc-console.h>
82 #include "multicalls.h"
88 * Identity map, in addition to plain kernel map. This needs to be
89 * large enough to supply the pte pages used to map the rest.
90 * Each page of ptes can map 2MB.
92 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
93 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
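/*
 * With PAE (which 32-bit Xen PV guests require), PTRS_PER_PTE is 512,
 * so each of these pte pages maps 512 * 4kB = 2MB and the four pages
 * reserved here cover an 8MB identity mapping.
 */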
96 /* l3 pud for userspace vsyscall mapping */
97 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
98 #endif /* CONFIG_X86_64 */
101 * Note about cr3 (pagetable base) values:
103 * xen_cr3 contains the current logical cr3 value; it contains the
104 * last set cr3. This may not be the current effective cr3, because
105 * its update may be being lazily deferred. However, a vcpu looking
106 * at its own cr3 can use this value knowing that everything will
107 * be self-consistent.
109 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
110 * hypercall to set the vcpu cr3 is complete (so it may be a little
111 * out of date, but it will never be set early). If one vcpu is
112 * looking at another vcpu's cr3 value, it should use this variable.
114 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
115 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
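/*
 * For example, xen_drop_mm_ref() below decides whether some other vcpu
 * may still be using a pagetable by checking the committed value:
 *
 *	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 *		...that cpu still references mm's pagetable...
 *
 * whereas a vcpu inspecting itself can use this_cpu_read(xen_cr3).
 */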
117 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
120 * Just beyond the highest usermode address. STACK_TOP_MAX has a
121 * redzone above it, so round it up to a PGD boundary.
123 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125 void make_lowmem_page_readonly(void *vaddr)
128 unsigned long address = (unsigned long)vaddr;
131 pte = lookup_address(address, &level);
133 return; /* vaddr missing */
135 ptev = pte_wrprotect(*pte);
137 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
141 void make_lowmem_page_readwrite(void *vaddr)
144 unsigned long address = (unsigned long)vaddr;
147 pte = lookup_address(address, &level);
149 return; /* vaddr missing */
151 ptev = pte_mkwrite(*pte);
153 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
158 static bool xen_page_pinned(void *ptr)
160 struct page *page = virt_to_page(ptr);
162 return PagePinned(page);
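/*
 * Callers use xen_page_pinned() to pick between a plain store (the page
 * is still an ordinary writable page) and a batched hypercall (the page
 * is pinned, hence mapped RO and validated by Xen). A sketch of the
 * pattern used by xen_set_pmd()/xen_set_pud() below:
 *
 *	if (!xen_page_pinned(ptr))
 *		*ptr = val;			// direct write is fine
 *	else
 *		xen_set_pmd_hyper(ptr, val);	// must go via Xen
 */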
165 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
167 struct multicall_space mcs;
168 struct mmu_update *u;
170 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
172 mcs = xen_mc_entry(sizeof(*u));
175 /* ptep might be kmapped when using 32-bit HIGHPTE */
176 u->ptr = virt_to_machine(ptep).maddr;
177 u->val = pte_val_ma(pteval);
179 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
181 xen_mc_issue(PARAVIRT_LAZY_MMU);
183 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
185 static void xen_extend_mmu_update(const struct mmu_update *update)
187 struct multicall_space mcs;
188 struct mmu_update *u;
190 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
192 if (mcs.mc != NULL) {
195 mcs = __xen_mc_entry(sizeof(*u));
196 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
203 static void xen_extend_mmuext_op(const struct mmuext_op *op)
205 struct multicall_space mcs;
208 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
210 if (mcs.mc != NULL) {
213 mcs = __xen_mc_entry(sizeof(*u));
214 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
221 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
229 /* ptr may be ioremapped for 64-bit pagetable setup */
230 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
231 u.val = pmd_val_ma(val);
232 xen_extend_mmu_update(&u);
234 xen_mc_issue(PARAVIRT_LAZY_MMU);
239 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
241 trace_xen_mmu_set_pmd(ptr, val);
243 /* If page is not pinned, we can just update the entry directly. */
245 if (!xen_page_pinned(ptr)) {
250 xen_set_pmd_hyper(ptr, val);
254 * Associate a virtual page frame with a given machine page frame (mfn)
255 * and protection flags for that frame.
257 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
259 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
262 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
266 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
271 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
272 u.val = pte_val_ma(pteval);
273 xen_extend_mmu_update(&u);
275 xen_mc_issue(PARAVIRT_LAZY_MMU);
280 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
282 if (!xen_batched_set_pte(ptep, pteval)) {
284 * Could call native_set_pte() here and trap and
285 * emulate the PTE write but with 32-bit guests this
286 * needs two traps (one for each of the two 32-bit
287 * words in the PTE) so do one hypercall directly
292 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
293 u.val = pte_val_ma(pteval);
294 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
298 static void xen_set_pte(pte_t *ptep, pte_t pteval)
300 trace_xen_mmu_set_pte(ptep, pteval);
301 __xen_set_pte(ptep, pteval);
304 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
305 pte_t *ptep, pte_t pteval)
307 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
308 __xen_set_pte(ptep, pteval);
311 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
312 unsigned long addr, pte_t *ptep)
314 /* Just return the pte as-is. We preserve the bits on commit */
315 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
319 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
320 pte_t *ptep, pte_t pte)
324 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
327 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
328 u.val = pte_val_ma(pte);
329 xen_extend_mmu_update(&u);
331 xen_mc_issue(PARAVIRT_LAZY_MMU);
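/*
 * The start/commit pair above is used by generic mm code roughly like
 * this (a sketch, not the literal caller):
 *
 *	old = ptep_modify_prot_start(mm, addr, ptep);
 *	new = pte_modify(old, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, new);
 *
 * Because the commit uses MMU_PT_UPDATE_PRESERVE_AD, accessed/dirty bits
 * that the hypervisor sets between start and commit are not lost.
 */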
334 /* Assume pteval_t is equivalent to all the other *val_t types. */
335 static pteval_t pte_mfn_to_pfn(pteval_t val)
337 if (val & _PAGE_PRESENT) {
338 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
339 unsigned long pfn = mfn_to_pfn(mfn);
341 pteval_t flags = val & PTE_FLAGS_MASK;
342 if (unlikely(pfn == ~0))
343 val = flags & ~_PAGE_PRESENT;
345 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
351 static pteval_t pte_pfn_to_mfn(pteval_t val)
353 if (val & _PAGE_PRESENT) {
354 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
355 pteval_t flags = val & PTE_FLAGS_MASK;
358 mfn = __pfn_to_mfn(pfn);
361 * If there's no mfn for the pfn, then just create an
362 * empty non-present pte. Unfortunately this loses
363 * information about the original pfn, so
364 * pte_mfn_to_pfn is asymmetric.
366 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
370 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
371 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
377 __visible pteval_t xen_pte_val(pte_t pte)
379 pteval_t pteval = pte.pte;
381 return pte_mfn_to_pfn(pteval);
383 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
385 __visible pgdval_t xen_pgd_val(pgd_t pgd)
387 return pte_mfn_to_pfn(pgd.pgd);
389 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
391 __visible pte_t xen_make_pte(pteval_t pte)
393 pte = pte_pfn_to_mfn(pte);
395 return native_make_pte(pte);
397 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
399 __visible pgd_t xen_make_pgd(pgdval_t pgd)
401 pgd = pte_pfn_to_mfn(pgd);
402 return native_make_pgd(pgd);
404 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
406 __visible pmdval_t xen_pmd_val(pmd_t pmd)
408 return pte_mfn_to_pfn(pmd.pmd);
410 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
412 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
420 /* ptr may be ioremapped for 64-bit pagetable setup */
421 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
422 u.val = pud_val_ma(val);
423 xen_extend_mmu_update(&u);
425 xen_mc_issue(PARAVIRT_LAZY_MMU);
430 static void xen_set_pud(pud_t *ptr, pud_t val)
432 trace_xen_mmu_set_pud(ptr, val);
434 /* If page is not pinned, we can just update the entry directly. */
436 if (!xen_page_pinned(ptr)) {
441 xen_set_pud_hyper(ptr, val);
444 #ifdef CONFIG_X86_PAE
445 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
447 trace_xen_mmu_set_pte_atomic(ptep, pte);
448 set_64bit((u64 *)ptep, native_pte_val(pte));
451 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
453 trace_xen_mmu_pte_clear(mm, addr, ptep);
454 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
455 native_pte_clear(mm, addr, ptep);
458 static void xen_pmd_clear(pmd_t *pmdp)
460 trace_xen_mmu_pmd_clear(pmdp);
461 set_pmd(pmdp, __pmd(0));
463 #endif /* CONFIG_X86_PAE */
465 __visible pmd_t xen_make_pmd(pmdval_t pmd)
467 pmd = pte_pfn_to_mfn(pmd);
468 return native_make_pmd(pmd);
470 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
472 #if CONFIG_PGTABLE_LEVELS == 4
473 __visible pudval_t xen_pud_val(pud_t pud)
475 return pte_mfn_to_pfn(pud.pud);
477 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
479 __visible pud_t xen_make_pud(pudval_t pud)
481 pud = pte_pfn_to_mfn(pud);
483 return native_make_pud(pud);
485 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
487 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
489 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
490 unsigned offset = pgd - pgd_page;
491 pgd_t *user_ptr = NULL;
493 if (offset < pgd_index(USER_LIMIT)) {
494 struct page *page = virt_to_page(pgd_page);
495 user_ptr = (pgd_t *)page->private;
503 static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
507 u.ptr = virt_to_machine(ptr).maddr;
508 u.val = p4d_val_ma(val);
509 xen_extend_mmu_update(&u);
513 * Raw hypercall-based set_p4d, intended for use in early boot before
514 * there's a page structure. This implies:
515 * 1. The only existing pagetable is the kernel's
516 * 2. It is always pinned
517 * 3. It has no user pagetable attached to it
519 static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
525 __xen_set_p4d_hyper(ptr, val);
527 xen_mc_issue(PARAVIRT_LAZY_MMU);
532 static void xen_set_p4d(p4d_t *ptr, p4d_t val)
534 pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
537 trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
539 /* If page is not pinned, we can just update the entry directly. */
541 if (!xen_page_pinned(ptr)) {
544 WARN_ON(xen_page_pinned(user_ptr));
545 pgd_val.pgd = p4d_val_ma(val);
551 /* If it's pinned, then we can at least batch the kernel and
552 user updates together. */
555 __xen_set_p4d_hyper(ptr, val);
557 __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
559 xen_mc_issue(PARAVIRT_LAZY_MMU);
561 #endif /* CONFIG_PGTABLE_LEVELS == 4 */
563 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
564 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
565 bool last, unsigned long limit)
567 int i, nr, flush = 0;
569 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
570 for (i = 0; i < nr; i++) {
571 if (!pmd_none(pmd[i]))
572 flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
577 static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
578 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
579 bool last, unsigned long limit)
581 int i, nr, flush = 0;
583 nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
584 for (i = 0; i < nr; i++) {
587 if (pud_none(pud[i]))
590 pmd = pmd_offset(&pud[i], 0);
591 if (PTRS_PER_PMD > 1)
592 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
593 flush |= xen_pmd_walk(mm, pmd, func,
594 last && i == nr - 1, limit);
599 static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
600 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
601 bool last, unsigned long limit)
603 int i, nr, flush = 0;
605 nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
606 for (i = 0; i < nr; i++) {
609 if (p4d_none(p4d[i]))
612 pud = pud_offset(&p4d[i], 0);
613 if (PTRS_PER_PUD > 1)
614 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
615 flush |= xen_pud_walk(mm, pud, func,
616 last && i == nr - 1, limit);
622 * (Yet another) pagetable walker. This one is intended for pinning a
623 * pagetable. This means that it walks a pagetable and calls the
624 * callback function on each page it finds making up the page table,
625 * at every level. It walks the entire pagetable, but it only bothers
626 * pinning pte pages which are below limit. In the normal case this
627 * will be STACK_TOP_MAX, but at boot we need to pin up to FIXADDR_TOP.
630 * For 32-bit the important bit is that we don't pin beyond there,
631 * because then we start getting into Xen's ptes.
633 * For 64-bit, we must skip the Xen hole in the middle of the address
634 * space, just after the big x86-64 virtual hole.
636 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
637 int (*func)(struct mm_struct *mm, struct page *,
641 int i, nr, flush = 0;
642 unsigned hole_low, hole_high;
644 /* The limit is the last byte to be touched */
646 BUG_ON(limit >= FIXADDR_TOP);
649 * 64-bit has a great big hole in the middle of the address
650 * space, which contains the Xen mappings. On 32-bit these
651 * will end up making a zero-sized hole, so this is a no-op.
653 hole_low = pgd_index(USER_LIMIT);
654 hole_high = pgd_index(PAGE_OFFSET);
656 nr = pgd_index(limit) + 1;
657 for (i = 0; i < nr; i++) {
660 if (i >= hole_low && i < hole_high)
663 if (pgd_none(pgd[i]))
666 p4d = p4d_offset(&pgd[i], 0);
667 if (PTRS_PER_P4D > 1)
668 flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
669 flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
672 /* Do the top level last, so that the callbacks can use it as
673 a cue to do final things like tlb flushes. */
674 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
679 static int xen_pgd_walk(struct mm_struct *mm,
680 int (*func)(struct mm_struct *mm, struct page *,
684 return __xen_pgd_walk(mm, mm->pgd, func, limit);
687 /* If we're using split pte locks, then take the page's lock and
688 return a pointer to it. Otherwise return NULL. */
689 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
691 spinlock_t *ptl = NULL;
693 #if USE_SPLIT_PTE_PTLOCKS
694 ptl = ptlock_ptr(page);
695 spin_lock_nest_lock(ptl, &mm->page_table_lock);
701 static void xen_pte_unlock(void *v)
707 static void xen_do_pin(unsigned level, unsigned long pfn)
712 op.arg1.mfn = pfn_to_mfn(pfn);
714 xen_extend_mmuext_op(&op);
717 static int xen_pin_page(struct mm_struct *mm, struct page *page,
720 unsigned pgfl = TestSetPagePinned(page);
724 flush = 0; /* already pinned */
725 else if (PageHighMem(page))
726 /* kmaps need flushing if we found an unpinned highpage */
730 void *pt = lowmem_page_address(page);
731 unsigned long pfn = page_to_pfn(page);
732 struct multicall_space mcs = __xen_mc_entry(0);
738 * We need to hold the pagetable lock between the time
739 * we make the pagetable RO and when we actually pin
740 * it. If we don't, then other users may come in and
741 * attempt to update the pagetable by writing it,
742 * which will fail because the memory is RO but not
743 * pinned, so Xen won't do the trap'n'emulate.
745 * If we're using split pte locks, we can't hold the
746 * entire pagetable's worth of locks during the
747 * traverse, because we may wrap the preempt count (8
748 * bits). The solution is to mark RO and pin each PTE
749 * page while holding the lock. This means the number
750 * of locks we end up holding is never more than a
751 * batch size (~32 entries, at present).
753 * If we're not using split pte locks, we needn't pin
754 * the PTE pages independently, because we're
755 * protected by the overall pagetable lock.
759 ptl = xen_pte_lock(page, mm);
761 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
762 pfn_pte(pfn, PAGE_KERNEL_RO),
763 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
766 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
768 /* Queue a deferred unlock for when this batch is completed. */
770 xen_mc_callback(xen_pte_unlock, ptl);
777 /* This is called just after a mm has been created, but it has not
778 been used yet. We need to make sure that its pagetable is all
779 read-only, and can be pinned. */
780 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
782 trace_xen_mmu_pgd_pin(mm, pgd);
786 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
787 /* re-enable interrupts for flushing */
797 pgd_t *user_pgd = xen_get_user_pgd(pgd);
799 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
802 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
803 xen_do_pin(MMUEXT_PIN_L4_TABLE,
804 PFN_DOWN(__pa(user_pgd)));
807 #else /* CONFIG_X86_32 */
808 #ifdef CONFIG_X86_PAE
809 /* Need to make sure unshared kernel PMD is pinnable */
810 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
813 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
814 #endif /* CONFIG_X86_64 */
818 static void xen_pgd_pin(struct mm_struct *mm)
820 __xen_pgd_pin(mm, mm->pgd);
824 * On save, we need to pin all pagetables to make sure they get their
825 * mfns turned into pfns. Search the list for any unpinned pgds and pin
826 * them (unpinned pgds are not currently in use, probably because the
827 * process is under construction or destruction).
829 * Expected to be called in stop_machine() ("equivalent to taking
830 * every spinlock in the system"), so the locking doesn't really
831 * matter all that much.
833 void xen_mm_pin_all(void)
837 spin_lock(&pgd_lock);
839 list_for_each_entry(page, &pgd_list, lru) {
840 if (!PagePinned(page)) {
841 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
842 SetPageSavePinned(page);
846 spin_unlock(&pgd_lock);
850 * The init_mm pagetable is really pinned as soon as it's created, but
851 * that's before we have page structures to store the bits. So do all
852 * the book-keeping now.
854 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
861 static void __init xen_mark_init_mm_pinned(void)
863 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
866 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
869 unsigned pgfl = TestClearPagePinned(page);
871 if (pgfl && !PageHighMem(page)) {
872 void *pt = lowmem_page_address(page);
873 unsigned long pfn = page_to_pfn(page);
874 spinlock_t *ptl = NULL;
875 struct multicall_space mcs;
878 * Do the converse to pin_page. If we're using split
879 * pte locks, we must hold the lock while
880 * the pte page is unpinned but still RO to prevent
881 * concurrent updates from seeing it in this
882 * partially-pinned state.
884 if (level == PT_PTE) {
885 ptl = xen_pte_lock(page, mm);
888 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
891 mcs = __xen_mc_entry(0);
893 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
894 pfn_pte(pfn, PAGE_KERNEL),
895 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
898 /* unlock when batch completed */
899 xen_mc_callback(xen_pte_unlock, ptl);
903 return 0; /* never need to flush on unpin */
906 /* Release a pagetable's pages back as normal RW */
907 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
909 trace_xen_mmu_pgd_unpin(mm, pgd);
913 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
917 pgd_t *user_pgd = xen_get_user_pgd(pgd);
920 xen_do_pin(MMUEXT_UNPIN_TABLE,
921 PFN_DOWN(__pa(user_pgd)));
922 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
927 #ifdef CONFIG_X86_PAE
928 /* Need to make sure unshared kernel PMD is unpinned */
929 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
933 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
938 static void xen_pgd_unpin(struct mm_struct *mm)
940 __xen_pgd_unpin(mm, mm->pgd);
944 * On resume, undo any pinning done at save, so that the rest of the
945 * kernel doesn't see any unexpected pinned pagetables.
947 void xen_mm_unpin_all(void)
951 spin_lock(&pgd_lock);
953 list_for_each_entry(page, &pgd_list, lru) {
954 if (PageSavePinned(page)) {
955 BUG_ON(!PagePinned(page));
956 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
957 ClearPageSavePinned(page);
961 spin_unlock(&pgd_lock);
964 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
966 spin_lock(&next->page_table_lock);
968 spin_unlock(&next->page_table_lock);
971 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
973 spin_lock(&mm->page_table_lock);
975 spin_unlock(&mm->page_table_lock);
980 /* Another cpu may still have its %cr3 pointing at the pagetable, so
981 we need to repoint it somewhere else before we can unpin it. */
982 static void drop_other_mm_ref(void *info)
984 struct mm_struct *mm = info;
985 struct mm_struct *active_mm;
987 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
989 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
990 leave_mm(smp_processor_id());
992 /* If this cpu still has a stale cr3 reference, then make sure
993 it has been flushed. */
994 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
995 load_cr3(swapper_pg_dir);
998 static void xen_drop_mm_ref(struct mm_struct *mm)
1003 if (current->active_mm == mm) {
1004 if (current->mm == mm)
1005 load_cr3(swapper_pg_dir);
1007 leave_mm(smp_processor_id());
1010 /* Get the "official" set of cpus referring to our pagetable. */
1011 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1012 for_each_online_cpu(cpu) {
1013 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1014 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1016 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1020 cpumask_copy(mask, mm_cpumask(mm));
1022 /* It's possible that a vcpu may have a stale reference to our
1023 cr3, because it's in lazy mode, and it hasn't yet flushed
1024 its set of pending hypercalls. In this case, we can
1025 look at its actual current cr3 value, and force it to flush if needed. */
1027 for_each_online_cpu(cpu) {
1028 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1029 cpumask_set_cpu(cpu, mask);
1032 if (!cpumask_empty(mask))
1033 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1034 free_cpumask_var(mask);
1037 static void xen_drop_mm_ref(struct mm_struct *mm)
1039 if (current->active_mm == mm)
1040 load_cr3(swapper_pg_dir);
1045 * While a process runs, Xen pins its pagetables, which means that the
1046 * hypervisor forces them to be read-only, and it controls all updates
1047 * to them. This means that all pagetable updates have to go via the
1048 * hypervisor, which is moderately expensive.
1050 * Since we're pulling the pagetable down, we switch to use init_mm,
1051 * unpin the old process's pagetable and mark it all read-write, which
1052 * allows further operations on it to be simple memory accesses.
1054 * The only subtle point is that another CPU may be still using the
1055 * pagetable because of lazy tlb flushing. This means we need to
1056 * switch all CPUs off this pagetable before we can unpin it.
1058 static void xen_exit_mmap(struct mm_struct *mm)
1060 get_cpu(); /* make sure we don't move around */
1061 xen_drop_mm_ref(mm);
1064 spin_lock(&mm->page_table_lock);
1066 /* pgd may not be pinned in the error exit path of execve */
1067 if (xen_page_pinned(mm->pgd))
1070 spin_unlock(&mm->page_table_lock);
1073 static void xen_post_allocator_init(void);
1075 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1077 struct mmuext_op op;
1080 op.arg1.mfn = pfn_to_mfn(pfn);
1081 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1085 #ifdef CONFIG_X86_64
1086 static void __init xen_cleanhighmap(unsigned long vaddr,
1087 unsigned long vaddr_end)
1089 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1090 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1092 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1093 * We include the PMD passed in on _both_ boundaries. */
1094 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1095 pmd++, vaddr += PMD_SIZE) {
1098 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1099 set_pmd(pmd, __pmd(0));
1101 /* In case we did something silly, we should crash in this function
1102 * instead of somewhere later and be confusing. */
1107 * Make a page range writeable and free it.
1109 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1111 void *vaddr = __va(paddr);
1112 void *vaddr_end = vaddr + size;
1114 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1115 make_lowmem_page_readwrite(vaddr);
1117 memblock_free(paddr, size);
1120 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1122 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1125 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1126 ClearPagePinned(virt_to_page(__va(pa)));
1127 xen_free_ro_pages(pa, PAGE_SIZE);
1130 static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1136 if (pmd_large(*pmd)) {
1137 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1138 xen_free_ro_pages(pa, PMD_SIZE);
1142 pte_tbl = pte_offset_kernel(pmd, 0);
1143 for (i = 0; i < PTRS_PER_PTE; i++) {
1144 if (pte_none(pte_tbl[i]))
1146 pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1147 xen_free_ro_pages(pa, PAGE_SIZE);
1149 set_pmd(pmd, __pmd(0));
1150 xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1153 static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1159 if (pud_large(*pud)) {
1160 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1161 xen_free_ro_pages(pa, PUD_SIZE);
1165 pmd_tbl = pmd_offset(pud, 0);
1166 for (i = 0; i < PTRS_PER_PMD; i++) {
1167 if (pmd_none(pmd_tbl[i]))
1169 xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1171 set_pud(pud, __pud(0));
1172 xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1175 static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1181 if (p4d_large(*p4d)) {
1182 pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1183 xen_free_ro_pages(pa, P4D_SIZE);
1187 pud_tbl = pud_offset(p4d, 0);
1188 for (i = 0; i < PTRS_PER_PUD; i++) {
1189 if (pud_none(pud_tbl[i]))
1191 xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1193 set_p4d(p4d, __p4d(0));
1194 xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1198 * Since it is well isolated we can (and since it is perhaps large we should)
1199 * also free the page tables mapping the initial P->M table.
1201 static void __init xen_cleanmfnmap(unsigned long vaddr)
1208 unpin = (vaddr == 2 * PGDIR_SIZE);
1210 pgd = pgd_offset_k(vaddr);
1211 p4d = p4d_offset(pgd, 0);
1212 for (i = 0; i < PTRS_PER_P4D; i++) {
1213 if (p4d_none(p4d[i]))
1215 xen_cleanmfnmap_p4d(p4d + i, unpin);
1217 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
1218 set_pgd(pgd, __pgd(0));
1219 xen_cleanmfnmap_free_pgtbl(p4d, unpin);
1223 static void __init xen_pagetable_p2m_free(void)
1228 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1230 /* No memory or already called. */
1231 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1234 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1235 memset((void *)xen_start_info->mfn_list, 0xff, size);
1237 addr = xen_start_info->mfn_list;
1239 * We could be in __ka space.
1240 * We round up to the PMD, which means that if anybody at this stage is
1241 * using the __ka address of xen_start_info or
1242 * xen_start_info->shared_info they are going to crash. Fortunately
1243 * we have already revectored in xen_setup_kernel_pagetable and in
1244 * xen_setup_shared_info.
1246 size = roundup(size, PMD_SIZE);
1248 if (addr >= __START_KERNEL_map) {
1249 xen_cleanhighmap(addr, addr + size);
1250 size = PAGE_ALIGN(xen_start_info->nr_pages *
1251 sizeof(unsigned long));
1252 memblock_free(__pa(addr), size);
1254 xen_cleanmfnmap(addr);
1258 static void __init xen_pagetable_cleanhighmap(void)
1263 /* At this stage, cleanup_highmap has already cleaned __ka space
1264 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1265 * the ramdisk). We continue on, erasing PMD entries that point to page
1266 * tables - do note that they are accessible at this stage via __va.
1267 * For good measure we also round up to the PMD - which means that if
1268 * anybody is using a __ka address for the initial boot-stack and tries
1269 * to use it, they are going to crash. The xen_start_info has been
1270 * taken care of already in xen_setup_kernel_pagetable. */
1271 addr = xen_start_info->pt_base;
1272 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1274 xen_cleanhighmap(addr, addr + size);
1275 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1277 /* This is superfluous and not strictly necessary, but you know what,
1278 * let's do it. The MODULES_VADDR -> MODULES_END range should be clear of
1279 * anything at this stage. */
1280 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1285 static void __init xen_pagetable_p2m_setup(void)
1287 xen_vmalloc_p2m_tree();
1289 #ifdef CONFIG_X86_64
1290 xen_pagetable_p2m_free();
1292 xen_pagetable_cleanhighmap();
1294 /* And revector! Bye bye old array */
1295 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1298 static void __init xen_pagetable_init(void)
1301 xen_post_allocator_init();
1303 xen_pagetable_p2m_setup();
1305 /* Allocate and initialize top and mid mfn levels for p2m structure */
1306 xen_build_mfn_list_list();
1308 /* Remap memory freed due to conflicts with E820 map */
1311 xen_setup_shared_info();
1313 static void xen_write_cr2(unsigned long cr2)
1315 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1318 static unsigned long xen_read_cr2(void)
1320 return this_cpu_read(xen_vcpu)->arch.cr2;
1323 unsigned long xen_read_cr2_direct(void)
1325 return this_cpu_read(xen_vcpu_info.arch.cr2);
1328 static void xen_flush_tlb(void)
1330 struct mmuext_op *op;
1331 struct multicall_space mcs;
1333 trace_xen_mmu_flush_tlb(0);
1337 mcs = xen_mc_entry(sizeof(*op));
1340 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1341 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1343 xen_mc_issue(PARAVIRT_LAZY_MMU);
1348 static void xen_flush_tlb_single(unsigned long addr)
1350 struct mmuext_op *op;
1351 struct multicall_space mcs;
1353 trace_xen_mmu_flush_tlb_single(addr);
1357 mcs = xen_mc_entry(sizeof(*op));
1359 op->cmd = MMUEXT_INVLPG_LOCAL;
1360 op->arg1.linear_addr = addr & PAGE_MASK;
1361 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1363 xen_mc_issue(PARAVIRT_LAZY_MMU);
1368 static void xen_flush_tlb_others(const struct cpumask *cpus,
1369 struct mm_struct *mm, unsigned long start,
1373 struct mmuext_op op;
1375 DECLARE_BITMAP(mask, num_processors);
1377 DECLARE_BITMAP(mask, NR_CPUS);
1380 struct multicall_space mcs;
1382 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
1384 if (cpumask_empty(cpus))
1385 return; /* nothing to do */
1387 mcs = xen_mc_entry(sizeof(*args));
1389 args->op.arg2.vcpumask = to_cpumask(args->mask);
1391 /* Remove us, and any offline CPUs. */
1392 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1393 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1395 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1396 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
1397 args->op.cmd = MMUEXT_INVLPG_MULTI;
1398 args->op.arg1.linear_addr = start;
1401 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1403 xen_mc_issue(PARAVIRT_LAZY_MMU);
1406 static unsigned long xen_read_cr3(void)
1408 return this_cpu_read(xen_cr3);
1411 static void set_current_cr3(void *v)
1413 this_cpu_write(xen_current_cr3, (unsigned long)v);
1416 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1418 struct mmuext_op op;
1421 trace_xen_mmu_write_cr3(kernel, cr3);
1424 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1428 WARN_ON(mfn == 0 && kernel);
1430 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1433 xen_extend_mmuext_op(&op);
1436 this_cpu_write(xen_cr3, cr3);
1438 /* Update xen_current_cr3 once the batch has actually been committed. */
1440 xen_mc_callback(set_current_cr3, (void *)cr3);
1443 static void xen_write_cr3(unsigned long cr3)
1445 BUG_ON(preemptible());
1447 xen_mc_batch(); /* disables interrupts */
1449 /* Update while interrupts are disabled, so it's atomic with
1451 this_cpu_write(xen_cr3, cr3);
1453 __xen_write_cr3(true, cr3);
1455 #ifdef CONFIG_X86_64
1457 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1459 __xen_write_cr3(false, __pa(user_pgd));
1461 __xen_write_cr3(false, 0);
1465 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1468 #ifdef CONFIG_X86_64
1470 * At the start of the day - when Xen launches a guest, it has already
1471 * built pagetables for the guest. We diligently look over them
1472 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1473 * init_level4_pgt and its friends. Then when we are happy we load
1474 * the new init_level4_pgt - and continue on.
1476 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1477 * up the rest of the pagetables. When it has completed it loads the cr3.
1478 * N.B. that baremetal would start at 'start_kernel' (and the early
1479 * #PF handler would create bootstrap pagetables) - so we are running
1480 * with the same assumptions as what to do when write_cr3 is executed
1483 * Since there are no user-page tables at all, we have two variants
1484 * of xen_write_cr3 - the early bootup (this one), and the late one
1485 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1486 * the Linux kernel and user-space are both in ring 3 while the
1487 * hypervisor is in ring 0.
1489 static void __init xen_write_cr3_init(unsigned long cr3)
1491 BUG_ON(preemptible());
1493 xen_mc_batch(); /* disables interrupts */
1495 /* Update while interrupts are disabled, so it's atomic with
1497 this_cpu_write(xen_cr3, cr3);
1499 __xen_write_cr3(true, cr3);
1501 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1505 static int xen_pgd_alloc(struct mm_struct *mm)
1507 pgd_t *pgd = mm->pgd;
1510 BUG_ON(PagePinned(virt_to_page(pgd)));
1512 #ifdef CONFIG_X86_64
1514 struct page *page = virt_to_page(pgd);
1517 BUG_ON(page->private != 0);
1521 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1522 page->private = (unsigned long)user_pgd;
1524 if (user_pgd != NULL) {
1525 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1526 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1527 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1532 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1538 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1540 #ifdef CONFIG_X86_64
1541 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1544 free_page((unsigned long)user_pgd);
1549 * Init-time set_pte while constructing initial pagetables, which
1550 * doesn't allow RO page table pages to be remapped RW.
1552 * If there is no MFN for this PFN then this page is initially
1553 * ballooned out so clear the PTE (as in decrease_reservation() in
1554 * drivers/xen/balloon.c).
1556 * Many of these PTE updates are done on unpinned and writable pages
1557 * and doing a hypercall for these is unnecessary and expensive. At
1558 * this point it is not possible to tell if a page is pinned or not,
1559 * so always write the PTE directly and rely on Xen trapping and
1560 * emulating any updates as necessary.
1562 __visible pte_t xen_make_pte_init(pteval_t pte)
1564 #ifdef CONFIG_X86_64
1568 * Pages belonging to the initial p2m list mapped outside the default
1569 * address range must be mapped read-only. This region contains the
1570 * page tables for mapping the p2m list, too, and page tables MUST be
1573 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1574 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1575 pfn >= xen_start_info->first_p2m_pfn &&
1576 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1579 pte = pte_pfn_to_mfn(pte);
1580 return native_make_pte(pte);
1582 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1584 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1586 #ifdef CONFIG_X86_32
1587 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1588 if (pte_mfn(pte) != INVALID_P2M_ENTRY
1589 && pte_val_ma(*ptep) & _PAGE_PRESENT)
1590 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1593 native_set_pte(ptep, pte);
1596 /* Early in boot, while setting up the initial pagetable, assume
1597 everything is pinned. */
1598 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1600 #ifdef CONFIG_FLATMEM
1601 BUG_ON(mem_map); /* should only be used early */
1603 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1604 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1607 /* Used for pmd and pud */
1608 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1610 #ifdef CONFIG_FLATMEM
1611 BUG_ON(mem_map); /* should only be used early */
1613 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1616 /* Early release_pte assumes that all pts are pinned, since there's
1617 only init_mm and anything attached to that is pinned. */
1618 static void __init xen_release_pte_init(unsigned long pfn)
1620 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1621 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1624 static void __init xen_release_pmd_init(unsigned long pfn)
1626 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1629 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1631 struct multicall_space mcs;
1632 struct mmuext_op *op;
1634 mcs = __xen_mc_entry(sizeof(*op));
1637 op->arg1.mfn = pfn_to_mfn(pfn);
1639 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1642 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1644 struct multicall_space mcs;
1645 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1647 mcs = __xen_mc_entry(0);
1648 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1649 pfn_pte(pfn, prot), 0);
1652 /* This needs to make sure the new pte page is pinned iff it's being
1653 attached to a pinned pagetable. */
1654 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1657 bool pinned = PagePinned(virt_to_page(mm->pgd));
1659 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1662 struct page *page = pfn_to_page(pfn);
1664 SetPagePinned(page);
1666 if (!PageHighMem(page)) {
1669 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1671 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1672 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1674 xen_mc_issue(PARAVIRT_LAZY_MMU);
1676 /* make sure there are no stray mappings of this page */
1678 kmap_flush_unused();
1683 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1685 xen_alloc_ptpage(mm, pfn, PT_PTE);
1688 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1690 xen_alloc_ptpage(mm, pfn, PT_PMD);
1693 /* This should never happen until we're OK to use struct page */
1694 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1696 struct page *page = pfn_to_page(pfn);
1697 bool pinned = PagePinned(page);
1699 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1702 if (!PageHighMem(page)) {
1705 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1706 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1708 __set_pfn_prot(pfn, PAGE_KERNEL);
1710 xen_mc_issue(PARAVIRT_LAZY_MMU);
1712 ClearPagePinned(page);
1716 static void xen_release_pte(unsigned long pfn)
1718 xen_release_ptpage(pfn, PT_PTE);
1721 static void xen_release_pmd(unsigned long pfn)
1723 xen_release_ptpage(pfn, PT_PMD);
1726 #if CONFIG_PGTABLE_LEVELS >= 4
1727 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1729 xen_alloc_ptpage(mm, pfn, PT_PUD);
1732 static void xen_release_pud(unsigned long pfn)
1734 xen_release_ptpage(pfn, PT_PUD);
1738 void __init xen_reserve_top(void)
1740 #ifdef CONFIG_X86_32
1741 unsigned long top = HYPERVISOR_VIRT_START;
1742 struct xen_platform_parameters pp;
1744 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1745 top = pp.virt_start;
1747 reserve_top_address(-top);
1748 #endif /* CONFIG_X86_32 */
1752 * Like __va(), but returns address in the kernel mapping (which is
1753 * all we have until the physical memory mapping has been set up).
1755 static void * __init __ka(phys_addr_t paddr)
1757 #ifdef CONFIG_X86_64
1758 return (void *)(paddr + __START_KERNEL_map);
1764 /* Convert a machine address to physical address */
1765 static unsigned long __init m2p(phys_addr_t maddr)
1769 maddr &= PTE_PFN_MASK;
1770 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1775 /* Convert a machine address to kernel virtual */
1776 static void * __init m2v(phys_addr_t maddr)
1778 return __ka(m2p(maddr));
1781 /* Set the page permissions on an identity-mapped page */
1782 static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1783 unsigned long flags)
1785 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1786 pte_t pte = pfn_pte(pfn, prot);
1788 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1791 static void __init set_page_prot(void *addr, pgprot_t prot)
1793 return set_page_prot_flags(addr, prot, UVMF_NONE);
1795 #ifdef CONFIG_X86_32
1796 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1798 unsigned pmdidx, pteidx;
1802 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1807 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1810 /* Reuse or allocate a page of ptes */
1811 if (pmd_present(pmd[pmdidx]))
1812 pte_page = m2v(pmd[pmdidx].pmd);
1814 /* Check for free pte pages */
1815 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1818 pte_page = &level1_ident_pgt[ident_pte];
1819 ident_pte += PTRS_PER_PTE;
1821 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1824 /* Install mappings */
1825 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1828 if (pfn > max_pfn_mapped)
1829 max_pfn_mapped = pfn;
1831 if (!pte_none(pte_page[pteidx]))
1834 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1835 pte_page[pteidx] = pte;
1839 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1840 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1842 set_page_prot(pmd, PAGE_KERNEL_RO);
1845 void __init xen_setup_machphys_mapping(void)
1847 struct xen_machphys_mapping mapping;
1849 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1850 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1851 machine_to_phys_nr = mapping.max_mfn + 1;
1853 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1855 #ifdef CONFIG_X86_32
1856 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1857 < machine_to_phys_mapping);
1861 #ifdef CONFIG_X86_64
1862 static void __init convert_pfn_mfn(void *v)
1867 /* All levels are converted the same way, so just treat them as ptes. */
1869 for (i = 0; i < PTRS_PER_PTE; i++)
1870 pte[i] = xen_make_pte(pte[i].pte);
1872 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1875 if (*pt_base == PFN_DOWN(__pa(addr))) {
1876 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1877 clear_page((void *)addr);
1880 if (*pt_end == PFN_DOWN(__pa(addr))) {
1881 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1882 clear_page((void *)addr);
1887 * Set up the initial kernel pagetable.
1889 * We can construct this by grafting the Xen provided pagetable into
1890 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1891 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1892 * kernel has a physical mapping to start with - but that's enough to
1893 * get __va working. We need to fill in the rest of the physical
1894 * mapping once some sort of allocator has been set up.
1896 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1900 unsigned long addr[3];
1901 unsigned long pt_base, pt_end;
1904 /* max_pfn_mapped is the last pfn mapped in the initial memory
1905 * mappings. Considering that on Xen after the kernel mappings we
1906 * have the mappings of some pages that don't exist in pfn space, we
1907 * set max_pfn_mapped to the last real pfn mapped. */
1908 if (xen_start_info->mfn_list < __START_KERNEL_map)
1909 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1911 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1913 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1914 pt_end = pt_base + xen_start_info->nr_pt_frames;
1916 /* Zap identity mapping */
1917 init_level4_pgt[0] = __pgd(0);
1919 /* Pre-constructed entries are in pfn, so convert to mfn */
1920 /* L4[272] -> level3_ident_pgt */
1921 /* L4[511] -> level3_kernel_pgt */
1922 convert_pfn_mfn(init_level4_pgt);
1924 /* L3_i[0] -> level2_ident_pgt */
1925 convert_pfn_mfn(level3_ident_pgt);
1926 /* L3_k[510] -> level2_kernel_pgt */
1927 /* L3_k[511] -> level2_fixmap_pgt */
1928 convert_pfn_mfn(level3_kernel_pgt);
1930 /* L3_k[511][506] -> level1_fixmap_pgt */
1931 convert_pfn_mfn(level2_fixmap_pgt);
1933 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1934 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1935 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1937 addr[0] = (unsigned long)pgd;
1938 addr[1] = (unsigned long)l3;
1939 addr[2] = (unsigned long)l2;
1940 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1941 * Both L4[272][0] and L4[511][510] have entries that point to the same
1942 * L2 (PMD) tables. Meaning that if you modify it in __va space
1943 * it will also be modified in the __ka space! (But if you just
1944 * modify the PMD table to point to other PTE's or none, then you
1945 * are OK - which is what cleanup_highmap does) */
1946 copy_page(level2_ident_pgt, l2);
1947 /* Graft it onto L4[511][510] */
1948 copy_page(level2_kernel_pgt, l2);
1950 /* Copy the initial P->M table mappings if necessary. */
1951 i = pgd_index(xen_start_info->mfn_list);
1952 if (i && i < pgd_index(__START_KERNEL_map))
1953 init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1955 /* Make pagetable pieces RO */
1956 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1957 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1958 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1959 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1960 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1961 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1962 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1963 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
1965 /* Pin down new L4 */
1966 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1967 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1969 /* Unpin Xen-provided one */
1970 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1973 * At this stage there can be no user pgd, and no page structure to
1974 * attach it to, so make sure we just set kernel pgd.
1977 __xen_write_cr3(true, __pa(init_level4_pgt));
1978 xen_mc_issue(PARAVIRT_LAZY_CPU);
1980 /* We can't rip out L3 and L2 that easily, as the Xen pagetables are
1981 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1982 * the initial domain. For guests using the toolstack, they are in:
1983 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1984 * rip out the [L4] (pgd), but for guests we shave off three pages.
1986 for (i = 0; i < ARRAY_SIZE(addr); i++)
1987 check_pt_base(&pt_base, &pt_end, addr[i]);
1989 /* The Xen-provided pagetable area we are still using, now smaller by up to three pages */
1990 xen_pt_base = PFN_PHYS(pt_base);
1991 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1992 memblock_reserve(xen_pt_base, xen_pt_size);
1994 /* Revector the xen_start_info */
1995 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1999 * Read a value from a physical address.
2001 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
2003 unsigned long *vaddr;
2006 vaddr = early_memremap_ro(addr, sizeof(val));
2008 early_memunmap(vaddr, sizeof(val));
2013 * Translate a virtual address to a physical one without relying on mapped
2014 * page tables. Don't rely on big pages being aligned in (guest) physical space.
2017 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2026 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2028 if (!pgd_present(pgd))
2031 pa = pgd_val(pgd) & PTE_PFN_MASK;
2032 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2034 if (!pud_present(pud))
2036 pa = pud_val(pud) & PTE_PFN_MASK;
2038 return pa + (vaddr & ~PUD_MASK);
2040 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2042 if (!pmd_present(pmd))
2044 pa = pmd_val(pmd) & PTE_PFN_MASK;
2046 return pa + (vaddr & ~PMD_MASK);
2048 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2050 if (!pte_present(pte))
2052 pa = pte_pfn(pte) << PAGE_SHIFT;
2054 return pa | (vaddr & ~PAGE_MASK);
2058 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to it.
2061 void __init xen_relocate_p2m(void)
2063 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
2064 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2065 int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
2071 unsigned long *new_p2m;
2074 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2075 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2076 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2077 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2078 n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
2079 if (PTRS_PER_P4D > 1)
2080 n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
2083 n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
2085 new_area = xen_find_free_area(PFN_PHYS(n_frames));
2087 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2092 * Setup the page tables for addressing the new p2m list.
2093 * We have asked the hypervisor to map the p2m list at the user address
2094 * PUD_SIZE. It may have done so, or it may have used a kernel space
2095 * address depending on the Xen version.
2096 * To avoid any possible virtual address collision, just use
2097 * 2 * PUD_SIZE for the new area.
2099 p4d_phys = new_area;
2100 pud_phys = p4d_phys + PFN_PHYS(n_p4d);
2101 pmd_phys = pud_phys + PFN_PHYS(n_pud);
2102 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2103 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2105 pgd = __va(read_cr3());
2106 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2111 p4d = early_memremap(p4d_phys, PAGE_SIZE);
2113 n_pud = min(save_pud, PTRS_PER_P4D);
2115 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2116 pud = early_memremap(pud_phys, PAGE_SIZE);
2118 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2120 pmd = early_memremap(pmd_phys, PAGE_SIZE);
2122 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2124 pt = early_memremap(pt_phys, PAGE_SIZE);
2127 idx_pte < min(n_pte, PTRS_PER_PTE);
2129 set_pte(pt + idx_pte,
2130 pfn_pte(p2m_pfn, PAGE_KERNEL));
2133 n_pte -= PTRS_PER_PTE;
2134 early_memunmap(pt, PAGE_SIZE);
2135 make_lowmem_page_readonly(__va(pt_phys));
2136 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2138 set_pmd(pmd + idx_pt,
2139 __pmd(_PAGE_TABLE | pt_phys));
2140 pt_phys += PAGE_SIZE;
2142 n_pt -= PTRS_PER_PMD;
2143 early_memunmap(pmd, PAGE_SIZE);
2144 make_lowmem_page_readonly(__va(pmd_phys));
2145 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2146 PFN_DOWN(pmd_phys));
2147 set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2148 pmd_phys += PAGE_SIZE;
2150 n_pmd -= PTRS_PER_PUD;
2151 early_memunmap(pud, PAGE_SIZE);
2152 make_lowmem_page_readonly(__va(pud_phys));
2153 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2155 set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
2157 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2158 pud_phys += PAGE_SIZE;
2161 save_pud -= PTRS_PER_P4D;
2162 early_memunmap(p4d, PAGE_SIZE);
2163 make_lowmem_page_readonly(__va(p4d_phys));
2164 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
2165 set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
2166 p4d_phys += PAGE_SIZE;
2168 } while (++idx_p4d < n_p4d);
2170 /* Now copy the old p2m info to the new area. */
2171 memcpy(new_p2m, xen_p2m_addr, size);
2172 xen_p2m_addr = new_p2m;
2174 /* Release the old p2m list and set new list info. */
2175 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2177 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2179 if (xen_start_info->mfn_list < __START_KERNEL_map) {
2180 pfn = xen_start_info->first_p2m_pfn;
2181 pfn_end = xen_start_info->first_p2m_pfn +
2182 xen_start_info->nr_p2m_frames;
2183 set_pgd(pgd + 1, __pgd(0));
2186 pfn_end = p2m_pfn_end;
2189 memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2190 while (pfn < pfn_end) {
2191 if (pfn == p2m_pfn) {
2195 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2199 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2200 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2201 xen_start_info->nr_p2m_frames = n_frames;
2204 #else /* !CONFIG_X86_64 */
2205 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2206 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2208 static void __init xen_write_cr3_init(unsigned long cr3)
2210 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2212 BUG_ON(read_cr3() != __pa(initial_page_table));
2213 BUG_ON(cr3 != __pa(swapper_pg_dir));
2216 * We are switching to swapper_pg_dir for the first time (from
2217 * initial_page_table) and therefore need to mark that page
2218 * read-only and then pin it.
2220 * Xen disallows sharing of kernel PMDs for PAE
2221 * guests. Therefore we must copy the kernel PMD from
2222 * initial_page_table into a new kernel PMD to be used in
2225 swapper_kernel_pmd =
2226 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2227 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2228 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2229 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2230 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2232 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2234 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2236 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2237 PFN_DOWN(__pa(initial_page_table)));
2238 set_page_prot(initial_page_table, PAGE_KERNEL);
2239 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2241 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2245 * For 32 bit domains xen_start_info->pt_base is the pgd address, which might
2246 * not be the first page table in the page table pool.
2247 * Iterate through the initial page tables to find the real page table base.
2249 static phys_addr_t xen_find_pt_base(pmd_t *pmd)
2251 phys_addr_t pt_base, paddr;
2254 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2256 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2257 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2258 paddr = m2p(pmd[pmdidx].pmd);
2259 pt_base = min(pt_base, paddr);

void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);

	xen_pt_base = xen_find_pt_base(kernel_pmd);
	xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;

	initial_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

	max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);

	copy_page(initial_kernel_pmd, kernel_pmd);

	xen_map_identity_early(initial_kernel_pmd, max_pfn);

	copy_page(initial_page_table, pgd);
	initial_page_table[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);

	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	xen_write_cr3(__pa(initial_page_table));

	memblock_reserve(xen_pt_base, xen_pt_size);
}
#endif	/* CONFIG_X86_64 */

void __init xen_reserve_special_pages(void)
{
	phys_addr_t paddr;

	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
	if (xen_start_info->store_mfn) {
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
	if (!xen_initial_domain()) {
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
}

void __init xen_pt_check_e820(void)
{
	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
		BUG();
	}
}

static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
	case VSYSCALL_PAGE:
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
	case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

#ifdef CONFIG_X86_IO_APIC
	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
		/*
		 * We just don't map the IO APIC - all access is via
		 * hypercalls.  Keep the address in the pte for reference.
		 */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, prot);
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx == VSYSCALL_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

static void __init xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if CONFIG_PGTABLE_LEVELS >= 4
	pv_mmu_ops.set_p4d = xen_set_p4d;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if CONFIG_PGTABLE_LEVELS >= 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif
	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);

#ifdef CONFIG_X86_64
	pv_mmu_ops.write_cr3 = &xen_write_cr3;
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initconst = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3_init,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.release_pmd = xen_release_pmd_init,

	.set_pte = xen_set_pte_init,
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if CONFIG_PGTABLE_LEVELS >= 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_p4d = xen_set_p4d_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
		.flush = paravirt_flush_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};
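
/*
 * xen_init_mmu_ops() below installs the table above.  Several entries are
 * deliberately boot-time variants: the *_init allocation and set_pte hooks
 * and xen_write_cr3_init are swapped for their final versions once the
 * kernel's own page allocator and pagetables are up (see
 * xen_post_allocator_init() and xen_write_cr3_init() above).
 */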

void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_init = xen_pagetable_init;
	pv_mmu_ops = xen_mmu_ops;

	/* The dummy APIC/IO-APIC fixmap page should read as all-ones. */
	memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
				unsigned long *in_frames,
				unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}
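
/*
 * Note on the pattern used above and below: PTE updates are queued with
 * xen_mc_batch()/__xen_mc_entry() and only submitted to the hypervisor by
 * xen_mc_issue(), so an entire 2^order range is zapped or remapped with
 * batched hypercalls rather than one hypercall per page.
 */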

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit, flags;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		/* Only the last update of the batch flushes the TLB. */
		if (i < (limit - 1))
			flags = 0;
		else if (order == 0)
			flags = UVMF_INVLPG | UVMF_ALL;
		else
			flags = UVMF_TLB_FLUSH | UVMF_ALL;

		MULTI_update_va_mapping(mcs.mc, vaddr,
					mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	/* The exchange must be all-or-nothing: either everything was
	   exchanged (rc == 0), or nothing was (rc != 0). */
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long flags;
	int success;
	unsigned long vstart = (unsigned long)phys_to_virt(pstart);

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	*dma_handle = virt_to_machine(vstart).maddr;
	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
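
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * needs a machine-contiguous, address-limited buffer - e.g. a DMA bounce
 * buffer - takes an already allocated, page-aligned region and exchanges
 * its backing frames:
 *
 *	order = get_order(size);
 *	buf = (void *)__get_free_pages(GFP_KERNEL, order);
 *	rc = xen_create_contiguous_region(virt_to_phys(buf), order,
 *					  32, &dma_handle);
 *	...
 *	xen_destroy_contiguous_region(virt_to_phys(buf), order);
 *
 * The swiotlb-xen code uses this interface to make its bounce buffers
 * machine-contiguous.
 */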

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long flags;
	int success;
	unsigned long vstart;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	vstart = (unsigned long)phys_to_virt(pstart);
	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
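
/*
 * Both region calls above serialize on xen_reservation_lock and share the
 * single static discontig_frames[] array as scratch space, so only one
 * create/destroy operation can be in flight at a time.
 */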

#ifdef CONFIG_KEXEC_CORE
phys_addr_t paddr_vmcoreinfo_note(void)
{
	if (xen_pv_domain())
		return virt_to_machine(&vmcoreinfo_note).maddr;
	else
		return __pa_symbol(&vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */