4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can use.
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
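 *
 * As a hedged illustration (the frame numbers here are invented): if
 * the guest's pfn 0x100 happens to be backed by machine frame 0x8765,
 * then building a pte does roughly
 *
 *     mfn = pfn_to_mfn(0x100);                        -> 0x8765
 *     val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
 *
 * as pte_pfn_to_mfn() below does, and reading the value back via
 * pte_mfn_to_pfn()/mfn_to_pfn() recovers pfn 0x100 again.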
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a pagetable.
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
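 *
 * Concretely, that is what xen_pgd_pin()/__xen_pgd_pin() below do for a
 * newly created mm: walk the pagetable, remap every page of it
 * read-only, and issue MMUEXT_PIN_L4_TABLE (or L3 on 32-bit PAE) so
 * that a later write of %cr3 needs no revalidation pass.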
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/module.h>
47 #include <linux/gfp.h>
49 #include <asm/pgtable.h>
50 #include <asm/tlbflush.h>
51 #include <asm/fixmap.h>
52 #include <asm/mmu_context.h>
53 #include <asm/setup.h>
54 #include <asm/paravirt.h>
56 #include <asm/linkage.h>
59 #include <asm/xen/hypercall.h>
60 #include <asm/xen/hypervisor.h>
64 #include <xen/interface/xen.h>
65 #include <xen/interface/hvm/hvm_op.h>
66 #include <xen/interface/version.h>
67 #include <xen/interface/memory.h>
68 #include <xen/hvc-console.h>
70 #include "multicalls.h"
74 #define MMU_UPDATE_HISTO 30
77 * Protects atomic reservation decrease/increase against concurrent increases.
78 * Also protects non-atomic updates of current_pages and driver_pages, and
81 DEFINE_SPINLOCK(xen_reservation_lock);
83 #ifdef CONFIG_XEN_DEBUG_FS
87 u32 pgd_update_pinned;
88 u32 pgd_update_batched;
91 u32 pud_update_pinned;
92 u32 pud_update_batched;
95 u32 pmd_update_pinned;
96 u32 pmd_update_batched;
99 u32 pte_update_pinned;
100 u32 pte_update_batched;
103 u32 mmu_update_extended;
104 u32 mmu_update_histo[MMU_UPDATE_HISTO];
107 u32 prot_commit_batched;
110 u32 set_pte_at_batched;
111 u32 set_pte_at_pinned;
112 u32 set_pte_at_current;
113 u32 set_pte_at_kernel;
116 static u8 zero_stats;
118 static inline void check_zero(void)
120 if (unlikely(zero_stats)) {
121 memset(&mmu_stats, 0, sizeof(mmu_stats));
126 #define ADD_STATS(elem, val) \
127 do { check_zero(); mmu_stats.elem += (val); } while(0)
129 #else /* !CONFIG_XEN_DEBUG_FS */
131 #define ADD_STATS(elem, val) do { (void)(val); } while(0)
133 #endif /* CONFIG_XEN_DEBUG_FS */
137 * Identity map, in addition to plain kernel map. This needs to be
138 * large enough to provide the page table pages needed to map the rest.
139 * Each page can map 2MB.
141 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
142 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
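/*
 * Illustrative sizing (a sketch, assuming 512 ptes per page, i.e. 64-bit
 * or PAE): LEVEL1_IDENT_ENTRIES = 512 * 4 = 2048 ptes spread over 4 pte
 * pages, and each pte page maps 512 * 4KB = 2MB, so this array can back
 * up to 4 * 2MB = 8MB of early identity mapping.
 */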
145 /* l3 pud for userspace vsyscall mapping */
146 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
147 #endif /* CONFIG_X86_64 */
150 * Note about cr3 (pagetable base) values:
152 * xen_cr3 contains the current logical cr3 value; it contains the
153 * last set cr3. This may not be the current effective cr3, because
154 * its update may be being lazily deferred. However, a vcpu looking
155 * at its own cr3 can use this value knowing that everything will
156 * be self-consistent.
158 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
159 * hypercall to set the vcpu cr3 is complete (so it may be a little
160 * out of date, but it will never be set early). If one vcpu is
161 * looking at another vcpu's cr3 value, it should use this variable.
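 *
 * For example, when deciding whether some other vcpu might still be
 * using a pagetable, xen_drop_mm_ref() below compares that vcpu's
 * per_cpu(xen_current_cr3, cpu) against __pa(mm->pgd); comparing
 * per_cpu(xen_cr3, cpu) instead could miss a lazily deferred switch.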
163 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
164 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
168 * Just beyond the highest usermode address. STACK_TOP_MAX has a
169 * redzone above it, so round it up to a PGD boundary.
171 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
174 * Xen leaves the responsibility for maintaining p2m mappings to the
175 * guests themselves, but it must also access and update the p2m array
176 * during suspend/resume when all the pages are reallocated.
178 * The p2m table is logically a flat array, but we implement it as a
179 * three-level tree to allow the address space to be sparse.
183 *   p2m_top                      p2m_top_mfn
185 *     -> p2m_mid, p2m_mid, ...      -> p2m_mid_mfn, p2m_mid_mfn, ...
187 *          -> p2m, p2m, p2m, ...
189 * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
190 * maximum representable pseudo-physical address space is:
191 * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
193 * P2M_PER_PAGE depends on the architecture, as an mfn is always
194 * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
195 * 512 and 1024 entries respectively.
198 unsigned long xen_max_p2m_pfn __read_mostly;
200 #define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
201 #define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
202 #define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))
204 #define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
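/*
 * Worked example (a sketch, assuming 64-bit where P2M_PER_PAGE and
 * P2M_MID_PER_PAGE are both 512):
 *
 *     pfn    = 0x12345
 *     topidx = pfn / (512 * 512) = 0x0    -> p2m_top[0]
 *     mididx = (pfn / 512) % 512 = 0x91   -> p2m_top[0][0x91]
 *     idx    = pfn % 512         = 0x145  -> p2m_top[0][0x91][0x145] holds the mfn
 *
 * which is exactly the decomposition performed by p2m_top_index(),
 * p2m_mid_index() and p2m_index() below.
 */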
206 /* Placeholders for holes in the address space */
207 static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
208 static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
209 static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
211 static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
212 static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
214 RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
215 RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
217 static inline unsigned p2m_top_index(unsigned long pfn)
219 BUG_ON(pfn >= MAX_P2M_PFN);
220 return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
223 static inline unsigned p2m_mid_index(unsigned long pfn)
225 return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
228 static inline unsigned p2m_index(unsigned long pfn)
230 return pfn % P2M_PER_PAGE;
233 static void p2m_top_init(unsigned long ***top)
237 for (i = 0; i < P2M_TOP_PER_PAGE; i++)
238 top[i] = p2m_mid_missing;
241 static void p2m_top_mfn_init(unsigned long *top)
245 for (i = 0; i < P2M_TOP_PER_PAGE; i++)
246 top[i] = virt_to_mfn(p2m_mid_missing_mfn);
249 static void p2m_mid_init(unsigned long **mid)
253 for (i = 0; i < P2M_MID_PER_PAGE; i++)
254 mid[i] = p2m_missing;
257 static void p2m_mid_mfn_init(unsigned long *mid)
261 for (i = 0; i < P2M_MID_PER_PAGE; i++)
262 mid[i] = virt_to_mfn(p2m_missing);
265 static void p2m_init(unsigned long *p2m)
269 for (i = 0; i < P2M_MID_PER_PAGE; i++)
270 p2m[i] = INVALID_P2M_ENTRY;
274 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
276 * This is called both at boot time, and after resuming from suspend:
277 * - At boot time we're called very early, and must use extend_brk()
278 * to allocate memory.
280 * - After resume we're called from within stop_machine, but the mfn
281 * tree should already be completely allocated.
283 void xen_build_mfn_list_list(void)
287 /* Pre-initialize p2m_top_mfn to be completely missing */
288 if (p2m_top_mfn == NULL) {
289 p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
290 p2m_mid_mfn_init(p2m_mid_missing_mfn);
292 p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
293 p2m_top_mfn_init(p2m_top_mfn);
296 for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
297 unsigned topidx = p2m_top_index(pfn);
298 unsigned mididx = p2m_mid_index(pfn);
300 unsigned long mid_mfn;
301 unsigned long *mid_mfn_p;
303 mid = p2m_top[topidx];
305 /* Don't bother allocating any mfn mid levels if
306 they're just missing */
307 if (mid[mididx] == p2m_missing)
310 mid_mfn = p2m_top_mfn[topidx];
311 mid_mfn_p = mfn_to_virt(mid_mfn);
313 if (mid_mfn_p == p2m_mid_missing_mfn) {
315 * XXX boot-time only! We should never find
316 * missing parts of the mfn tree after
317 * runtime. extend_brk() will BUG if we call it at runtime.
320 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
321 p2m_mid_mfn_init(mid_mfn_p);
323 mid_mfn = virt_to_mfn(mid_mfn_p);
325 p2m_top_mfn[topidx] = mid_mfn;
328 mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
332 void xen_setup_mfn_list_list(void)
334 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
336 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
337 virt_to_mfn(p2m_top_mfn);
338 HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
341 /* Set up p2m_top to point to the domain-builder provided p2m pages */
342 void __init xen_build_dynamic_phys_to_machine(void)
344 unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
345 unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
348 xen_max_p2m_pfn = max_pfn;
350 p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
351 p2m_init(p2m_missing);
353 p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
354 p2m_mid_init(p2m_mid_missing);
356 p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
357 p2m_top_init(p2m_top);
360 * The domain builder gives us a pre-constructed p2m array in
361 * mfn_list for all the pages initially given to us, so we just
362 * need to graft that into our tree structure.
364 for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
365 unsigned topidx = p2m_top_index(pfn);
366 unsigned mididx = p2m_mid_index(pfn);
368 if (p2m_top[topidx] == p2m_mid_missing) {
369 unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
372 p2m_top[topidx] = mid;
375 p2m_top[topidx][mididx] = &mfn_list[pfn];
379 unsigned long get_phys_to_machine(unsigned long pfn)
381 unsigned topidx, mididx, idx;
383 if (unlikely(pfn >= MAX_P2M_PFN))
384 return INVALID_P2M_ENTRY;
386 topidx = p2m_top_index(pfn);
387 mididx = p2m_mid_index(pfn);
388 idx = p2m_index(pfn);
390 return p2m_top[topidx][mididx][idx];
392 EXPORT_SYMBOL_GPL(get_phys_to_machine);
394 static void *alloc_p2m_page(void)
396 return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
399 static void free_p2m_page(void *p)
401 free_page((unsigned long)p);
405 * Fully allocate the p2m structure for a given pfn. We need to check
406 * that both the top and mid levels are allocated, and make sure the
407 * parallel mfn tree is kept in sync. We may race with other cpus, so
408 * the new pages are installed with cmpxchg; if we lose the race then
409 * simply free the page we allocated and use the one that's there.
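 *
 * In sketch form (the names here are placeholders, not the exact locals
 * used below), each level is installed with the same pattern:
 *
 *     new = alloc_p2m_page();
 *     init_level(new);
 *     if (cmpxchg(slot, missing, new) != missing)
 *             free_p2m_page(new);     <- another cpu won the race
 *
 * which is how alloc_p2m() below handles the top, mid and leaf levels.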
411 static bool alloc_p2m(unsigned long pfn)
413 unsigned topidx, mididx;
414 unsigned long ***top_p, **mid;
415 unsigned long *top_mfn_p, *mid_mfn;
417 topidx = p2m_top_index(pfn);
418 mididx = p2m_mid_index(pfn);
420 top_p = &p2m_top[topidx];
423 if (mid == p2m_mid_missing) {
424 /* Mid level is missing, allocate a new one */
425 mid = alloc_p2m_page();
431 if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
435 top_mfn_p = &p2m_top_mfn[topidx];
436 mid_mfn = mfn_to_virt(*top_mfn_p);
438 if (mid_mfn == p2m_mid_missing_mfn) {
439 /* Separately check the mid mfn level */
440 unsigned long missing_mfn;
441 unsigned long mid_mfn_mfn;
443 mid_mfn = alloc_p2m_page();
447 p2m_mid_mfn_init(mid_mfn);
449 missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
450 mid_mfn_mfn = virt_to_mfn(mid_mfn);
451 if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
452 free_p2m_page(mid_mfn);
455 if (p2m_top[topidx][mididx] == p2m_missing) {
456 /* p2m leaf page is missing */
459 p2m = alloc_p2m_page();
465 if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
468 mid_mfn[mididx] = virt_to_mfn(p2m);
474 /* Try to install p2m mapping; fail if intermediate bits missing */
475 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
477 unsigned topidx, mididx, idx;
479 if (unlikely(pfn >= MAX_P2M_PFN)) {
480 BUG_ON(mfn != INVALID_P2M_ENTRY);
484 topidx = p2m_top_index(pfn);
485 mididx = p2m_mid_index(pfn);
486 idx = p2m_index(pfn);
488 if (p2m_top[topidx][mididx] == p2m_missing)
489 return mfn == INVALID_P2M_ENTRY;
491 p2m_top[topidx][mididx][idx] = mfn;
496 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
498 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
499 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
503 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
507 if (!__set_phys_to_machine(pfn, mfn))
514 unsigned long arbitrary_virt_to_mfn(void *vaddr)
516 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
518 return PFN_DOWN(maddr.maddr);
521 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
523 unsigned long address = (unsigned long)vaddr;
529 * if the PFN is in the linear mapped vaddr range, we can just use
530 * the (quick) virt_to_machine() p2m lookup
532 if (virt_addr_valid(vaddr))
533 return virt_to_machine(vaddr);
535 /* otherwise we have to do a (slower) full page-table walk */
537 pte = lookup_address(address, &level);
539 offset = address & ~PAGE_MASK;
540 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
543 void make_lowmem_page_readonly(void *vaddr)
546 unsigned long address = (unsigned long)vaddr;
549 pte = lookup_address(address, &level);
552 ptev = pte_wrprotect(*pte);
554 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
558 void make_lowmem_page_readwrite(void *vaddr)
561 unsigned long address = (unsigned long)vaddr;
564 pte = lookup_address(address, &level);
567 ptev = pte_mkwrite(*pte);
569 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
574 static bool xen_page_pinned(void *ptr)
576 struct page *page = virt_to_page(ptr);
578 return PagePinned(page);
581 static bool xen_iomap_pte(pte_t pte)
583 return pte_flags(pte) & _PAGE_IOMAP;
586 static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
588 struct multicall_space mcs;
589 struct mmu_update *u;
591 mcs = xen_mc_entry(sizeof(*u));
594 /* ptep might be kmapped when using 32-bit HIGHPTE */
595 u->ptr = arbitrary_virt_to_machine(ptep).maddr;
596 u->val = pte_val_ma(pteval);
598 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
600 xen_mc_issue(PARAVIRT_LAZY_MMU);
603 static void xen_extend_mmu_update(const struct mmu_update *update)
605 struct multicall_space mcs;
606 struct mmu_update *u;
608 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
610 if (mcs.mc != NULL) {
611 ADD_STATS(mmu_update_extended, 1);
612 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
616 if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
617 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
619 ADD_STATS(mmu_update_histo[0], 1);
621 ADD_STATS(mmu_update, 1);
622 mcs = __xen_mc_entry(sizeof(*u));
623 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
624 ADD_STATS(mmu_update_histo[1], 1);
631 void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
639 /* ptr may be ioremapped for 64-bit pagetable setup */
640 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
641 u.val = pmd_val_ma(val);
642 xen_extend_mmu_update(&u);
644 ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
646 xen_mc_issue(PARAVIRT_LAZY_MMU);
651 void xen_set_pmd(pmd_t *ptr, pmd_t val)
653 ADD_STATS(pmd_update, 1);
655 /* If page is not pinned, we can just update the entry directly */
657 if (!xen_page_pinned(ptr)) {
662 ADD_STATS(pmd_update_pinned, 1);
664 xen_set_pmd_hyper(ptr, val);
668 * Associate a virtual page frame with a given physical page frame
669 * and protection flags for that frame.
671 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
673 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
676 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
677 pte_t *ptep, pte_t pteval)
679 if (xen_iomap_pte(pteval)) {
680 xen_set_iomap_pte(ptep, pteval);
684 ADD_STATS(set_pte_at, 1);
685 // ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
686 ADD_STATS(set_pte_at_current, mm == current->mm);
687 ADD_STATS(set_pte_at_kernel, mm == &init_mm);
689 if (mm == current->mm || mm == &init_mm) {
690 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
691 struct multicall_space mcs;
692 mcs = xen_mc_entry(0);
694 MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
695 ADD_STATS(set_pte_at_batched, 1);
696 xen_mc_issue(PARAVIRT_LAZY_MMU);
699 if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
702 xen_set_pte(ptep, pteval);
707 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
708 unsigned long addr, pte_t *ptep)
710 /* Just return the pte as-is. We preserve the bits on commit */
714 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
715 pte_t *ptep, pte_t pte)
721 u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
722 u.val = pte_val_ma(pte);
723 xen_extend_mmu_update(&u);
725 ADD_STATS(prot_commit, 1);
726 ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
728 xen_mc_issue(PARAVIRT_LAZY_MMU);
731 /* Assume pteval_t is equivalent to all the other *val_t types. */
732 static pteval_t pte_mfn_to_pfn(pteval_t val)
734 if (val & _PAGE_PRESENT) {
735 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
736 pteval_t flags = val & PTE_FLAGS_MASK;
737 val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
743 static pteval_t pte_pfn_to_mfn(pteval_t val)
745 if (val & _PAGE_PRESENT) {
746 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
747 pteval_t flags = val & PTE_FLAGS_MASK;
748 unsigned long mfn = pfn_to_mfn(pfn);
751 * If there's no mfn for the pfn, then just create an
752 * empty non-present pte. Unfortunately this loses
753 * information about the original pfn, so
754 * pte_mfn_to_pfn is asymmetric.
756 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
761 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
767 static pteval_t iomap_pte(pteval_t val)
769 if (val & _PAGE_PRESENT) {
770 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
771 pteval_t flags = val & PTE_FLAGS_MASK;
773 /* We assume the pte frame number is an MFN, so
774 just use it as-is. */
775 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
781 pteval_t xen_pte_val(pte_t pte)
783 if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
786 return pte_mfn_to_pfn(pte.pte);
788 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
790 pgdval_t xen_pgd_val(pgd_t pgd)
792 return pte_mfn_to_pfn(pgd.pgd);
794 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
796 pte_t xen_make_pte(pteval_t pte)
798 phys_addr_t addr = (pte & PTE_PFN_MASK);
801 * Unprivileged domains are allowed to do IOMAP mappings for
802 * PCI passthrough, but not map ISA space. The ISA
803 * mappings are just dummy local mappings to keep other
804 * parts of the kernel happy.
806 if (unlikely(pte & _PAGE_IOMAP) &&
807 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
808 pte = iomap_pte(pte);
811 pte = pte_pfn_to_mfn(pte);
814 return native_make_pte(pte);
816 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
818 pgd_t xen_make_pgd(pgdval_t pgd)
820 pgd = pte_pfn_to_mfn(pgd);
821 return native_make_pgd(pgd);
823 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
825 pmdval_t xen_pmd_val(pmd_t pmd)
827 return pte_mfn_to_pfn(pmd.pmd);
829 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
831 void xen_set_pud_hyper(pud_t *ptr, pud_t val)
839 /* ptr may be ioremapped for 64-bit pagetable setup */
840 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
841 u.val = pud_val_ma(val);
842 xen_extend_mmu_update(&u);
844 ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
846 xen_mc_issue(PARAVIRT_LAZY_MMU);
851 void xen_set_pud(pud_t *ptr, pud_t val)
853 ADD_STATS(pud_update, 1);
855 /* If page is not pinned, we can just update the entry directly */
857 if (!xen_page_pinned(ptr)) {
862 ADD_STATS(pud_update_pinned, 1);
864 xen_set_pud_hyper(ptr, val);
867 void xen_set_pte(pte_t *ptep, pte_t pte)
869 if (xen_iomap_pte(pte)) {
870 xen_set_iomap_pte(ptep, pte);
874 ADD_STATS(pte_update, 1);
875 // ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
876 ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
878 #ifdef CONFIG_X86_PAE
879 ptep->pte_high = pte.pte_high;
881 ptep->pte_low = pte.pte_low;
887 #ifdef CONFIG_X86_PAE
888 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
890 if (xen_iomap_pte(pte)) {
891 xen_set_iomap_pte(ptep, pte);
895 set_64bit((u64 *)ptep, native_pte_val(pte));
898 void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
901 smp_wmb(); /* make sure low gets written first */
905 void xen_pmd_clear(pmd_t *pmdp)
907 set_pmd(pmdp, __pmd(0));
909 #endif /* CONFIG_X86_PAE */
911 pmd_t xen_make_pmd(pmdval_t pmd)
913 pmd = pte_pfn_to_mfn(pmd);
914 return native_make_pmd(pmd);
916 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
918 #if PAGETABLE_LEVELS == 4
919 pudval_t xen_pud_val(pud_t pud)
921 return pte_mfn_to_pfn(pud.pud);
923 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
925 pud_t xen_make_pud(pudval_t pud)
927 pud = pte_pfn_to_mfn(pud);
929 return native_make_pud(pud);
931 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
933 pgd_t *xen_get_user_pgd(pgd_t *pgd)
935 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
936 unsigned offset = pgd - pgd_page;
937 pgd_t *user_ptr = NULL;
939 if (offset < pgd_index(USER_LIMIT)) {
940 struct page *page = virt_to_page(pgd_page);
941 user_ptr = (pgd_t *)page->private;
949 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
953 u.ptr = virt_to_machine(ptr).maddr;
954 u.val = pgd_val_ma(val);
955 xen_extend_mmu_update(&u);
959 * Raw hypercall-based set_pgd, intended for use in early boot before
960 * there's a page structure. This implies:
961 * 1. The only existing pagetable is the kernel's
962 * 2. It is always pinned
963 * 3. It has no user pagetable attached to it
965 void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
971 __xen_set_pgd_hyper(ptr, val);
973 xen_mc_issue(PARAVIRT_LAZY_MMU);
978 void xen_set_pgd(pgd_t *ptr, pgd_t val)
980 pgd_t *user_ptr = xen_get_user_pgd(ptr);
982 ADD_STATS(pgd_update, 1);
984 /* If page is not pinned, we can just update the entry directly */
986 if (!xen_page_pinned(ptr)) {
989 WARN_ON(xen_page_pinned(user_ptr));
995 ADD_STATS(pgd_update_pinned, 1);
996 ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
998 /* If it's pinned, then we can at least batch the kernel and
999 user updates together. */
1002 __xen_set_pgd_hyper(ptr, val);
1004 __xen_set_pgd_hyper(user_ptr, val);
1006 xen_mc_issue(PARAVIRT_LAZY_MMU);
1008 #endif /* PAGETABLE_LEVELS == 4 */
1011 * (Yet another) pagetable walker. This one is intended for pinning a
1012 * pagetable. This means that it walks a pagetable and calls the
1013 * callback function on each page it finds making up the page table,
1014 * at every level. It walks the entire pagetable, but it only bothers
1015 * pinning pte pages which are below the limit. In the normal case this
1016 * will be STACK_TOP_MAX, but at boot we need to pin up to FIXADDR_TOP.
1019 * For 32-bit the important bit is that we don't pin beyond there,
1020 * because then we start getting into Xen's ptes.
1022 * For 64-bit, we must skip the Xen hole in the middle of the address
1023 * space, just after the big x86-64 virtual hole.
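 *
 * For example, pinning a new mm walks with
 * __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT) (see
 * __xen_pgd_pin() below), while xen_mark_init_mm_pinned() walks the
 * kernel pagetable with a limit of FIXADDR_TOP.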
1025 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
1026 int (*func)(struct mm_struct *mm, struct page *,
1028 unsigned long limit)
1031 unsigned hole_low, hole_high;
1032 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
1033 unsigned pgdidx, pudidx, pmdidx;
1035 /* The limit is the last byte to be touched */
1037 BUG_ON(limit >= FIXADDR_TOP);
1039 if (xen_feature(XENFEAT_auto_translated_physmap))
1043 * 64-bit has a great big hole in the middle of the address
1044 * space, which contains the Xen mappings. On 32-bit these
1045 * will end up making a zero-sized hole and so this is a no-op.
1047 hole_low = pgd_index(USER_LIMIT);
1048 hole_high = pgd_index(PAGE_OFFSET);
1050 pgdidx_limit = pgd_index(limit);
1051 #if PTRS_PER_PUD > 1
1052 pudidx_limit = pud_index(limit);
1056 #if PTRS_PER_PMD > 1
1057 pmdidx_limit = pmd_index(limit);
1062 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
1065 if (pgdidx >= hole_low && pgdidx < hole_high)
1068 if (!pgd_val(pgd[pgdidx]))
1071 pud = pud_offset(&pgd[pgdidx], 0);
1073 if (PTRS_PER_PUD > 1) /* not folded */
1074 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
1076 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
1079 if (pgdidx == pgdidx_limit &&
1080 pudidx > pudidx_limit)
1083 if (pud_none(pud[pudidx]))
1086 pmd = pmd_offset(&pud[pudidx], 0);
1088 if (PTRS_PER_PMD > 1) /* not folded */
1089 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
1091 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
1094 if (pgdidx == pgdidx_limit &&
1095 pudidx == pudidx_limit &&
1096 pmdidx > pmdidx_limit)
1099 if (pmd_none(pmd[pmdidx]))
1102 pte = pmd_page(pmd[pmdidx]);
1103 flush |= (*func)(mm, pte, PT_PTE);
1109 /* Do the top level last, so that the callbacks can use it as
1110 a cue to do final things like tlb flushes. */
1111 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
1116 static int xen_pgd_walk(struct mm_struct *mm,
1117 int (*func)(struct mm_struct *mm, struct page *,
1119 unsigned long limit)
1121 return __xen_pgd_walk(mm, mm->pgd, func, limit);
1124 /* If we're using split pte locks, then take the page's lock and
1125 return a pointer to it. Otherwise return NULL. */
1126 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
1128 spinlock_t *ptl = NULL;
1130 #if USE_SPLIT_PTLOCKS
1131 ptl = __pte_lockptr(page);
1132 spin_lock_nest_lock(ptl, &mm->page_table_lock);
1138 static void xen_pte_unlock(void *v)
1140 spinlock_t *ptl = v;
1144 static void xen_do_pin(unsigned level, unsigned long pfn)
1146 struct mmuext_op *op;
1147 struct multicall_space mcs;
1149 mcs = __xen_mc_entry(sizeof(*op));
1152 op->arg1.mfn = pfn_to_mfn(pfn);
1153 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1156 static int xen_pin_page(struct mm_struct *mm, struct page *page,
1157 enum pt_level level)
1159 unsigned pgfl = TestSetPagePinned(page);
1163 flush = 0; /* already pinned */
1164 else if (PageHighMem(page))
1165 /* kmaps need flushing if we found an unpinned highpage */
1169 void *pt = lowmem_page_address(page);
1170 unsigned long pfn = page_to_pfn(page);
1171 struct multicall_space mcs = __xen_mc_entry(0);
1177 * We need to hold the pagetable lock between the time
1178 * we make the pagetable RO and when we actually pin
1179 * it. If we don't, then other users may come in and
1180 * attempt to update the pagetable by writing it,
1181 * which will fail because the memory is RO but not
1182 * pinned, so Xen won't do the trap'n'emulate.
1184 * If we're using split pte locks, we can't hold the
1185 * entire pagetable's worth of locks during the
1186 * traverse, because we may wrap the preempt count (8
1187 * bits). The solution is to mark RO and pin each PTE
1188 * page while holding the lock. This means the number
1189 * of locks we end up holding is never more than a
1190 * batch size (~32 entries, at present).
1192 * If we're not using split pte locks, we needn't pin
1193 * the PTE pages independently, because we're
1194 * protected by the overall pagetable lock.
1197 if (level == PT_PTE)
1198 ptl = xen_pte_lock(page, mm);
1200 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1201 pfn_pte(pfn, PAGE_KERNEL_RO),
1202 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1205 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
1207 /* Queue a deferred unlock for when this batch is completed */
1209 xen_mc_callback(xen_pte_unlock, ptl);
1216 /* This is called just after a mm has been created, but it has not
1217 been used yet. We need to make sure that its pagetable is all
1218 read-only, and can be pinned. */
1219 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
1223 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
1224 /* re-enable interrupts for flushing */
1227 kmap_flush_unused();
1232 #ifdef CONFIG_X86_64
1234 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1236 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
1239 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
1240 xen_do_pin(MMUEXT_PIN_L4_TABLE,
1241 PFN_DOWN(__pa(user_pgd)));
1244 #else /* CONFIG_X86_32 */
1245 #ifdef CONFIG_X86_PAE
1246 /* Need to make sure unshared kernel PMD is pinnable */
1247 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1250 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
1251 #endif /* CONFIG_X86_64 */
1255 static void xen_pgd_pin(struct mm_struct *mm)
1257 __xen_pgd_pin(mm, mm->pgd);
1261 * On save, we need to pin all pagetables to make sure they get their
1262 * mfns turned into pfns. Search the list for any unpinned pgds and pin
1263 * them (unpinned pgds are not currently in use, probably because the
1264 * process is under construction or destruction).
1266 * Expected to be called in stop_machine() ("equivalent to taking
1267 * every spinlock in the system"), so the locking doesn't really
1268 * matter all that much.
1270 void xen_mm_pin_all(void)
1272 unsigned long flags;
1275 spin_lock_irqsave(&pgd_lock, flags);
1277 list_for_each_entry(page, &pgd_list, lru) {
1278 if (!PagePinned(page)) {
1279 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
1280 SetPageSavePinned(page);
1284 spin_unlock_irqrestore(&pgd_lock, flags);
1288 * The init_mm pagetable is really pinned as soon as it's created, but
1289 * that's before we have page structures to store the bits. So do all
1290 * the book-keeping now.
1292 static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
1293 enum pt_level level)
1295 SetPagePinned(page);
1299 static void __init xen_mark_init_mm_pinned(void)
1301 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
1304 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
1305 enum pt_level level)
1307 unsigned pgfl = TestClearPagePinned(page);
1309 if (pgfl && !PageHighMem(page)) {
1310 void *pt = lowmem_page_address(page);
1311 unsigned long pfn = page_to_pfn(page);
1312 spinlock_t *ptl = NULL;
1313 struct multicall_space mcs;
1316 * Do the converse to pin_page. If we're using split
1317 * pte locks, we must be holding the lock while
1318 * the pte page is unpinned but still RO to prevent
1319 * concurrent updates from seeing it in this
1320 * partially-pinned state.
1322 if (level == PT_PTE) {
1323 ptl = xen_pte_lock(page, mm);
1326 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
1329 mcs = __xen_mc_entry(0);
1331 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1332 pfn_pte(pfn, PAGE_KERNEL),
1333 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1336 /* unlock when batch completed */
1337 xen_mc_callback(xen_pte_unlock, ptl);
1341 return 0; /* never need to flush on unpin */
1344 /* Release a pagetable's pages back as normal RW */
1345 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
1349 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1351 #ifdef CONFIG_X86_64
1353 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1356 xen_do_pin(MMUEXT_UNPIN_TABLE,
1357 PFN_DOWN(__pa(user_pgd)));
1358 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
1363 #ifdef CONFIG_X86_PAE
1364 /* Need to make sure unshared kernel PMD is unpinned */
1365 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1369 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1374 static void xen_pgd_unpin(struct mm_struct *mm)
1376 __xen_pgd_unpin(mm, mm->pgd);
1380 * On resume, undo any pinning done at save, so that the rest of the
1381 * kernel doesn't see any unexpected pinned pagetables.
1383 void xen_mm_unpin_all(void)
1385 unsigned long flags;
1388 spin_lock_irqsave(&pgd_lock, flags);
1390 list_for_each_entry(page, &pgd_list, lru) {
1391 if (PageSavePinned(page)) {
1392 BUG_ON(!PagePinned(page));
1393 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
1394 ClearPageSavePinned(page);
1398 spin_unlock_irqrestore(&pgd_lock, flags);
1401 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1403 spin_lock(&next->page_table_lock);
1405 spin_unlock(&next->page_table_lock);
1408 void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1410 spin_lock(&mm->page_table_lock);
1412 spin_unlock(&mm->page_table_lock);
1417 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1418 we need to repoint it somewhere else before we can unpin it. */
1419 static void drop_other_mm_ref(void *info)
1421 struct mm_struct *mm = info;
1422 struct mm_struct *active_mm;
1424 active_mm = percpu_read(cpu_tlbstate.active_mm);
1426 if (active_mm == mm)
1427 leave_mm(smp_processor_id());
1429 /* If this cpu still has a stale cr3 reference, then make sure
1430 it has been flushed. */
1431 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
1432 load_cr3(swapper_pg_dir);
1435 static void xen_drop_mm_ref(struct mm_struct *mm)
1440 if (current->active_mm == mm) {
1441 if (current->mm == mm)
1442 load_cr3(swapper_pg_dir);
1444 leave_mm(smp_processor_id());
1447 /* Get the "official" set of cpus referring to our pagetable. */
1448 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1449 for_each_online_cpu(cpu) {
1450 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1451 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1453 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1457 cpumask_copy(mask, mm_cpumask(mm));
1459 /* It's possible that a vcpu may have a stale reference to our
1460 cr3, because it's in lazy mode, and it hasn't yet flushed
1461 its set of pending hypercalls. In this case, we can
1462 look at its actual current cr3 value, and force it to flush
1464 for_each_online_cpu(cpu) {
1465 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1466 cpumask_set_cpu(cpu, mask);
1469 if (!cpumask_empty(mask))
1470 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1471 free_cpumask_var(mask);
1474 static void xen_drop_mm_ref(struct mm_struct *mm)
1476 if (current->active_mm == mm)
1477 load_cr3(swapper_pg_dir);
1482 * While a process runs, Xen pins its pagetables, which means that the
1483 * hypervisor forces it to be read-only, and it controls all updates
1484 * to it. This means that all pagetable updates have to go via the
1485 * hypervisor, which is moderately expensive.
1487 * Since we're pulling the pagetable down, we switch to use init_mm,
1488 * unpin the old process's pagetable and mark it all read-write, which
1489 * allows further operations on it to be simple memory accesses.
1491 * The only subtle point is that another CPU may be still using the
1492 * pagetable because of lazy tlb flushing. This means we need to
1493 * switch all CPUs off this pagetable before we can unpin it.
1495 void xen_exit_mmap(struct mm_struct *mm)
1497 get_cpu(); /* make sure we don't move around */
1498 xen_drop_mm_ref(mm);
1501 spin_lock(&mm->page_table_lock);
1503 /* pgd may not be pinned in the error exit path of execve */
1504 if (xen_page_pinned(mm->pgd))
1507 spin_unlock(&mm->page_table_lock);
1510 static __init void xen_pagetable_setup_start(pgd_t *base)
1514 static void xen_post_allocator_init(void);
1516 static __init void xen_pagetable_setup_done(pgd_t *base)
1518 xen_setup_shared_info();
1519 xen_post_allocator_init();
1522 static void xen_write_cr2(unsigned long cr2)
1524 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1527 static unsigned long xen_read_cr2(void)
1529 return percpu_read(xen_vcpu)->arch.cr2;
1532 unsigned long xen_read_cr2_direct(void)
1534 return percpu_read(xen_vcpu_info.arch.cr2);
1537 static void xen_flush_tlb(void)
1539 struct mmuext_op *op;
1540 struct multicall_space mcs;
1544 mcs = xen_mc_entry(sizeof(*op));
1547 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1548 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1550 xen_mc_issue(PARAVIRT_LAZY_MMU);
1555 static void xen_flush_tlb_single(unsigned long addr)
1557 struct mmuext_op *op;
1558 struct multicall_space mcs;
1562 mcs = xen_mc_entry(sizeof(*op));
1564 op->cmd = MMUEXT_INVLPG_LOCAL;
1565 op->arg1.linear_addr = addr & PAGE_MASK;
1566 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1568 xen_mc_issue(PARAVIRT_LAZY_MMU);
1573 static void xen_flush_tlb_others(const struct cpumask *cpus,
1574 struct mm_struct *mm, unsigned long va)
1577 struct mmuext_op op;
1578 DECLARE_BITMAP(mask, NR_CPUS);
1580 struct multicall_space mcs;
1582 if (cpumask_empty(cpus))
1583 return; /* nothing to do */
1585 mcs = xen_mc_entry(sizeof(*args));
1587 args->op.arg2.vcpumask = to_cpumask(args->mask);
1589 /* Remove us, and any offline CPUS. */
1590 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1591 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1593 if (va == TLB_FLUSH_ALL) {
1594 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1596 args->op.cmd = MMUEXT_INVLPG_MULTI;
1597 args->op.arg1.linear_addr = va;
1600 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1602 xen_mc_issue(PARAVIRT_LAZY_MMU);
1605 static unsigned long xen_read_cr3(void)
1607 return percpu_read(xen_cr3);
1610 static void set_current_cr3(void *v)
1612 percpu_write(xen_current_cr3, (unsigned long)v);
1615 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1617 struct mmuext_op *op;
1618 struct multicall_space mcs;
1622 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1626 WARN_ON(mfn == 0 && kernel);
1628 mcs = __xen_mc_entry(sizeof(*op));
1631 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1634 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1637 percpu_write(xen_cr3, cr3);
1639 /* Update xen_current_cr3 once the batch has actually been submitted */
1641 xen_mc_callback(set_current_cr3, (void *)cr3);
1645 static void xen_write_cr3(unsigned long cr3)
1647 BUG_ON(preemptible());
1649 xen_mc_batch(); /* disables interrupts */
1651 /* Update while interrupts are disabled, so it's atomic with respect to ipis */
1653 percpu_write(xen_cr3, cr3);
1655 __xen_write_cr3(true, cr3);
1657 #ifdef CONFIG_X86_64
1659 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1661 __xen_write_cr3(false, __pa(user_pgd));
1663 __xen_write_cr3(false, 0);
1667 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1670 static int xen_pgd_alloc(struct mm_struct *mm)
1672 pgd_t *pgd = mm->pgd;
1675 BUG_ON(PagePinned(virt_to_page(pgd)));
1677 #ifdef CONFIG_X86_64
1679 struct page *page = virt_to_page(pgd);
1682 BUG_ON(page->private != 0);
1686 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1687 page->private = (unsigned long)user_pgd;
1689 if (user_pgd != NULL) {
1690 user_pgd[pgd_index(VSYSCALL_START)] =
1691 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1695 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1702 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1704 #ifdef CONFIG_X86_64
1705 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1708 free_page((unsigned long)user_pgd);
1712 #ifdef CONFIG_X86_32
1713 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1715 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1716 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1717 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte));
1723 /* Init-time set_pte while constructing initial pagetables, which
1724 doesn't allow RO pagetable pages to be remapped RW */
1725 static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1727 pte = mask_rw_pte(ptep, pte);
1729 xen_set_pte(ptep, pte);
1733 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1735 struct mmuext_op op;
1737 op.arg1.mfn = pfn_to_mfn(pfn);
1738 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1742 /* Early in boot, while setting up the initial pagetable, assume
1743 everything is pinned. */
1744 static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1746 #ifdef CONFIG_FLATMEM
1747 BUG_ON(mem_map); /* should only be used early */
1749 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1750 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1753 /* Used for pmd and pud */
1754 static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1756 #ifdef CONFIG_FLATMEM
1757 BUG_ON(mem_map); /* should only be used early */
1759 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1762 /* Early release_pte assumes that all pts are pinned, since there's
1763 only init_mm and anything attached to that is pinned. */
1764 static __init void xen_release_pte_init(unsigned long pfn)
1766 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1767 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1770 static __init void xen_release_pmd_init(unsigned long pfn)
1772 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1775 /* This needs to make sure the new pte page is pinned iff it's being
1776 attached to a pinned pagetable. */
1777 static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
1779 struct page *page = pfn_to_page(pfn);
1781 if (PagePinned(virt_to_page(mm->pgd))) {
1782 SetPagePinned(page);
1784 if (!PageHighMem(page)) {
1785 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1786 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1787 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1789 /* make sure there are no stray mappings of this page */
1791 kmap_flush_unused();
1796 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1798 xen_alloc_ptpage(mm, pfn, PT_PTE);
1801 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1803 xen_alloc_ptpage(mm, pfn, PT_PMD);
1806 /* This should never happen until we're OK to use struct page */
1807 static void xen_release_ptpage(unsigned long pfn, unsigned level)
1809 struct page *page = pfn_to_page(pfn);
1811 if (PagePinned(page)) {
1812 if (!PageHighMem(page)) {
1813 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1814 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1815 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1817 ClearPagePinned(page);
1821 static void xen_release_pte(unsigned long pfn)
1823 xen_release_ptpage(pfn, PT_PTE);
1826 static void xen_release_pmd(unsigned long pfn)
1828 xen_release_ptpage(pfn, PT_PMD);
1831 #if PAGETABLE_LEVELS == 4
1832 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1834 xen_alloc_ptpage(mm, pfn, PT_PUD);
1837 static void xen_release_pud(unsigned long pfn)
1839 xen_release_ptpage(pfn, PT_PUD);
1843 void __init xen_reserve_top(void)
1845 #ifdef CONFIG_X86_32
1846 unsigned long top = HYPERVISOR_VIRT_START;
1847 struct xen_platform_parameters pp;
1849 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1850 top = pp.virt_start;
1852 reserve_top_address(-top);
1853 #endif /* CONFIG_X86_32 */
1857 * Like __va(), but returns address in the kernel mapping (which is
1858 * all we have until the physical memory mapping has been set up).
1860 static void *__ka(phys_addr_t paddr)
1862 #ifdef CONFIG_X86_64
1863 return (void *)(paddr + __START_KERNEL_map);
1869 /* Convert a machine address to physical address */
1870 static unsigned long m2p(phys_addr_t maddr)
1874 maddr &= PTE_PFN_MASK;
1875 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1880 /* Convert a machine address to kernel virtual */
1881 static void *m2v(phys_addr_t maddr)
1883 return __ka(m2p(maddr));
1886 static void set_page_prot(void *addr, pgprot_t prot)
1888 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1889 pte_t pte = pfn_pte(pfn, prot);
1891 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1895 static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1897 unsigned pmdidx, pteidx;
1901 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1906 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1909 /* Reuse or allocate a page of ptes */
1910 if (pmd_present(pmd[pmdidx]))
1911 pte_page = m2v(pmd[pmdidx].pmd);
1913 /* Check for free pte pages */
1914 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1917 pte_page = &level1_ident_pgt[ident_pte];
1918 ident_pte += PTRS_PER_PTE;
1920 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1923 /* Install mappings */
1924 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1927 if (pfn > max_pfn_mapped)
1928 max_pfn_mapped = pfn;
1930 if (!pte_none(pte_page[pteidx]))
1933 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1934 pte_page[pteidx] = pte;
1938 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1939 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1941 set_page_prot(pmd, PAGE_KERNEL_RO);
1944 #ifdef CONFIG_X86_64
1945 static void convert_pfn_mfn(void *v)
1950 /* All levels are converted the same way, so just treat them as ptes */
1952 for (i = 0; i < PTRS_PER_PTE; i++)
1953 pte[i] = xen_make_pte(pte[i].pte);
1957 * Set up the initial kernel pagetable.
1959 * We can construct this by grafting the Xen provided pagetable into
1960 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1961 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1962 * means that only the kernel has a physical mapping to start with -
1963 * but that's enough to get __va working. We need to fill in the rest
1964 * of the physical mapping once some sort of allocator has been set
1967 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1968 unsigned long max_pfn)
1973 /* Zap identity mapping */
1974 init_level4_pgt[0] = __pgd(0);
1976 /* Pre-constructed entries are in pfn, so convert to mfn */
1977 convert_pfn_mfn(init_level4_pgt);
1978 convert_pfn_mfn(level3_ident_pgt);
1979 convert_pfn_mfn(level3_kernel_pgt);
1981 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1982 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1984 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1985 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1987 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1988 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1989 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1991 /* Set up identity map */
1992 xen_map_identity_early(level2_ident_pgt, max_pfn);
1994 /* Make pagetable pieces RO */
1995 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1996 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1997 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1998 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1999 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
2000 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
2002 /* Pin down new L4 */
2003 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
2004 PFN_DOWN(__pa_symbol(init_level4_pgt)));
2006 /* Unpin Xen-provided one */
2007 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2010 pgd = init_level4_pgt;
2013 * At this stage there can be no user pgd, and no page
2014 * structure to attach it to, so make sure we just set the kernel pgd.
2018 __xen_write_cr3(true, __pa(pgd));
2019 xen_mc_issue(PARAVIRT_LAZY_CPU);
2021 reserve_early(__pa(xen_start_info->pt_base),
2022 __pa(xen_start_info->pt_base +
2023 xen_start_info->nr_pt_frames * PAGE_SIZE),
2028 #else /* !CONFIG_X86_64 */
2029 static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
2031 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2032 unsigned long max_pfn)
2036 level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
2038 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2039 xen_start_info->nr_pt_frames * PAGE_SIZE +
2042 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2043 memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
2045 xen_map_identity_early(level2_kernel_pgt, max_pfn);
2047 memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
2048 set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
2049 __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
2051 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
2052 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2053 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2055 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2057 xen_write_cr3(__pa(swapper_pg_dir));
2059 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
2061 reserve_early(__pa(xen_start_info->pt_base),
2062 __pa(xen_start_info->pt_base +
2063 xen_start_info->nr_pt_frames * PAGE_SIZE),
2066 return swapper_pg_dir;
2068 #endif /* CONFIG_X86_64 */
2070 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2074 phys >>= PAGE_SHIFT;
2077 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2078 #ifdef CONFIG_X86_F00F_BUG
2081 #ifdef CONFIG_X86_32
2084 # ifdef CONFIG_HIGHMEM
2085 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2088 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
2090 #ifdef CONFIG_X86_LOCAL_APIC
2091 case FIX_APIC_BASE: /* maps dummy local APIC */
2093 case FIX_TEXT_POKE0:
2094 case FIX_TEXT_POKE1:
2095 /* All local page mappings */
2096 pte = pfn_pte(phys, prot);
2099 case FIX_PARAVIRT_BOOTMAP:
2100 /* This is an MFN, but it isn't an IO mapping from the IO domain */
2102 pte = mfn_pte(phys, prot);
2106 /* By default, set_fixmap is used for hardware mappings */
2107 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2111 __native_set_fixmap(idx, pte);
2113 #ifdef CONFIG_X86_64
2114 /* Replicate changes to map the vsyscall page into the user
2115 pagetable vsyscall mapping. */
2116 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
2117 unsigned long vaddr = __fix_to_virt(idx);
2118 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2123 static __init void xen_post_allocator_init(void)
2125 pv_mmu_ops.set_pte = xen_set_pte;
2126 pv_mmu_ops.set_pmd = xen_set_pmd;
2127 pv_mmu_ops.set_pud = xen_set_pud;
2128 #if PAGETABLE_LEVELS == 4
2129 pv_mmu_ops.set_pgd = xen_set_pgd;
2132 /* This will work as long as patching hasn't happened yet
2133 (which it hasn't) */
2134 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2135 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2136 pv_mmu_ops.release_pte = xen_release_pte;
2137 pv_mmu_ops.release_pmd = xen_release_pmd;
2138 #if PAGETABLE_LEVELS == 4
2139 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2140 pv_mmu_ops.release_pud = xen_release_pud;
2143 #ifdef CONFIG_X86_64
2144 SetPagePinned(virt_to_page(level3_user_vsyscall));
2146 xen_mark_init_mm_pinned();
2149 static void xen_leave_lazy_mmu(void)
2153 paravirt_leave_lazy_mmu();
2157 static const struct pv_mmu_ops xen_mmu_ops __initdata = {
2158 .read_cr2 = xen_read_cr2,
2159 .write_cr2 = xen_write_cr2,
2161 .read_cr3 = xen_read_cr3,
2162 .write_cr3 = xen_write_cr3,
2164 .flush_tlb_user = xen_flush_tlb,
2165 .flush_tlb_kernel = xen_flush_tlb,
2166 .flush_tlb_single = xen_flush_tlb_single,
2167 .flush_tlb_others = xen_flush_tlb_others,
2169 .pte_update = paravirt_nop,
2170 .pte_update_defer = paravirt_nop,
2172 .pgd_alloc = xen_pgd_alloc,
2173 .pgd_free = xen_pgd_free,
2175 .alloc_pte = xen_alloc_pte_init,
2176 .release_pte = xen_release_pte_init,
2177 .alloc_pmd = xen_alloc_pmd_init,
2178 .alloc_pmd_clone = paravirt_nop,
2179 .release_pmd = xen_release_pmd_init,
2181 #ifdef CONFIG_X86_64
2182 .set_pte = xen_set_pte,
2184 .set_pte = xen_set_pte_init,
2186 .set_pte_at = xen_set_pte_at,
2187 .set_pmd = xen_set_pmd_hyper,
2189 .ptep_modify_prot_start = __ptep_modify_prot_start,
2190 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2192 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2193 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2195 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2196 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2198 #ifdef CONFIG_X86_PAE
2199 .set_pte_atomic = xen_set_pte_atomic,
2200 .pte_clear = xen_pte_clear,
2201 .pmd_clear = xen_pmd_clear,
2202 #endif /* CONFIG_X86_PAE */
2203 .set_pud = xen_set_pud_hyper,
2205 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2206 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2208 #if PAGETABLE_LEVELS == 4
2209 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2210 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2211 .set_pgd = xen_set_pgd_hyper,
2213 .alloc_pud = xen_alloc_pmd_init,
2214 .release_pud = xen_release_pmd_init,
2215 #endif /* PAGETABLE_LEVELS == 4 */
2217 .activate_mm = xen_activate_mm,
2218 .dup_mmap = xen_dup_mmap,
2219 .exit_mmap = xen_exit_mmap,
2222 .enter = paravirt_enter_lazy_mmu,
2223 .leave = xen_leave_lazy_mmu,
2226 .set_fixmap = xen_set_fixmap,
2229 void __init xen_init_mmu_ops(void)
2231 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2232 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2233 pv_mmu_ops = xen_mmu_ops;
2235 vmap_lazy_unmap = false;
2238 /* Protected by xen_reservation_lock. */
2239 #define MAX_CONTIG_ORDER 9 /* 2MB */
2240 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2242 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2243 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2244 unsigned long *in_frames,
2245 unsigned long *out_frames)
2248 struct multicall_space mcs;
2251 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2252 mcs = __xen_mc_entry(0);
2255 in_frames[i] = virt_to_mfn(vaddr);
2257 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2258 set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2261 out_frames[i] = virt_to_pfn(vaddr);
2267 * Update the pfn-to-mfn mappings for a virtual address range, either to
2268 * point to an array of mfns, or contiguously from a single starting mfn.
2271 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2272 unsigned long *mfns,
2273 unsigned long first_mfn)
2280 limit = 1u << order;
2281 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2282 struct multicall_space mcs;
2285 mcs = __xen_mc_entry(0);
2289 mfn = first_mfn + i;
2291 if (i < (limit - 1))
2295 flags = UVMF_INVLPG | UVMF_ALL;
2297 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2300 MULTI_update_va_mapping(mcs.mc, vaddr,
2301 mfn_pte(mfn, PAGE_KERNEL), flags);
2303 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2310 * Perform the hypercall to exchange a region of our pfns to point to
2311 * memory with the required contiguous alignment. Takes the pfns as
2312 * input, and populates mfns as output.
2314 * Returns a success code indicating whether the hypervisor was able to
2315 * satisfy the request or not.
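 *
 * For example, xen_create_contiguous_region() below trades 2^order
 * single-page extents for one extent of the requested order:
 *
 *     xen_exchange_memory(1UL << order, 0, in_frames,
 *                         1, order, &out_frame, address_bits);
 *
 * and xen_destroy_contiguous_region() performs the inverse exchange.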
2317 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2318 unsigned long *pfns_in,
2319 unsigned long extents_out,
2320 unsigned int order_out,
2321 unsigned long *mfns_out,
2322 unsigned int address_bits)
2327 struct xen_memory_exchange exchange = {
2329 .nr_extents = extents_in,
2330 .extent_order = order_in,
2331 .extent_start = pfns_in,
2335 .nr_extents = extents_out,
2336 .extent_order = order_out,
2337 .extent_start = mfns_out,
2338 .address_bits = address_bits,
2343 BUG_ON(extents_in << order_in != extents_out << order_out);
2345 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2346 success = (exchange.nr_exchanged == extents_in);
2348 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2349 BUG_ON(success && (rc != 0));
2354 int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2355 unsigned int address_bits)
2357 unsigned long *in_frames = discontig_frames, out_frame;
2358 unsigned long flags;
2362 * Currently an auto-translated guest will not perform I/O, nor will
2363 * it require PAE page directories below 4GB. Therefore any calls to
2364 * this function are redundant and can be ignored.
2367 if (xen_feature(XENFEAT_auto_translated_physmap))
2370 if (unlikely(order > MAX_CONTIG_ORDER))
2373 memset((void *) vstart, 0, PAGE_SIZE << order);
2375 spin_lock_irqsave(&xen_reservation_lock, flags);
2377 /* 1. Zap current PTEs, remembering MFNs. */
2378 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2380 /* 2. Get a new contiguous memory extent. */
2381 out_frame = virt_to_pfn(vstart);
2382 success = xen_exchange_memory(1UL << order, 0, in_frames,
2383 1, order, &out_frame,
2386 /* 3. Map the new extent in place of old pages. */
2388 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2390 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2392 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2394 return success ? 0 : -ENOMEM;
2396 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2398 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2400 unsigned long *out_frames = discontig_frames, in_frame;
2401 unsigned long flags;
2404 if (xen_feature(XENFEAT_auto_translated_physmap))
2407 if (unlikely(order > MAX_CONTIG_ORDER))
2410 memset((void *) vstart, 0, PAGE_SIZE << order);
2412 spin_lock_irqsave(&xen_reservation_lock, flags);
2414 /* 1. Find start MFN of contiguous extent. */
2415 in_frame = virt_to_mfn(vstart);
2417 /* 2. Zap current PTEs. */
2418 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2420 /* 3. Do the exchange for non-contiguous MFNs. */
2421 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2424 /* 4. Map new pages in place of old pages. */
2426 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2428 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2430 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2432 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2434 #ifdef CONFIG_XEN_PVHVM
2435 static void xen_hvm_exit_mmap(struct mm_struct *mm)
2437 struct xen_hvm_pagetable_dying a;
2440 a.domid = DOMID_SELF;
2441 a.gpa = __pa(mm->pgd);
2442 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2443 WARN_ON_ONCE(rc < 0);
2446 static int is_pagetable_dying_supported(void)
2448 struct xen_hvm_pagetable_dying a;
2451 a.domid = DOMID_SELF;
2453 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2455 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2461 void __init xen_hvm_init_mmu_ops(void)
2463 if (is_pagetable_dying_supported())
2464 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2468 #ifdef CONFIG_XEN_DEBUG_FS
2470 static struct dentry *d_mmu_debug;
2472 static int __init xen_mmu_debugfs(void)
2474 struct dentry *d_xen = xen_init_debugfs();
2479 d_mmu_debug = debugfs_create_dir("mmu", d_xen);
2481 debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
2483 debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
2484 debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
2485 &mmu_stats.pgd_update_pinned);
2486 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
2487 &mmu_stats.pgd_update_batched);
2489 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
2490 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
2491 &mmu_stats.pud_update_pinned);
2492 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
2493 &mmu_stats.pud_update_batched);
2495 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
2496 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
2497 &mmu_stats.pmd_update_pinned);
2498 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
2499 &mmu_stats.pmd_update_batched);
2501 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
2502 // debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
2503 // &mmu_stats.pte_update_pinned);
2504 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
2505 &mmu_stats.pte_update_batched);
2507 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
2508 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
2509 &mmu_stats.mmu_update_extended);
2510 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2511 mmu_stats.mmu_update_histo, MMU_UPDATE_HISTO);
2513 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2514 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2515 &mmu_stats.set_pte_at_batched);
2516 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2517 &mmu_stats.set_pte_at_current);
2518 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2519 &mmu_stats.set_pte_at_kernel);
2521 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2522 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2523 &mmu_stats.prot_commit_batched);
2527 fs_initcall(xen_mmu_debugfs);
2529 #endif /* CONFIG_XEN_DEBUG_FS */