/*
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
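/*
 * As a worked example (the frame numbers are made up): if the guest's
 * pfn 0x1000 is backed by machine frame 0x5432, then
 *
 *      pte_t pte = pfn_pte(0x1000, PAGE_KERNEL);
 *
 * goes through xen_make_pte(), so the value actually stored is
 * (0x5432 << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL), a machine address
 * the MMU can use directly.  Reading it back with pte_val() applies
 * machine_to_phys(), so the guest sees pfn 0x1000 plus the flags again.
 */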
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
        unsigned int level;
        pte_t *pte = lookup_address(address, &level);
        unsigned offset = address & ~PAGE_MASK;    /* offset within the page */

        BUG_ON(pte == NULL);

        return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
void make_lowmem_page_readonly(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);

        ptev = pte_wrprotect(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);

        ptev = pte_mkwrite(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;
        u->ptr = virt_to_machine(ptr).maddr;
        u->val = pmd_val_ma(val);
        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}
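/*
 * The hypervisor-mediated updates here (xen_set_pmd, xen_set_pud, and
 * xen_set_pte_at's batched path) follow the same multicall pattern:
 * reserve room in the current batch with xen_mc_entry(), fill in the
 * hypercall arguments, attach them with a MULTI_* macro, and let
 * xen_mc_issue() either flush immediately or, if the CPU is in
 * PARAVIRT_LAZY_MMU mode, leave the request queued so several updates
 * reach Xen in one hypercall.  A minimal sketch of the batching effect
 * (assuming pmdp0/pmdp1 are entries the caller may update, and a
 * context where lazy MMU updates are allowed):
 *
 *      arch_enter_lazy_mmu_mode();
 *      xen_set_pmd(pmdp0, val0);       (queued, no hypercall yet)
 *      xen_set_pmd(pmdp1, val1);       (queued in the same batch)
 *      arch_leave_lazy_mmu_mode();     (pending multicalls flushed here)
 */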
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <mfn,flags> stored as-is, to permit clearing entries */
        xen_set_pte(pte, mfn_pte(mfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
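/*
 * A typical caller (hypothetical example) maps a machine frame handed
 * to us by the hypervisor, say a shared page, at a fixed kernel
 * virtual address:
 *
 *      set_pte_mfn(vaddr, mfn, PAGE_KERNEL);
 *
 * Note that mfn is a machine frame number, not a pfn: the <mfn,flags>
 * pair is installed as-is, which is also what allows an entry to be
 * cleared by passing an empty mfn and flags.
 */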
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
{
        if (mm == current->mm || mm == &init_mm) {
                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                        struct multicall_space mcs;
                        mcs = xen_mc_entry(0);

                        MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                        return;
                } else
                        if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
                                return;
        }
        xen_set_pte(ptep, pteval);
}
#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;
        u->ptr = virt_to_machine(ptr).maddr;
        u->val = pud_val_ma(val);
        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}
void xen_set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        ptep->pte_low = 0;
        smp_wmb();              /* make sure low gets written first */
        ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
        xen_set_pmd(pmdp, __pmd(0));
}
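/*
 * The ordering above matters because a PAE pte is written as two 32-bit
 * halves.  xen_set_pte() stores pte_high first and pte_low (which holds
 * _PAGE_PRESENT) last, so no intermediate state pairs the new high word
 * with a stale-but-present low word; xen_pte_clear() does the reverse
 * and kills the present bit in pte_low before touching pte_high.
 */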
pteval_t xen_pte_val(pte_t pte)
{
        pteval_t ret = pte.pte;

        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;

        return ret;
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
        pmdval_t ret = pmd.pmd;
        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
        return ret;
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
        pgdval_t ret = pgd.pgd;
        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
        return ret;
}

pte_t xen_make_pte(pteval_t pte)
{
        if (pte & _PAGE_PRESENT) {
                pte = phys_to_machine(XPADDR(pte)).maddr;
                pte &= ~(_PAGE_PCD | _PAGE_PWT);
        }

        return (pte_t){ .pte = pte };
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
        if (pmd & _PAGE_PRESENT)
                pmd = phys_to_machine(XPADDR(pmd)).maddr;

        return (pmd_t){ pmd };
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
        if (pgd & _PAGE_PRESENT)
                pgd = phys_to_machine(XPADDR(pgd)).maddr;

        return (pgd_t){ pgd };
}
#else  /* !CONFIG_X86_PAE */
void xen_set_pte(pte_t *ptep, pte_t pte)
{
        *ptep = pte;
}

pteval_t xen_pte_val(pte_t pte)
{
        pteval_t ret = pte.pte;

        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr;

        return ret;
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
        pteval_t ret = pgd.pgd;
        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
        return ret;
}

pte_t xen_make_pte(pteval_t pte)
{
        if (pte & _PAGE_PRESENT) {
                pte = phys_to_machine(XPADDR(pte)).maddr;
                pte &= ~(_PAGE_PCD | _PAGE_PWT);
        }

        return (pte_t){ pte };
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
        if (pgd & _PAGE_PRESENT)
                pgd = phys_to_machine(XPADDR(pgd)).maddr;

        return (pgd_t){ pgd };
}
#endif  /* CONFIG_X86_PAE */
/*
  (Yet another) pagetable walker.  This one is intended for pinning a
  pagetable.  This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level.  It walks the entire pagetable, but it only bothers
  pinning pte pages which are below pte_limit.  In the normal case
  this will be TASK_SIZE, but at boot we need to pin up to
  FIXADDR_TOP.  But the important bit is that we don't pin beyond
  there, because then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
                    unsigned long limit)
{
        pgd_t *pgd = pgd_base;
        int flush = 0;
        unsigned long addr = 0;
        unsigned long pgd_next;

        BUG_ON(limit > FIXADDR_TOP);

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
                pud_t *pud;
                unsigned long pud_limit, pud_next;

                pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

                if (pgd_none(*pgd))
                        continue;

                pud = pud_offset(pgd, 0);

                if (PTRS_PER_PUD > 1) /* not folded */
                        flush |= (*func)(virt_to_page(pud), PT_PUD);

                for (; addr != pud_limit; pud++, addr = pud_next) {
                        pmd_t *pmd;
                        unsigned long pmd_limit;

                        pud_next = pud_addr_end(addr, pud_limit);

                        if (pud_next < limit)
                                pmd_limit = pud_next;
                        else
                                pmd_limit = limit;

                        if (pud_none(*pud))
                                continue;

                        pmd = pmd_offset(pud, 0);

                        if (PTRS_PER_PMD > 1) /* not folded */
                                flush |= (*func)(virt_to_page(pmd), PT_PMD);

                        for (; addr != pmd_limit; pmd++) {
                                addr += (PAGE_SIZE * PTRS_PER_PTE);
                                if ((pmd_limit-1) < (addr-1)) {
                                        addr = pmd_limit;
                                        break;
                                }

                                if (pmd_none(*pmd))
                                        continue;

                                flush |= (*func)(pmd_page(*pmd), PT_PTE);
                        }
                }
        }

        flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

        return flush;
}
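/*
 * The callback contract: func() is handed the struct page of one
 * pagetable page together with its level, and returns nonzero if
 * handling that page means a TLB flush will be needed before the
 * current batch completes; pgd_walk() ORs those results together for
 * its caller.  A minimal sketch of a conforming callback (purely
 * illustrative, not part of this file):
 *
 *      static int count_pt_page(struct page *page, enum pt_level level)
 *      {
 *              nr_pagetable_pages++;   (some counter the caller owns)
 *              return 0;               (no flush required)
 *      }
 *
 *      pgd_walk(mm->pgd, count_pt_page, TASK_SIZE);
 */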
static spinlock_t *lock_pte(struct page *page)
{
        spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
        ptl = __pte_lockptr(page);
        spin_lock(ptl);
#endif

        return ptl;
}

static void do_unlock(void *v)
{
        spinlock_t *ptl = v;
        spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        mcs = __xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = level;
        op->arg1.mfn = pfn_to_mfn(pfn);
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
static int pin_page(struct page *page, enum pt_level level)
{
        unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
        int flush;

        if (pgfl)
                flush = 0;              /* already pinned */
        else if (PageHighMem(page))
                /* kmaps need flushing if we found an unpinned
                   highpage */
                flush = 1;
        else {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                struct multicall_space mcs = __xen_mc_entry(0);
                spinlock_t *ptl = NULL;

                flush = 0;

                if (level == PT_PTE)
                        ptl = lock_pte(page);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL_RO),
                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);

                if (level == PT_PTE)
                        xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

                if (ptl) {
                        /* Queue a deferred unlock for when this batch
                           is completed. */
                        xen_mc_callback(do_unlock, ptl);
                }
        }

        return flush;
}
/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
        unsigned level;

        xen_mc_batch();

        if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
                /* re-enable interrupts for kmap_flush_unused */
                xen_mc_issue(0);
                kmap_flush_unused();
                xen_mc_batch();
        }

#ifdef CONFIG_X86_PAE
        level = MMUEXT_PIN_L3_TABLE;
#else
        level = MMUEXT_PIN_L2_TABLE;
#endif

        xen_do_pin(level, PFN_DOWN(__pa(pgd)));

        xen_mc_issue(0);
}
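/*
 * Roughly, the batch built above reaches Xen as (sketch only; the exact
 * grouping depends on how the multicalls get flushed):
 *
 *      update_va_mapping(pt_va, pfn_pte(pfn, PAGE_KERNEL_RO), ...)
 *              once per pagetable page found by pgd_walk(),
 *      mmuext_op(MMUEXT_PIN_L1_TABLE, ...) for each pte page, then
 *      mmuext_op(MMUEXT_PIN_L3_TABLE, <mfn of the pgd>)   (PAE case)
 *
 * after which Xen has validated the whole tree once and will refuse any
 * later attempt to map one of those pages writable.
 */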
/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
        SetPagePinned(page);
        return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
        pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}
static int unpin_page(struct page *page, enum pt_level level)
{
        unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);

        if (pgfl && !PageHighMem(page)) {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                spinlock_t *ptl = NULL;
                struct multicall_space mcs;

                if (level == PT_PTE) {
                        ptl = lock_pte(page);

                        xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
                }

                mcs = __xen_mc_entry(0);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL),
                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);

                if (ptl) {
                        /* unlock when batch completed */
                        xen_mc_callback(do_unlock, ptl);
                }
        }

        return 0;               /* never need to flush on unpin */
}
/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
        xen_mc_batch();

        xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

        pgd_walk(pgd, unpin_page, TASK_SIZE);

        xen_mc_issue(0);
}
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        spin_lock(&next->page_table_lock);
        xen_pgd_pin(next->pgd);
        spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        spin_lock(&mm->page_table_lock);
        xen_pgd_pin(mm->pgd);
        spin_unlock(&mm->page_table_lock);
}
#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
        struct mm_struct *mm = info;

        if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
                leave_mm(smp_processor_id());

        /* If this cpu still has a stale cr3 reference, then make sure
           it has been flushed. */
        if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
                load_cr3(swapper_pg_dir);
                arch_flush_lazy_cpu_mode();
        }
}
static void drop_mm_ref(struct mm_struct *mm)
{
        cpumask_t mask;
        unsigned cpu;

        if (current->active_mm == mm) {
                if (current->mm == mm)
                        load_cr3(swapper_pg_dir);
                else
                        leave_mm(smp_processor_id());
                arch_flush_lazy_cpu_mode();
        }

        /* Get the "official" set of cpus referring to our pagetable. */
        mask = mm->cpu_vm_mask;

        /* It's possible that a vcpu may have a stale reference to our
           cr3, because it's in lazy mode and hasn't yet flushed its
           set of pending hypercalls.  In this case, we can look at its
           actual current cr3 value, and force it to flush if needed. */
        for_each_online_cpu(cpu) {
                if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
                        cpu_set(cpu, mask);
        }

        if (!cpus_empty(mask))
                xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
        if (current->active_mm == mm)
                load_cr3(swapper_pg_dir);
}
#endif
/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
        get_cpu();              /* make sure we don't move around */
        drop_mm_ref(mm);
        put_cpu();

        spin_lock(&mm->page_table_lock);

        /* pgd may not be pinned in the error exit path of execve */
        if (PagePinned(virt_to_page(mm->pgd)))
                xen_pgd_unpin(mm->pgd);

        spin_unlock(&mm->page_table_lock);
}