	pgd = (pgd_t *)xen_start_info->pt_base;
+	if (!xen_initial_domain())
+		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+
+	__supported_pte_mask |= _PAGE_IOMAP;
	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
+#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/xen/hypercall.h>
static bool xen_iomap_pte(pte_t pte)
{
-	return xen_initial_domain() && (pte_flags(pte) & _PAGE_IOMAP);
+	return pte_flags(pte) & _PAGE_IOMAP;
}
static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
pte_t xen_make_pte(pteval_t pte)
{
-	if (unlikely(xen_initial_domain() && (pte & _PAGE_IOMAP)))
+	phys_addr_t addr = (pte & PTE_PFN_MASK);
+
+	/*
+	 * Unprivileged domains are allowed to do IOMAPpings for
+	 * PCI passthrough, but not map ISA space. The ISA
+	 * mappings are just dummy local mappings to keep other
+	 * parts of the kernel happy.
+	 */
+	if (unlikely(pte & _PAGE_IOMAP) &&
+			(xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
-	else
+	} else {
+		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
+	}
	return native_make_pte(pte);
}
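
For illustration only, the following standalone sketch walks through the decision the reworked xen_make_pte() makes above. It is not part of the patch: the _PAGE_IOMAP bit position and PTE_PFN_MASK below are stand-in values, and the dom0/domU state is a plain variable, so the branch logic can be exercised in userspace. ISA_END_ADDRESS is taken as 0x100000 (the end of the legacy 1 MiB ISA region), matching <asm/e820.h>, which is why that header is now included.

/*
 * Userspace sketch of the xen_make_pte() branch added above.
 * Assumptions: _PAGE_IOMAP and PTE_PFN_MASK are stand-in definitions;
 * ISA_END_ADDRESS matches the 1 MiB value from <asm/e820.h>.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PFN_MASK	0x000ffffffffff000ULL	/* stand-in: pfn bits 12..51 */
#define _PAGE_IOMAP	(1ULL << 10)		/* stand-in software flag */
#define ISA_END_ADDRESS	0x100000ULL		/* end of legacy ISA space (1 MiB) */

static bool initial_domain;			/* false: pretend we are a domU */

/*
 * Mirrors the new condition: the IOMAP meaning is kept for dom0, or for
 * addresses above the ISA hole in a domU (the PCI passthrough case).
 * Everything else falls through to the normal pfn-to-mfn path with
 * _PAGE_IOMAP cleared.
 */
static bool pte_keeps_iomap(uint64_t pte)
{
	uint64_t addr = pte & PTE_PFN_MASK;

	return (pte & _PAGE_IOMAP) &&
	       (initial_domain || addr >= ISA_END_ADDRESS);
}

int main(void)
{
	uint64_t isa_pte = 0xf0000ULL | _PAGE_IOMAP;	/* BIOS/ACPI area */
	uint64_t bar_pte = 0xfebf0000ULL | _PAGE_IOMAP;	/* typical PCI BAR */

	/* In a domU, the ISA-range request loses IOMAP (dummy local mapping),
	 * while the PCI BAR keeps its machine (iomap) translation. */
	printf("ISA pte keeps IOMAP: %d\n", pte_keeps_iomap(isa_pte));	/* 0 */
	printf("BAR pte keeps IOMAP: %d\n", pte_keeps_iomap(bar_pte));	/* 1 */
	return 0;
}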