__xen_set_pte(ptep, pteval);
}
-void xen_set_clr_mmio_pvh_pte(unsigned long pfn, unsigned long mfn,
- int nr_mfns, int add_mapping)
-{
- struct physdev_map_iomem iomem;
-
- iomem.first_gfn = pfn;
- iomem.first_mfn = mfn;
- iomem.nr_mfns = nr_mfns;
- iomem.add_mapping = add_mapping;
-
- if (HYPERVISOR_physdev_op(PHYSDEVOP_map_iomem, &iomem))
- BUG();
-}
-
static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
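
[Annotation: the wrapper deleted above issued one PHYSDEVOP_map_iomem hypercall per invocation, and the loop removed further down drove it a single frame at a time. Purely for illustration, a batched teardown through the same (now removed) interface could have looked like the sketch below; iomem_unmap_range is a hypothetical name, not part of this patch.]

/* Hypothetical example only -- the interface is removed by this patch.
 * Unmap a contiguous run of 1:1 MMIO frames with a single
 * PHYSDEVOP_map_iomem hypercall instead of one call per frame. */
static void iomem_unmap_range(unsigned long pfn, int nr_mfns)
{
	struct physdev_map_iomem iomem = {
		.first_gfn   = pfn,
		.first_mfn   = pfn,	/* 1:1 mapping */
		.nr_mfns     = nr_mfns,
		.add_mapping = 0,	/* 0 == unmap */
	};

	if (HYPERVISOR_physdev_op(PHYSDEVOP_map_iomem, &iomem))
		BUG();
}
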
*identity += set_phys_range_identity(start_pfn, end_pfn);
}
-/* For PVH, the pfns [0..MAX] are mapped to mfn's in the EPT/NPT. The mfns
- * are released as part of this 1:1 mapping hypercall back to the dom heap.
- * Also, we map the entire IO space, ie, beyond max_pfn_mapped.
+
+/*
+ * PVH: Xen has already mapped the IO space in the EPT/NPT for us, so we
+ * just need to adjust the released and identity counts.
*/
-static void __init xen_pvh_identity_map_chunk(unsigned long start_pfn,
+static void __init xen_pvh_adjust_stats(unsigned long start_pfn,
unsigned long end_pfn, unsigned long *released,
unsigned long *identity, unsigned long max_pfn)
{
- unsigned long pfn;
- int numpfns = 1, add_mapping = 1;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn++)
- xen_set_clr_mmio_pvh_pte(pfn, pfn, numpfns, add_mapping);
-
if (start_pfn <= max_pfn) {
unsigned long end = min(max_pfn_mapped, end_pfn);
*released += end - start_pfn;
if (start_pfn < end_pfn) {
if (xlated_phys) {
- xen_pvh_identity_map_chunk(start_pfn,
+ xen_pvh_adjust_stats(start_pfn,
end_pfn, &released, &identity,
nr_pages);
} else {
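
[Annotation: with the mapping loop gone, the renamed function is pure bookkeeping. A sketch of the resulting body, reconstructed from the context lines above; the trailing set_phys_range_identity() call is assumed to survive unchanged, mirroring the function that precedes it in the file.]

/* Sketch of the post-patch function, reconstructed from context:
 * Xen already populated the EPT/NPT, so only the released/identity
 * accounting remains. */
static void __init xen_pvh_adjust_stats(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long *released,
	unsigned long *identity, unsigned long max_pfn)
{
	if (start_pfn <= max_pfn) {
		unsigned long end = min(max_pfn_mapped, end_pfn);
		*released += end - start_pfn;
	}
	*identity += set_phys_range_identity(start_pfn, end_pfn);
}
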
} u;
};
-#define PHYSDEVOP_map_iomem 30
-struct physdev_map_iomem {
- /* IN */
- uint64_t first_gfn;
- uint64_t first_mfn;
- uint32_t nr_mfns;
- uint32_t add_mapping; /* 1 == add mapping; 0 == unmap */
-
-};
-
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
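
[Annotation: for reference, the argument structure removed above packs to 24 bytes with no implicit padding on the ABIs Xen supports (two uint64_t fields followed by two uint32_t fields). A guest built against the old header could have verified that invariant as sketched below; the assertion is illustrative and was not part of the original header.]

#include <stdint.h>

/* Illustrative only: layout check for the removed ABI struct. */
struct physdev_map_iomem {
	/* IN */
	uint64_t first_gfn;
	uint64_t first_mfn;
	uint32_t nr_mfns;
	uint32_t add_mapping;	/* 1 == add mapping; 0 == unmap */
};

_Static_assert(sizeof(struct physdev_map_iomem) == 24,
	       "PHYSDEVOP_map_iomem argument layout must not change");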