XEN_CALL_FUNCTION_VECTOR,
XEN_CALL_FUNCTION_SINGLE_VECTOR,
XEN_SPIN_UNLOCK_VECTOR,
- XEN_IRQ_WORK_VECTOR,
XEN_NR_IPIS,
};
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
-#include <xen/acpi.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
#include "xen-ops.h"
#include "mmu.h"
-#include "smp.h"
#include "multicalls.h"
EXPORT_SYMBOL_GPL(hypercall_page);
apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
apic->set_apic_id = xen_set_apic_id;
apic->get_apic_id = xen_get_apic_id;
-
-#ifdef CONFIG_SMP
- apic->send_IPI_allbutself = xen_send_IPI_allbutself;
- apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
- apic->send_IPI_mask = xen_send_IPI_mask;
- apic->send_IPI_all = xen_send_IPI_all;
- apic->send_IPI_self = xen_send_IPI_self;
-#endif
}
#endif
/* Make sure ACS will be enabled */
pci_request_acs();
-
- xen_acpi_sleep_register();
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
return true;
}
-static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
+static bool __init __early_alloc_p2m(unsigned long pfn)
{
unsigned topidx, mididx, idx;
- unsigned long *p2m;
- unsigned long *mid_mfn_p;
topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
/* Pfff.. No boundary cross-over, lets get out. */
- if (!idx && check_boundary)
+ if (!idx)
return false;
WARN(p2m_top[topidx][mididx] == p2m_identity,
return false;
/* Boundary cross-over for the edges: */
- p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_init(p2m);
-
- p2m_top[topidx][mididx] = p2m;
-
- /* For save/restore we need to MFN of the P2M saved */
-
- mid_mfn_p = p2m_top_mfn_p[topidx];
- WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
- "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
- topidx, mididx);
- mid_mfn_p[mididx] = virt_to_mfn(p2m);
-
- return true;
-}
-
-static bool __init early_alloc_p2m(unsigned long pfn)
-{
- unsigned topidx = p2m_top_index(pfn);
- unsigned long *mid_mfn_p;
- unsigned long **mid;
-
- mid = p2m_top[topidx];
- mid_mfn_p = p2m_top_mfn_p[topidx];
- if (mid == p2m_mid_missing) {
- mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_mid_init(mid);
+ if (idx) {
+ unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ unsigned long *mid_mfn_p;
- p2m_top[topidx] = mid;
+ p2m_init(p2m);
- BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
- }
- /* And the save/restore P2M tables.. */
- if (mid_mfn_p == p2m_mid_missing_mfn) {
- mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_mfn_init(mid_mfn_p);
+ p2m_top[topidx][mididx] = p2m;
- p2m_top_mfn_p[topidx] = mid_mfn_p;
- p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
- /* Note: we don't set mid_mfn_p[midix] here,
- * look in early_alloc_p2m_middle */
- }
- return true;
-}
-bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
- if (!early_alloc_p2m(pfn))
- return false;
-
- if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
- return false;
+ /* For save/restore we need the MFN of the P2M saved */
+
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+ "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+ topidx, mididx);
+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
- if (!__set_phys_to_machine(pfn, mfn))
- return false;
}
-
- return true;
+ return idx != 0;
}
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e)
pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
{
- WARN_ON(!early_alloc_p2m(pfn));
+ unsigned topidx = p2m_top_index(pfn);
+ unsigned long *mid_mfn_p;
+ unsigned long **mid;
+
+ mid = p2m_top[topidx];
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ if (mid == p2m_mid_missing) {
+ mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+ p2m_mid_init(mid);
+
+ p2m_top[topidx] = mid;
+
+ BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+ }
+ /* And the save/restore P2M tables.. */
+ if (mid_mfn_p == p2m_mid_missing_mfn) {
+ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_mfn_init(mid_mfn_p);
+
+ p2m_top_mfn_p[topidx] = mid_mfn_p;
+ p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+ /* Note: we don't set mid_mfn_p[mididx] here,
+ * look in __early_alloc_p2m */
+ }
}
- early_alloc_p2m_middle(pfn_s, true);
- early_alloc_p2m_middle(pfn_e, true);
+ __early_alloc_p2m(pfn_s);
+ __early_alloc_p2m(pfn_e);
for (pfn = pfn_s; pfn < pfn_e; pfn++)
if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
+
#include "xen-ops.h"
#include "vdso.h"
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
-static unsigned long __init xen_do_chunk(unsigned long start,
- unsigned long end, bool release)
+static unsigned long __init xen_release_chunk(unsigned long start,
+ unsigned long end)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
unsigned long pfn;
int ret;
- for (pfn = start; pfn < end; pfn++) {
- unsigned long frame;
+ for (pfn = start; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
- if (release) {
- /* Make sure pfn exists to start with */
- if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
- continue;
- frame = mfn;
- } else {
- if (mfn != INVALID_P2M_ENTRY)
- continue;
- frame = pfn;
- }
- set_xen_guest_handle(reservation.extent_start, &frame);
+ /* Make sure pfn exists to start with */
+ if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+ continue;
+
+ set_xen_guest_handle(reservation.extent_start, &mfn);
reservation.nr_extents = 1;
- ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
- WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
- release ? "release" : "populate", pfn, ret);
-
+ WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
- if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
- if (release)
- break;
- set_xen_guest_handle(reservation.extent_start, &frame);
- reservation.nr_extents = 1;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
- &reservation);
- break;
- }
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
len++;
- } else
- break;
+ }
}
- if (len)
- printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
- release ? "Freeing" : "Populating",
- start, end, len,
- release ? "freed" : "added");
+ printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
+ start, end, len);
return len;
}
-static unsigned long __init xen_populate_chunk(
- const struct e820entry *list, size_t map_size,
- unsigned long max_pfn, unsigned long *last_pfn,
- unsigned long credits_left)
-{
- const struct e820entry *entry;
- unsigned int i;
- unsigned long done = 0;
- unsigned long dest_pfn;
- for (i = 0, entry = list; i < map_size; i++, entry++) {
- unsigned long credits = credits_left;
- unsigned long s_pfn;
- unsigned long e_pfn;
- unsigned long pfns;
- long capacity;
-
- if (credits <= 0)
- break;
-
- if (entry->type != E820_RAM)
- continue;
-
- e_pfn = PFN_UP(entry->addr + entry->size);
-
- /* We only care about E820 after the xen_start_info->nr_pages */
- if (e_pfn <= max_pfn)
- continue;
-
- s_pfn = PFN_DOWN(entry->addr);
- /* If the E820 falls within the nr_pages, we want to start
- * at the nr_pages PFN.
- * If that would mean going past the E820 entry, skip it
- */
- if (s_pfn <= max_pfn) {
- capacity = e_pfn - max_pfn;
- dest_pfn = max_pfn;
- } else {
- /* last_pfn MUST be within E820_RAM regions */
- if (*last_pfn && e_pfn >= *last_pfn)
- s_pfn = *last_pfn;
- capacity = e_pfn - s_pfn;
- dest_pfn = s_pfn;
- }
- /* If we had filled this E820_RAM entry, go to the next one. */
- if (capacity <= 0)
- continue;
-
- if (credits > capacity)
- credits = capacity;
-
- pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
- done += pfns;
- credits_left -= pfns;
- *last_pfn = (dest_pfn + pfns);
- }
- return done;
-}
static unsigned long __init xen_set_identity_and_release(
const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
*/
for (i = 0, entry = list; i < map_size; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
+
if (entry->type == E820_RAM || i == map_size - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
if (start_pfn < end_pfn) {
if (start_pfn < nr_pages)
- released += xen_do_chunk(
- start_pfn, min(end_pfn, nr_pages), true);
+ released += xen_release_chunk(
+ start_pfn, min(end_pfn, nr_pages));
identity += set_phys_range_identity(
start_pfn, end_pfn);
}
}
- if (released)
- printk(KERN_INFO "Released %lu pages of unused memory\n", released);
- if (identity)
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+ printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
return released;
}
* the current maximum rather than the static maximum. In this
* case the e820 map provided to us will cover the static
* maximum region.
- *
- * The dom0_mem=min:X,max:Y tweaks options differently depending
- * on the version, but in general this is what we get:
- * | XENMEM_maximum_reser | nr_pages
- * --------------++-----------------------+-------------------
- * no dom0_mem | INT_MAX | max_phys_pfn
- * =3G | INT_MAX | 786432
- * =max:3G | 786432 | 786432
- * =min:1G,max:3G| INT_MAX | max_phys_fn
- * =1G,max:3G | INT_MAX | 262144
- * =min:1G,max:3G,2G | INT_MAX | max_phys_fn
- *
- * The =3G is often used and it lead to us initially setting
- * 786432 and allowing dom0 to balloon up to the max_physical_pfn.
- * This is at odd with the classic XenOClassic so lets emulate
- * the classic behavior.
*/
if (xen_initial_domain()) {
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
if (ret > 0)
max_pages = ret;
- if (ret == -1UL)
- max_pages = xen_start_info->nr_pages;
}
return min(max_pages, MAX_DOMAIN_PAGES);
}
-static unsigned long xen_get_current_pages(void)
-{
- domid_t domid = DOMID_SELF;
- int ret;
- ret = HYPERVISOR_memory_op(XENMEM_current_reservation, &domid);
- if (ret > 0)
- return ret;
- return 0;
-}
+
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
u64 end = start + size;
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
- unsigned long last_pfn = 0;
unsigned long extra_pages = 0;
- unsigned long populated;
int i;
int op;
*/
xen_released_pages = xen_set_identity_and_release(
map, memmap.nr_entries, max_pfn);
+ extra_pages += xen_released_pages;
- /*
- * Populate back the non-RAM pages and E820 gaps that had been
- * released. But cap it as certain regions cannot be repopulated.
- */
- if (xen_get_current_pages())
- xen_released_pages = min(max_pfn - xen_get_current_pages(),
- xen_released_pages);
- populated = xen_populate_chunk(map, memmap.nr_entries,
- max_pfn, &last_pfn, xen_released_pages);
-
- extra_pages += (xen_released_pages - populated);
-
- if (last_pfn > max_pfn) {
- max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
- mem_end = PFN_PHYS(max_pfn);
- }
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
* factor the base size. On non-highmem systems, the base
*/
extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages);
+
i = 0;
while (i < memmap.nr_entries) {
u64 addr = map[i].addr;
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/irq_work.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
-static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
* Reschedule call back.
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu) = rc;
- callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
- rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
- cpu,
- xen_irq_work_interrupt,
- IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
- callfunc_name,
- NULL);
- if (rc < 0)
- goto fail;
- per_cpu(xen_irq_work, cpu) = rc;
-
return 0;
fail:
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
NULL);
- if (per_cpu(xen_irq_work, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
return rc;
}
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
-static void __xen_send_IPI_mask(const struct cpumask *mask,
- int vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+ enum ipi_vector vector)
{
unsigned cpu;
{
int cpu;
- __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+ xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
/* Make sure other vcpus get a chance to run if they need to. */
for_each_cpu(cpu, mask) {
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- __xen_send_IPI_mask(cpumask_of(cpu),
+ xen_send_IPI_mask(cpumask_of(cpu),
XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
-static inline int xen_map_vector(int vector)
-{
- int xen_vector;
-
- switch (vector) {
- case RESCHEDULE_VECTOR:
- xen_vector = XEN_RESCHEDULE_VECTOR;
- break;
- case CALL_FUNCTION_VECTOR:
- xen_vector = XEN_CALL_FUNCTION_VECTOR;
- break;
- case CALL_FUNCTION_SINGLE_VECTOR:
- xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
- break;
- case IRQ_WORK_VECTOR:
- xen_vector = XEN_IRQ_WORK_VECTOR;
- break;
- default:
- xen_vector = -1;
- printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
- vector);
- }
-
- return xen_vector;
-}
-
-void xen_send_IPI_mask(const struct cpumask *mask,
- int vector)
-{
- int xen_vector = xen_map_vector(vector);
-
- if (xen_vector >= 0)
- __xen_send_IPI_mask(mask, xen_vector);
-}
-
-void xen_send_IPI_all(int vector)
-{
- int xen_vector = xen_map_vector(vector);
-
- if (xen_vector >= 0)
- __xen_send_IPI_mask(cpu_online_mask, xen_vector);
-}
-
-void xen_send_IPI_self(int vector)
-{
- int xen_vector = xen_map_vector(vector);
-
- if (xen_vector >= 0)
- xen_send_IPI_one(smp_processor_id(), xen_vector);
-}
-
-void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector)
-{
- unsigned cpu;
- unsigned int this_cpu = smp_processor_id();
-
- if (!(num_online_cpus() > 1))
- return;
-
- for_each_cpu_and(cpu, mask, cpu_online_mask) {
- if (this_cpu == cpu)
- continue;
-
- xen_smp_send_call_function_single_ipi(cpu);
- }
-}
-
-void xen_send_IPI_allbutself(int vector)
-{
- int xen_vector = xen_map_vector(vector);
-
- if (xen_vector >= 0)
- xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
-}
-
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
irq_enter();
return IRQ_HANDLED;
}
-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
-{
- irq_enter();
- irq_work_run();
- inc_irq_stat(apic_irq_work_irqs);
- irq_exit();
-
- return IRQ_HANDLED;
-}
-
static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_smp_prepare_cpus,
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
native_cpu_die(cpu);
}
+++ /dev/null
-#ifndef _XEN_SMP_H
-
-extern void xen_send_IPI_mask(const struct cpumask *mask,
- int vector);
-extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector);
-extern void xen_send_IPI_allbutself(int vector);
-extern void physflat_send_IPI_allbutself(int vector);
-extern void xen_send_IPI_all(int vector);
-extern void xen_send_IPI_self(int vector);
-
-#endif
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
-obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
+obj-$(CONFIG_XEN_DOM0) += pci.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
+++ /dev/null
-/******************************************************************************
- * acpi.c
- * acpi file for domain 0 kernel
- *
- * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- * Copyright (c) 2011 Yu Ke ke.yu@intel.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <xen/acpi.h>
-#include <xen/interface/platform.h>
-#include <asm/xen/hypercall.h>
-#include <asm/xen/hypervisor.h>
-
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
- u32 pm1a_cnt, u32 pm1b_cnt)
-{
- struct xen_platform_op op = {
- .cmd = XENPF_enter_acpi_sleep,
- .interface_version = XENPF_INTERFACE_VERSION,
- .u = {
- .enter_acpi_sleep = {
- .pm1a_cnt_val = (u16)pm1a_cnt,
- .pm1b_cnt_val = (u16)pm1b_cnt,
- .sleep_state = sleep_state,
- },
- },
- };
-
- if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
- WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
- "Email xen-devel@lists.xensource.com Thank you.\n", \
- pm1a_cnt, pm1b_cnt);
- return -1;
- }
-
- HYPERVISOR_dom0_op(&op);
- return 1;
-}
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <linux/hardirq.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
struct page **pages, unsigned int count)
{
int i, ret;
- bool lazy = false;
pte_t *pte;
unsigned long mfn;
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
- if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
- }
-
for (i = 0; i < count; i++) {
/* Do not add to override if the map failed. */
if (map_ops[i].status)
return ret;
}
- if (lazy)
- arch_leave_lazy_mmu_mode();
-
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
struct page **pages, unsigned int count, bool clear_pte)
{
int i, ret;
- bool lazy = false;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
- if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
- arch_enter_lazy_mmu_mode();
- lazy = true;
- }
-
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i], clear_pte);
if (ret)
return ret;
}
- if (lazy)
- arch_leave_lazy_mmu_mode();
-
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+++ /dev/null
-/******************************************************************************
- * acpi.h
- * acpi file for domain 0 kernel
- *
- * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- * Copyright (c) 2011 Yu Ke <ke.yu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _XEN_ACPI_H
-#define _XEN_ACPI_H
-
-#include <linux/types.h>
-
-#ifdef CONFIG_XEN_DOM0
-#include <asm/xen/hypervisor.h>
-#include <xen/xen.h>
-#include <linux/acpi.h>
-
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
- u32 pm1a_cnt, u32 pm1b_cnd);
-
-static inline void xen_acpi_sleep_register(void)
-{
- if (xen_initial_domain())
- acpi_os_set_prepare_sleep(
- &xen_acpi_notify_hypervisor_state);
-}
-#else
-static inline void xen_acpi_sleep_register(void)
-{
-}
-#endif
-
-#endif /* _XEN_ACPI_H */