From: Konrad Rzeszutek Wilk
Date: Mon, 7 May 2012 20:40:44 +0000 (-0400)
Subject: Revert "Merge branch 'stable/for-linus-3.5' into linux-next"
X-Git-Tag: next-20120724~28^2~18
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=fa070070031743e71504b4ae766aa3b48696b61d;p=karo-tx-linux.git

Revert "Merge branch 'stable/for-linus-3.5' into linux-next"

This reverts commit cbcc4ca92cce2c89fe4c03ec25f8ca0883204a5a, reversing
changes made to e51f9d116f2f0c1dd794a51004f0b9eea80de101.

Conflicts:
	arch/x86/xen/enlighten.c
---
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index cc146d51449e..1df35417c412 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -6,7 +6,6 @@ enum ipi_vector {
 	XEN_CALL_FUNCTION_VECTOR,
 	XEN_CALL_FUNCTION_SINGLE_VECTOR,
 	XEN_SPIN_UNLOCK_VECTOR,
-	XEN_IRQ_WORK_VECTOR,
 	XEN_NR_IPIS,
 };

diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 93971e841dd5..c34f96c2f7a0 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -44,7 +44,6 @@ extern unsigned long machine_to_phys_nr;

 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 					     unsigned long pfn_e);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 278e7aced20e..95dccce8e979 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,7 +42,6 @@
 #include
 #include
 #include
-#include
 #include
 #include

@@ -76,7 +75,6 @@

 #include "xen-ops.h"
 #include "mmu.h"
-#include "smp.h"
 #include "multicalls.h"

 EXPORT_SYMBOL_GPL(hypercall_page);
@@ -885,14 +883,6 @@ static void set_xen_basic_apic_ops(void)
 	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
 	apic->set_apic_id = xen_set_apic_id;
 	apic->get_apic_id = xen_get_apic_id;
-
-#ifdef CONFIG_SMP
-	apic->send_IPI_allbutself = xen_send_IPI_allbutself;
-	apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
-	apic->send_IPI_mask = xen_send_IPI_mask;
-	apic->send_IPI_all = xen_send_IPI_all;
-	apic->send_IPI_self = xen_send_IPI_self;
-#endif
 }
 #endif

@@ -1408,8 +1398,6 @@ asmlinkage void __init xen_start_kernel(void)

 		/* Make sure ACS will be enabled */
 		pci_request_acs();
-
-		xen_acpi_sleep_register();
 	}
 #ifdef CONFIG_PCI
 	/* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 04512d3a76f4..7ece122c6372 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -499,18 +499,16 @@ static bool alloc_p2m(unsigned long pfn)
 	return true;
 }

-static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
+static bool __init __early_alloc_p2m(unsigned long pfn)
 {
 	unsigned topidx, mididx, idx;
-	unsigned long *p2m;
-	unsigned long *mid_mfn_p;

 	topidx = p2m_top_index(pfn);
 	mididx = p2m_mid_index(pfn);
 	idx = p2m_index(pfn);

 	/* Pfff.. No boundary cross-over, lets get out. */
-	if (!idx && check_boundary)
+	if (!idx)
 		return false;

 	WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -524,66 +522,24 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary
 		return false;

 	/* Boundary cross-over for the edges: */
-	p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
-	p2m_init(p2m);
-
-	p2m_top[topidx][mididx] = p2m;
-
-	/* For save/restore we need to MFN of the P2M saved */
-
-	mid_mfn_p = p2m_top_mfn_p[topidx];
-	WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
-		"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-		topidx, mididx);
-	mid_mfn_p[mididx] = virt_to_mfn(p2m);
-
-	return true;
-}
-
-static bool __init early_alloc_p2m(unsigned long pfn)
-{
-	unsigned topidx = p2m_top_index(pfn);
-	unsigned long *mid_mfn_p;
-	unsigned long **mid;
-
-	mid = p2m_top[topidx];
-	mid_mfn_p = p2m_top_mfn_p[topidx];
-	if (mid == p2m_mid_missing) {
-		mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
-		p2m_mid_init(mid);
+	if (idx) {
+		unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		unsigned long *mid_mfn_p;

-		p2m_top[topidx] = mid;
+		p2m_init(p2m);

-		BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-	}
-	/* And the save/restore P2M tables.. */
-	if (mid_mfn_p == p2m_mid_missing_mfn) {
-		mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_mid_mfn_init(mid_mfn_p);
+		p2m_top[topidx][mididx] = p2m;

-		p2m_top_mfn_p[topidx] = mid_mfn_p;
-		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-		/* Note: we don't set mid_mfn_p[midix] here,
-		 * look in early_alloc_p2m_middle */
-	}
-	return true;
-}
-bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
-		if (!early_alloc_p2m(pfn))
-			return false;
-
-		if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
-			return false;
+		/* For save/restore we need to MFN of the P2M saved */
+
+		mid_mfn_p = p2m_top_mfn_p[topidx];
+		WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+			"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+			topidx, mididx);
+		mid_mfn_p[mididx] = virt_to_mfn(p2m);

-		if (!__set_phys_to_machine(pfn, mfn))
-			return false;
 	}
-
-	return true;
+	return idx != 0;
 }
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 				      unsigned long pfn_e)
@@ -603,11 +559,35 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 		pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
 		pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
 	{
-		WARN_ON(!early_alloc_p2m(pfn));
+		unsigned topidx = p2m_top_index(pfn);
+		unsigned long *mid_mfn_p;
+		unsigned long **mid;
+
+		mid = p2m_top[topidx];
+		mid_mfn_p = p2m_top_mfn_p[topidx];
+		if (mid == p2m_mid_missing) {
+			mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+			p2m_mid_init(mid);
+
+			p2m_top[topidx] = mid;
+
+			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+		}
+		/* And the save/restore P2M tables.. */
+		if (mid_mfn_p == p2m_mid_missing_mfn) {
+			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_mfn_init(mid_mfn_p);
+
+			p2m_top_mfn_p[topidx] = mid_mfn_p;
+			p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+			/* Note: we don't set mid_mfn_p[midix] here,
+			 * look in __early_alloc_p2m */
+		}
 	}

-	early_alloc_p2m_middle(pfn_s, true);
-	early_alloc_p2m_middle(pfn_e, true);
+	__early_alloc_p2m(pfn_s);
+	__early_alloc_p2m(pfn_e);

 	for (pfn = pfn_s; pfn < pfn_e; pfn++)
 		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8e7dcfd537ef..1ba8dff26753 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+
 #include "xen-ops.h"
 #include "vdso.h"

@@ -83,8 +84,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }

-static unsigned long __init xen_do_chunk(unsigned long start,
-	unsigned long end, bool release)
+static unsigned long __init xen_release_chunk(unsigned long start,
+	unsigned long end)
 {
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -95,108 +96,30 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 	unsigned long pfn;
 	int ret;

-	for (pfn = start; pfn < end; pfn++) {
-		unsigned long frame;
+	for(pfn = start; pfn < end; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);

-		if (release) {
-			/* Make sure pfn exists to start with */
-			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-				continue;
-			frame = mfn;
-		} else {
-			if (mfn != INVALID_P2M_ENTRY)
-				continue;
-			frame = pfn;
-		}
-		set_xen_guest_handle(reservation.extent_start, &frame);
+		/* Make sure pfn exists to start with */
+		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+			continue;
+
+		set_xen_guest_handle(reservation.extent_start, &mfn);
 		reservation.nr_extents = 1;

-		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
+		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
 					   &reservation);
-		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
-		     release ? "release" : "populate", pfn, ret);
-
+		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 		if (ret == 1) {
-			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
-				if (release)
-					break;
-				set_xen_guest_handle(reservation.extent_start, &frame);
-				reservation.nr_extents = 1;
-				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-							   &reservation);
-				break;
-			}
+			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 			len++;
-		} else
-			break;
+		}
 	}
-	if (len)
-		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
-		       release ? "Freeing" : "Populating",
-		       start, end, len,
-		       release ? "freed" : "added");
+	printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
+	       start, end, len);

 	return len;
 }

-static unsigned long __init xen_populate_chunk(
-	const struct e820entry *list, size_t map_size,
-	unsigned long max_pfn, unsigned long *last_pfn,
-	unsigned long credits_left)
-{
-	const struct e820entry *entry;
-	unsigned int i;
-	unsigned long done = 0;
-	unsigned long dest_pfn;
-	for (i = 0, entry = list; i < map_size; i++, entry++) {
-		unsigned long credits = credits_left;
-		unsigned long s_pfn;
-		unsigned long e_pfn;
-		unsigned long pfns;
-		long capacity;
-
-		if (credits <= 0)
-			break;
-
-		if (entry->type != E820_RAM)
-			continue;
-
-		e_pfn = PFN_UP(entry->addr + entry->size);
-
-		/* We only care about E820 after the xen_start_info->nr_pages */
-		if (e_pfn <= max_pfn)
-			continue;
-
-		s_pfn = PFN_DOWN(entry->addr);
-		/* If the E820 falls within the nr_pages, we want to start
-		 * at the nr_pages PFN.
-		 * If that would mean going past the E820 entry, skip it
-		 */
-		if (s_pfn <= max_pfn) {
-			capacity = e_pfn - max_pfn;
-			dest_pfn = max_pfn;
-		} else {
-			/* last_pfn MUST be within E820_RAM regions */
-			if (*last_pfn && e_pfn >= *last_pfn)
-				s_pfn = *last_pfn;
-			capacity = e_pfn - s_pfn;
-			dest_pfn = s_pfn;
-		}
-		/* If we had filled this E820_RAM entry, go to the next one. */
-		if (capacity <= 0)
-			continue;
-
-		if (credits > capacity)
-			credits = capacity;
-
-		pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
-		done += pfns;
-		credits_left -= pfns;
-		*last_pfn = (dest_pfn + pfns);
-	}
-	return done;
-}
 static unsigned long __init xen_set_identity_and_release(
 	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
@@ -219,6 +142,7 @@ static unsigned long __init xen_set_identity_and_release(
 	 */
 	for (i = 0, entry = list; i < map_size; i++, entry++) {
 		phys_addr_t end = entry->addr + entry->size;
+
 		if (entry->type == E820_RAM || i == map_size - 1) {
 			unsigned long start_pfn = PFN_DOWN(start);
 			unsigned long end_pfn = PFN_UP(end);
@@ -228,8 +152,8 @@ static unsigned long __init xen_set_identity_and_release(

 			if (start_pfn < end_pfn) {
 				if (start_pfn < nr_pages)
-					released += xen_do_chunk(
-						start_pfn, min(end_pfn, nr_pages), true);
+					released += xen_release_chunk(
+						start_pfn, min(end_pfn, nr_pages));

 				identity += set_phys_range_identity(
 					start_pfn, end_pfn);
@@ -238,10 +162,8 @@ static unsigned long __init xen_set_identity_and_release(
 		}
 	}

-	if (released)
-		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
-	if (identity)
-		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

 	return released;
 }
@@ -260,42 +182,16 @@ static unsigned long __init xen_get_max_pages(void)
 	 * the current maximum rather than the static maximum. In this
 	 * case the e820 map provided to us will cover the static
 	 * maximum region.
-	 *
-	 * The dom0_mem=min:X,max:Y tweaks options differently depending
-	 * on the version, but in general this is what we get:
-	 *  | XENMEM_maximum_reser | nr_pages
-	 * --------------++-----------------------+-------------------
-	 * no dom0_mem        | INT_MAX | max_phys_pfn
-	 *  =3G               | INT_MAX | 786432
-	 *  =max:3G           | 786432  | 786432
-	 *  =min:1G,max:3G    | INT_MAX | max_phys_fn
-	 *  =1G,max:3G        | INT_MAX | 262144
-	 *  =min:1G,max:3G,2G | INT_MAX | max_phys_fn
-	 *
-	 * The =3G is often used and it lead to us initially setting
-	 * 786432 and allowing dom0 to balloon up to the max_physical_pfn.
-	 * This is at odd with the classic XenOClassic so lets emulate
-	 * the classic behavior.
 	 */
 	if (xen_initial_domain()) {
 		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
 		if (ret > 0)
 			max_pages = ret;
-		if (ret == -1UL)
-			max_pages = xen_start_info->nr_pages;
 	}

 	return min(max_pages, MAX_DOMAIN_PAGES);
 }
-static unsigned long xen_get_current_pages(void)
-{
-	domid_t domid = DOMID_SELF;
-	int ret;
-	ret = HYPERVISOR_memory_op(XENMEM_current_reservation, &domid);
-	if (ret > 0)
-		return ret;
-	return 0;
-}
+
 static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
 {
 	u64 end = start + size;
@@ -321,9 +217,7 @@ char * __init xen_memory_setup(void)
 	int rc;
 	struct xen_memory_map memmap;
 	unsigned long max_pages;
-	unsigned long last_pfn = 0;
 	unsigned long extra_pages = 0;
-	unsigned long populated;
 	int i;
 	int op;

@@ -363,23 +257,8 @@ char * __init xen_memory_setup(void)
 	 */
 	xen_released_pages = xen_set_identity_and_release(
 		map, memmap.nr_entries, max_pfn);
+	extra_pages += xen_released_pages;

-	/*
-	 * Populate back the non-RAM pages and E820 gaps that had been
-	 * released. But cap it as certain regions cannot be repopulated.
-	 */
-	if (xen_get_current_pages())
-		xen_released_pages = min(max_pfn - xen_get_current_pages(),
-					 xen_released_pages);
-	populated = xen_populate_chunk(map, memmap.nr_entries,
-			max_pfn, &last_pfn, xen_released_pages);
-
-	extra_pages += (xen_released_pages - populated);
-
-	if (last_pfn > max_pfn) {
-		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
-		mem_end = PFN_PHYS(max_pfn);
-	}
 	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
 	 * factor the base size. On non-highmem systems, the base
@@ -393,6 +272,7 @@ char * __init xen_memory_setup(void)
 	 */
 	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
 			  extra_pages);
+
 	i = 0;
 	while (i < memmap.nr_entries) {
 		u64 addr = map[i].addr;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25d7853942c8..0503c0c493a9 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -16,7 +16,6 @@
 #include
 #include
 #include
-#include
 #include
 #include

@@ -42,12 +41,10 @@ cpumask_var_t xen_cpu_initialized_map;
 static DEFINE_PER_CPU(int, xen_resched_irq);
 static DEFINE_PER_CPU(int, xen_callfunc_irq);
 static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
-static DEFINE_PER_CPU(int, xen_irq_work);
 static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

 /*
  * Reschedule call back.
@@ -146,17 +143,6 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

-	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
-	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
-				    cpu,
-				    xen_irq_work_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-				    callfunc_name,
-				    NULL);
-	if (rc < 0)
-		goto fail;
-	per_cpu(xen_irq_work, cpu) = rc;
-
 	return 0;

  fail:
@@ -169,8 +155,6 @@ static int xen_smp_intr_init(unsigned int cpu)
 	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
 				       NULL);
-	if (per_cpu(xen_irq_work, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

 	return rc;
 }
@@ -496,8 +480,8 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }

-static void __xen_send_IPI_mask(const struct cpumask *mask,
-			      int vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+			      enum ipi_vector vector)
 {
 	unsigned cpu;

@@ -509,7 +493,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;

-	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

 	/* Make sure other vcpus get a chance to run if they need to. */
 	for_each_cpu(cpu, mask) {
@@ -522,86 +506,10 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)

 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	__xen_send_IPI_mask(cpumask_of(cpu),
+	xen_send_IPI_mask(cpumask_of(cpu),
 			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }

-static inline int xen_map_vector(int vector)
-{
-	int xen_vector;
-
-	switch (vector) {
-	case RESCHEDULE_VECTOR:
-		xen_vector = XEN_RESCHEDULE_VECTOR;
-		break;
-	case CALL_FUNCTION_VECTOR:
-		xen_vector = XEN_CALL_FUNCTION_VECTOR;
-		break;
-	case CALL_FUNCTION_SINGLE_VECTOR:
-		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
-		break;
-	case IRQ_WORK_VECTOR:
-		xen_vector = XEN_IRQ_WORK_VECTOR;
-		break;
-	default:
-		xen_vector = -1;
-		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
-			vector);
-	}
-
-	return xen_vector;
-}
-
-void xen_send_IPI_mask(const struct cpumask *mask,
-			      int vector)
-{
-	int xen_vector = xen_map_vector(vector);
-
-	if (xen_vector >= 0)
-		__xen_send_IPI_mask(mask, xen_vector);
-}
-
-void xen_send_IPI_all(int vector)
-{
-	int xen_vector = xen_map_vector(vector);
-
-	if (xen_vector >= 0)
-		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
-}
-
-void xen_send_IPI_self(int vector)
-{
-	int xen_vector = xen_map_vector(vector);
-
-	if (xen_vector >= 0)
-		xen_send_IPI_one(smp_processor_id(), xen_vector);
-}
-
-void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
-				int vector)
-{
-	unsigned cpu;
-	unsigned int this_cpu = smp_processor_id();
-
-	if (!(num_online_cpus() > 1))
-		return;
-
-	for_each_cpu_and(cpu, mask, cpu_online_mask) {
-		if (this_cpu == cpu)
-			continue;
-
-		xen_smp_send_call_function_single_ipi(cpu);
-	}
-}
-
-void xen_send_IPI_allbutself(int vector)
-{
-	int xen_vector = xen_map_vector(vector);
-
-	if (xen_vector >= 0)
-		xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
-}
-
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
@@ -622,16 +530,6 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
-{
-	irq_enter();
-	irq_work_run();
-	inc_irq_stat(apic_irq_work_irqs);
-	irq_exit();
-
-	return IRQ_HANDLED;
-}
-
 static const struct smp_ops xen_smp_ops __initconst = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -678,7 +576,6 @@ static void xen_hvm_cpu_die(unsigned int cpu)
 	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 	native_cpu_die(cpu);
 }

diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
deleted file mode 100644
index 8981a76d081a..000000000000
--- a/arch/x86/xen/smp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _XEN_SMP_H
-
-extern void xen_send_IPI_mask(const struct cpumask *mask,
-			      int vector);
-extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
-				int vector);
-extern void xen_send_IPI_allbutself(int vector);
-extern void physflat_send_IPI_allbutself(int vector);
-extern void xen_send_IPI_all(int vector);
-extern void xen_send_IPI_self(int vector);
-
-#endif
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index fc3488631136..9adc5be57b13 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR)	+= sys-hypervisor.o
 obj-$(CONFIG_XEN_PVHVM)			+= platform-pci.o
 obj-$(CONFIG_XEN_TMEM)			+= tmem.o
 obj-$(CONFIG_SWIOTLB_XEN)		+= swiotlb-xen.o
-obj-$(CONFIG_XEN_DOM0)			+= pci.o acpi.o
+obj-$(CONFIG_XEN_DOM0)			+= pci.o
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= xen-pciback/
 obj-$(CONFIG_XEN_PRIVCMD)		+= xen-privcmd.o
 obj-$(CONFIG_XEN_ACPI_PROCESSOR)	+= xen-acpi-processor.o
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
deleted file mode 100644
index 119d42a2bf57..000000000000
--- a/drivers/xen/acpi.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/******************************************************************************
- * acpi.c
- * acpi file for domain 0 kernel
- *
- * Copyright (c) 2011 Konrad Rzeszutek Wilk
- * Copyright (c) 2011 Yu Ke ke.yu@intel.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include
-#include
-#include
-#include
-
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
-				     u32 pm1a_cnt, u32 pm1b_cnt)
-{
-	struct xen_platform_op op = {
-		.cmd = XENPF_enter_acpi_sleep,
-		.interface_version = XENPF_INTERFACE_VERSION,
-		.u = {
-			.enter_acpi_sleep = {
-				.pm1a_cnt_val = (u16)pm1a_cnt,
-				.pm1b_cnt_val = (u16)pm1b_cnt,
-				.sleep_state = sleep_state,
-			},
-		},
-	};
-
-	if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
-		WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
-		     "Email xen-devel@lists.xensource.com Thank you.\n", \
-		     pm1a_cnt, pm1b_cnt);
-		return -1;
-	}
-
-	HYPERVISOR_dom0_op(&op);
-	return 1;
-}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 0bfc1ef11259..e570c6f67e17 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -38,7 +38,6 @@
 #include
 #include
 #include
-#include
 #include
 #include

@@ -828,7 +827,6 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		    struct page **pages, unsigned int count)
 {
 	int i, ret;
-	bool lazy = false;
 	pte_t *pte;
 	unsigned long mfn;

@@ -839,11 +837,6 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return ret;

-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
-		arch_enter_lazy_mmu_mode();
-		lazy = true;
-	}
-
 	for (i = 0; i < count; i++) {
 		/* Do not add to override if the map failed. */
 		if (map_ops[i].status)
@@ -862,9 +855,6 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 			return ret;
 	}

-	if (lazy)
-		arch_leave_lazy_mmu_mode();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -873,7 +863,6 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 		      struct page **pages, unsigned int count, bool clear_pte)
 {
 	int i, ret;
-	bool lazy = false;

 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
 	if (ret)
@@ -882,20 +871,12 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return ret;

-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
-		arch_enter_lazy_mmu_mode();
-		lazy = true;
-	}
-
 	for (i = 0; i < count; i++) {
 		ret = m2p_remove_override(pages[i], clear_pte);
 		if (ret)
 			return ret;
 	}

-	if (lazy)
-		arch_leave_lazy_mmu_mode();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
deleted file mode 100644
index 48a9c0171b65..000000000000
--- a/include/xen/acpi.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/******************************************************************************
- * acpi.h
- * acpi file for domain 0 kernel
- *
- * Copyright (c) 2011 Konrad Rzeszutek Wilk
- * Copyright (c) 2011 Yu Ke
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _XEN_ACPI_H
-#define _XEN_ACPI_H
-
-#include
-
-#ifdef CONFIG_XEN_DOM0
-#include
-#include
-#include
-
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
-				     u32 pm1a_cnt, u32 pm1b_cnd);
-
-static inline void xen_acpi_sleep_register(void)
-{
-	if (xen_initial_domain())
-		acpi_os_set_prepare_sleep(
-			&xen_acpi_notify_hypervisor_state);
-}
-#else
-static inline void xen_acpi_sleep_register(void)
-{
-}
-#endif
-
-#endif	/* _XEN_ACPI_H */
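
For reference, a merge revert like the one recorded above is normally produced with
git itself. The sketch below is only an assumption of the workflow (in particular the
"-m 1" first-parent choice and the conflict handling); the two commit ids are the ones
named in the message at the top.

	# revert the merge commit, keeping the side of its first parent
	git revert -m 1 cbcc4ca92cce2c89fe4c03ec25f8ca0883204a5a
	# arch/x86/xen/enlighten.c is expected to conflict (see "Conflicts:" above);
	# fix it up by hand, then finish the revert
	git add arch/x86/xen/enlighten.c
	git revert --continue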