x86/paravirt: remove lazy mode in interrupts
author     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
           Wed, 18 Feb 2009 07:05:19 +0000 (23:05 -0800)
committer  Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
           Mon, 30 Mar 2009 06:35:38 +0000 (23:35 -0700)
Impact: simplification, robustness

Make paravirt_get_lazy_mode() always return PARAVIRT_LAZY_NONE
when in an interrupt.  This prevents interrupt code from
accidentally inheriting an outer lazy state, and instead
does everything synchronously.  Outer batched operations
are left deferred.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
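
To make the behaviour change concrete, here is a minimal, self-contained C model of the idea. It is not the kernel code: in_interrupt(), the lazy_mode variable and the pv_set_pte() hook below are stand-ins for the real per-CPU state and pv_mmu_ops backend, included only so the sketch compiles and runs on its own.

/*
 * Standalone model of the new lazy-mode semantics (illustrative only,
 * not the kernel implementation).
 */
#include <stdbool.h>
#include <stdio.h>

enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

static enum paravirt_lazy_mode lazy_mode = PARAVIRT_LAZY_NONE;
static bool in_irq_context;		/* stand-in for in_interrupt() */

static bool in_interrupt(void)
{
	return in_irq_context;
}

/* After this patch: interrupt context always reports LAZY_NONE. */
static enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return lazy_mode;
}

/* Illustrative backend hook: batch only when the caller is in lazy mode. */
static void pv_set_pte(unsigned long pteval)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)
		printf("queue pte %#lx for a later flush\n", pteval);
	else
		printf("apply pte %#lx immediately\n", pteval);
}

int main(void)
{
	lazy_mode = PARAVIRT_LAZY_MMU;	/* outer code entered lazy MMU mode */

	pv_set_pte(0x1000);		/* process context: batched */

	in_irq_context = true;		/* an interrupt arrives */
	pv_set_pte(0x2000);		/* interrupt context: applied synchronously */
	in_irq_context = false;

	pv_set_pte(0x3000);		/* back in process context: batched again */
	return 0;
}

Before the patch, the interrupt-context update could have been queued behind the inherited outer lazy state; with the check in place it is issued immediately while the outer batch stays deferred, matching the description above.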
arch/x86/kernel/paravirt.c
arch/x86/mm/fault.c
arch/x86/mm/highmem_32.c
arch/x86/mm/iomap_32.c
arch/x86/mm/pageattr.c

diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 63dd358d8ee13be0708c0139bf348d5d237aecfb..8ab250ac498b6b403e99cb563cc10240a5725e71 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -282,6 +282,9 @@ void paravirt_leave_lazy_cpu(void)
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 {
+       if (in_interrupt())
+               return PARAVIRT_LAZY_NONE;
+
        return __get_cpu_var(paravirt_lazy_mode);
 }
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a03b7279efa018850def9042339fec4af4249042..cfbb4a7380116dffb8fc9237c94794542b83ce23 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -225,12 +225,10 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
        if (!pmd_present(*pmd_k))
                return NULL;
 
-       if (!pmd_present(*pmd)) {
+       if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
-               arch_flush_lazy_mmu_mode();
-       } else {
+       else
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-       }
 
        return pmd_k;
 }
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 00f127c80b0e38fa50eec531e8e85c8d1f18fd6d..e81dfa4081578c839a8a1cb14f56061b1a3a4f3b 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -87,7 +87,6 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
-       arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
 }
@@ -117,7 +116,6 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
        }
 
-       arch_flush_lazy_mmu_mode();
        pagefault_enable();
 }
 
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 04102d42ff4250873a519620958855e9b50770ff..b6a61f3d7ef828a3c03de42454382895faa996e6 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,6 @@ iounmap_atomic(void *kvaddr, enum km_type type)
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
                kpte_clear_flush(kmap_pte-idx, vaddr);
 
-       arch_flush_lazy_mmu_mode();
        pagefault_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 9c4294986af779ed62ab088af0dae0f0048eb0c9..9015e5e412b51c59450d6dad805fd79ecd0d1389 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -824,13 +824,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
        vm_unmap_aliases();
 
-       /*
-        * If we're called with lazy mmu updates enabled, the
-        * in-memory pte state may be stale.  Flush pending updates to
-        * bring them up to date.
-        */
-       arch_flush_lazy_mmu_mode();
-
        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
@@ -873,13 +866,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
        } else
                cpa_flush_all(cache);
 
-       /*
-        * If we've been called with lazy mmu updates enabled, then
-        * make sure that everything gets flushed out before we
-        * return.
-        */
-       arch_flush_lazy_mmu_mode();
-
 out:
        return ret;
 }
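
A closing note on why the removed arch_flush_lazy_mmu_mode() calls are no longer needed. Building on the model sketched after the commit message (again an assumption for illustration, not the in-tree helper), a flush only has work to do when the caller is actually in lazy MMU mode; interrupt paths such as kmap_atomic(), kunmap_atomic() and iounmap_atomic() now always see PARAVIRT_LAZY_NONE, so their PTE updates were applied synchronously and there is nothing left to flush.

/* Extends the model above; illustrative only, not arch_flush_lazy_mmu_mode(). */
static void flush_lazy_mmu_sketch(void)
{
	/* Nothing batched: lazy mode off, or running in an interrupt. */
	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return;

	/* Process context with batching active: push queued updates out. */
	printf("flush queued pte updates\n");
}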