thp: add tlb_remove_pmd_tlb_entry
Author:     Shaohua Li <shaohua.li@intel.com>
AuthorDate: Wed, 30 Nov 2011 04:12:00 +0000 (15:12 +1100)
Commit:     Stephen Rothwell <sfr@canb.auug.org.au>
CommitDate: Thu, 1 Dec 2011 04:08:56 +0000 (15:08 +1100)
We have tlb_remove_tlb_entry() to indicate that a pte TLB entry should be
flushed, but no corresponding API for a pmd entry.  This isn't a problem so
far because THP is currently x86-only and tlb_flush() on x86 flushes the
entire TLB.  But it is confusing and could be missed if THP is ported to
another arch.
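
(Illustrative sketch, not part of this patch: an architecture that tracks
the to-be-flushed virtual range in its mmu_gather could override the new
hook so the huge-page mapping is covered by the final flush.  The start/end
fields below are an assumption about such an arch-private mmu_gather, not
the generic structure.)

	#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
		do {							\
			/* widen the pending flush range so it covers	\
			 * the whole huge page				\
			 */						\
			(tlb)->start = min((tlb)->start, (address));	\
			(tlb)->end   = max((tlb)->end,			\
					   (address) + HPAGE_PMD_SIZE);	\
		} while (0)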

Also convert tlb->need_flush = 1 to a VM_BUG_ON(!tlb->need_flush) in
__tlb_remove_page(), as suggested by Andrea Arcangeli.  __tlb_remove_page()
is supposed to be called after tlb_remove_xxx_tlb_entry(), so we can catch
any misuse.
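
(Sketch of the ordering this enforces, assuming a zap path along the lines
of zap_huge_pmd() below; the identifiers come from the patch, the snippet
itself is only illustrative.)

	pmd_clear(pmd);
	/* flag the stale huge-page entry; this sets tlb->need_flush */
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	/* queue the page for freeing; the new VM_BUG_ON(!tlb->need_flush)
	 * in __tlb_remove_page() would trip here if the call above were
	 * missing
	 */
	tlb_remove_page(tlb, page);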

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/asm-generic/tlb.h
include/linux/huge_mm.h
mm/huge_memory.c
mm/memory.c

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e58fa777fa09abe91831026852bfe21bdfa4f8d4..f96a5b58a975c6ccc1ebadecf746f84dd47a38ea 100644
@@ -139,6 +139,20 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
+/**
+ * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
+ * This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pmd_tlb_entry
+#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)           \
+       do {                                                    \
+               tlb->need_flush = 1;                            \
+               __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
+       } while (0)
+
 #define pte_free_tlb(tlb, ptep, address)                       \
        do {                                                    \
                tlb->need_flush = 1;                            \
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 33e47d8aeddb52c8b76a4ffd4627a50fb97154f5..7246cfa602d0517accd022bd0c3837c86699b3f0 100644
@@ -18,7 +18,7 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
                                          unsigned int flags);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
-                       pmd_t *pmd);
+                       pmd_t *pmd, unsigned long addr);
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 964fc5a2edd2cfcabf83a0e4f8288ad59acab47d..5a595554bd8c842d930890fdf75168535fbec493 100644
@@ -1026,7 +1026,7 @@ out:
 }
 
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
-                pmd_t *pmd)
+                pmd_t *pmd, unsigned long addr)
 {
        int ret = 0;
 
@@ -1042,6 +1042,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        pgtable = get_pmd_huge_pte(tlb->mm);
                        page = pmd_page(*pmd);
                        pmd_clear(pmd);
+                       tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                        page_remove_rmap(page);
                        VM_BUG_ON(page_mapcount(page) < 0);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
diff --git a/mm/memory.c b/mm/memory.c
index 67c8a52c4117545c6caee1d13db94dcb8151613d..f4dac118f16c6fa471287b89b899579b3de57794 100644
@@ -293,7 +293,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
        struct mmu_gather_batch *batch;
 
-       tlb->need_flush = 1;
+       VM_BUG_ON(!tlb->need_flush);
 
        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
@@ -1231,7 +1231,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                        if (next-addr != HPAGE_PMD_SIZE) {
                                VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
                                split_huge_page_pmd(vma->vm_mm, pmd);
-                       } else if (zap_huge_pmd(tlb, vma, pmd))
+                       } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                continue;
                        /* fall through */
                }