tlb_flush(tlb);
}
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
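+ /* no room left in the page array: tell the caller to flush first */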
+ if (tlb->nr == tlb->max)
+ return true;
tlb->pages[tlb->nr++] = page;
- VM_BUG_ON(tlb->nr > tlb->max);
- return tlb->max - tlb->nr;
+ return false;
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (!__tlb_remove_page(tlb, page))
+ if (__tlb_remove_page(tlb, page)) {
tlb_flush_mmu(tlb);
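+ /* the flush drained the page array, so this retry cannot fail */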
+ __tlb_remove_page(tlb, page);
+ }
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
* must be delayed until after the TLB has been flushed (see comments at the beginning of
* this file).
*/
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
+ if (tlb->nr == tlb->max)
+ return true;
+
tlb->need_flush = 1;
if (!tlb->nr && tlb->pages == tlb->local)
__tlb_alloc_page(tlb);
tlb->pages[tlb->nr++] = page;
- VM_BUG_ON(tlb->nr > tlb->max);
-
- return tlb->max - tlb->nr;
+ return false;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- if (!__tlb_remove_page(tlb, page))
+ if (__tlb_remove_page(tlb, page)) {
tlb_flush_mmu(tlb);
+ __tlb_remove_page(tlb, page);
+ }
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
}
/*
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
*/
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
+ return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
free_page_and_swap_cache(page);
}
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
/*
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
+ return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_page(tlb, page);
}
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
{
tlb->need_flush = 1;
free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
+ return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_page(tlb, page);
}
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
unsigned int batch_count;
+ /*
+ * __tlb_adjust_range will track the new addr here,
+ * so that we can adjust the range after the flush
+ */
+ unsigned long addr;
};
#define HAVE_GENERIC_MMU_GATHER
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
unsigned long end);
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
-
-/* tlb_remove_page
- * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- * required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (!__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
+bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address)
{
tlb->start = min(tlb->start, address);
tlb->end = max(tlb->end, address + PAGE_SIZE);
+ /*
+ * Track the last address with which we adjusted the range. This
+ * will be used later to adjust the range again after a tlb_flush_mmu()
+ * forced by a failed __tlb_remove_page().
+ */
+ tlb->addr = address;
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
}
}
+/* tlb_remove_page
+ * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ * required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ if (__tlb_remove_page(tlb, page)) {
+ tlb_flush_mmu(tlb);
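+ /* tlb_flush_mmu() reset the range; restore it from the saved addr before retrying */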
+ __tlb_adjust_range(tlb, tlb->addr);
+ __tlb_remove_page(tlb, page);
+ }
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+{
+ /* active->nr should be zero when we call this */
+ VM_BUG_ON_PAGE(tlb->active->nr, page);
+ __tlb_adjust_range(tlb, tlb->addr);
+ return __tlb_remove_page(tlb, page);
+}
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
* handling the additional races in SMP caused by other CPUs caching valid
- * mappings in their TLBs. Returns the number of free page slots left.
- * When out of page slots we must call tlb_flush_mmu().
+ * mappings in their TLBs. Returns true when the gather batch is full and
+ * the caller must call tlb_flush_mmu() before the page can be queued.
*/
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end);
batch = tlb->active;
- batch->pages[batch->nr++] = page;
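+ /* if the active batch is full, try another; failing that, ask the caller to flush */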
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
- return 0;
+ return true;
batch = tlb->active;
}
VM_BUG_ON_PAGE(batch->nr > batch->max, page);
- return batch->max - batch->nr;
+ batch->pages[batch->nr++] = page;
+ return false;
}
#endif /* HAVE_GENERIC_MMU_GATHER */
pte_t *start_pte;
pte_t *pte;
swp_entry_t entry;
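+ /* page that could not be batched because the gather was full */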
+ struct page *pending_page = NULL;
again:
init_rss_vec(rss);
page_remove_rmap(page, false);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
- if (unlikely(!__tlb_remove_page(tlb, page))) {
+ if (unlikely(__tlb_remove_page(tlb, page))) {
force_flush = 1;
+ pending_page = page;
addr += PAGE_SIZE;
break;
}
if (force_flush) {
force_flush = 0;
tlb_flush_mmu_free(tlb);
-
+ if (pending_page) {
+ /* queue the page that could not be batched before the flush */
+ __tlb_remove_pte_page(tlb, pending_page);
+ pending_page = NULL;
+ }
if (addr != end)
goto again;
}