-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
-#endif
-
-	if (tlb_fast_mode(tlb))
-		return;
-
-	for (batch = &tlb->local; batch; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
-}
-
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	struct mmu_gather_batch *batch, *next;
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- * handling the additional races in SMP caused by other CPUs caching valid
- * mappings in their TLBs. Returns the number of free page slots left.
- * When out of page slots we must call tlb_flush_mmu().
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	struct mmu_gather_batch *batch;
-
-	tlb->need_flush = 1;
-
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu() */
-	}
-
-	batch = tlb->active;
-	batch->pages[batch->nr++] = page;
-	VM_BUG_ON(batch->nr > batch->max);
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return 0;
-	}
-
-	return batch->max - batch->nr;
-}
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
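
With the bodies moved out of line, the header exposes only the four
declarations above. For orientation, a typical shootdown cycle over this
API looks roughly like the sketch below; this is an illustration, not part
of the patch, and the function name, the pages[] array, and the range
arguments are hypothetical placeholders (a real caller walks page tables
and clears PTEs before queueing each page). Note that a page handed to
__tlb_remove_page() is always queued, so when it returns 0 the caller
simply flushes and continues:

static void example_unmap(struct mm_struct *mm, struct page **pages,
			  int nr, unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	int i;

	tlb_gather_mmu(&tlb, mm, false);	/* false: partial unmap, not full-mm teardown */

	for (i = 0; i < nr; i++) {
		/* queue each torn-down page; 0 means the batches are full */
		if (!__tlb_remove_page(&tlb, pages[i]))
			tlb_flush_mmu(&tlb);	/* flush TLBs, then free the queued pages */
	}

	tlb_finish_mmu(&tlb, start, end);	/* final flush and batch-page teardown */
}

This mirrors the inline tlb_remove_page() wrapper, which calls
tlb_flush_mmu() whenever __tlb_remove_page() reports no free page slots.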