return atomic_read(&compound_head(page)->_count);
}
+#ifdef CONFIG_HUGETLB_PAGE
+extern int PageHeadHuge(struct page *page_head);
+#else /* CONFIG_HUGETLB_PAGE */
+static inline int PageHeadHuge(struct page *page_head)
+{
+ return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
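+/*
+ * Unchecked variant of compound_tail_refcounted() below; the caller
+ * must pass a head page (e.g. the result of compound_head()).
+ */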
+static inline bool __compound_tail_refcounted(struct page *page)
+{
+ return !PageSlab(page) && !PageHeadHuge(page);
+}
+
+/*
+ * This takes a head page as parameter and tells if the
+ * tail page reference counting can be skipped.
+ *
+ * For this to be safe, PageSlab and PageHeadHuge must remain true on
+ * any given page where they return true here, until all tail pins
+ * have been released.
+ */
+static inline bool compound_tail_refcounted(struct page *page)
+{
+ VM_BUG_ON(!PageHead(page));
+ return __compound_tail_refcounted(page);
+}
+
static inline void get_huge_page_tail(struct page *page)
{
/*
* __split_huge_page_refcount() cannot run
* from under us.
+ * Hence there is no need for compound_trans_head() here.
*/
VM_BUG_ON(page_mapcount(page) < 0);
VM_BUG_ON(atomic_read(&page->_count) != 0);
- atomic_inc(&page->_mapcount);
+ if (compound_tail_refcounted(compound_head(page)))
+ atomic_inc(&page->_mapcount);
}
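+/* Slow path of get_page() for tail pages, implemented in mm/swap.c. */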
extern bool __get_page_tail(struct page *page);
#if USE_SPLIT_PTE_PTLOCKS
#if BLOATED_SPINLOCKS
+void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
return page->ptl;
}
#else /* BLOATED_SPINLOCKS */
+static inline void ptlock_cache_init(void) {}
static inline bool ptlock_alloc(struct page *page)
{
return true;
}

#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
+static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
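+/* Single entry point that sets up the ptlock cache and the arch page table caches. */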
+static inline void pgtable_init(void)
+{
+ ptlock_cache_init();
+ pgtable_cache_init();
+}
+
static inline bool pgtable_page_ctor(struct page *page)
{
inc_zone_page_state(page, NR_PAGETABLE);