return atomic_read(&compound_head(page)->_count);
}
-extern int __get_page_tail(struct page *page);
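+/*
+ * Returns true if a reference was taken on the tail page, false if
+ * the caller should fall back to the ordinary _count increment
+ * (e.g. because the compound page was split in the meantime).
+ */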
+extern bool __get_page_tail(struct page *page);
static inline void get_page(struct page *page)
{
+	if (unlikely(PageTail(page)))
+		if (likely(__get_page_tail(page)))
+			return;
	atomic_inc(&page->_count);
}
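+
+/*
+ * Background for the tail-page special case: a THP tail page keeps the
+ * references taken on it in page->_mapcount, while the compound page
+ * as a whole is kept alive by the head page's _count.  When
+ * __split_huge_page_refcount() splits the compound page, the pins
+ * accumulated in each tail's _mapcount are transferred to that tail's
+ * _count.  A plain _count increment on a tail page could be lost
+ * across such a split, hence get_page() must go through
+ * __get_page_tail().
+ */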
+/*
+ * This is meant to be called as the FOLL_GET operation of
+ * follow_page() and it must be called while holding the proper PT
+ * lock while the pte (or pmd_trans_huge) is still mapping the page.
+ */
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page)))
		/* safe: the PT lock we hold keeps __split_huge_page_refcount() away */
		__get_page_tail_foll(page, true);
	else
		atomic_inc(&page->_count);
}
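+
+/*
+ * Typical FOLL_GET usage (sketch, modelled on follow_page() in
+ * mm/memory.c; names abbreviated): the reference is taken before the
+ * PT lock is dropped, while the pte still maps the page:
+ *
+ *	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+ *	page = vm_normal_page(vma, address, *ptep);
+ *	if (page && (flags & FOLL_GET))
+ *		get_page_foll(page);
+ *	pte_unmap_unlock(ptep, ptl);
+ */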
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
/* __split_huge_page_refcount can run under us */
struct page *page_head = compound_trans_head(page);
+
if (likely(page != page_head &&
get_page_unless_zero(page_head))) {
unsigned long flags;
}
EXPORT_SYMBOL(put_page);
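+
+/*
+ * Note the pattern used below (and in put_compound_page() above):
+ * speculatively pin the presumed head with get_page_unless_zero(),
+ * take the compound lock, then recheck PageTail() -- only then is it
+ * known that __split_huge_page_refcount() cannot be splitting the
+ * page concurrently.
+ */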
-int __get_page_tail(struct page *page)
+/*
+ * This function is exported but must not be called by anything other
+ * than get_page(). It implements the slow path of get_page().
+ */
+bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the proper
	 * PT lock that already serializes against split_huge_page().
	 */
unsigned long flags;
- int got = 0;
+ bool got = false;
struct page *page_head = compound_trans_head(page);
+
if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it may not be
		 * a head page anymore by the time we obtain the lock;
		 * that is ok as long as it can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
if (likely(PageTail(page))) {
__get_page_tail_foll(page, false);
- got = 1;
+ got = true;
}
compound_unlock_irqrestore(page_head, flags);
if (unlikely(!got))