#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
+#include <linux/bit_spinlock.h>
struct mempolicy;
struct anon_vma;
}
#endif
+static inline void compound_lock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bit_spin_lock(PG_compound_lock, &page->flags);
+#endif
+}
+
+static inline void compound_unlock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bit_spin_unlock(PG_compound_lock, &page->flags);
+#endif
+}
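+
+/*
+ * Illustrative sketch of how the lock pairs up (hypothetical caller;
+ * "head" stands for whatever compound head the caller already holds):
+ *
+ *	compound_lock(head);
+ *	... tail page->_count updates, serialized against split ...
+ *	compound_unlock(head);
+ */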
+
+static inline unsigned long compound_lock_irqsave(struct page *page)
+{
+ unsigned long uninitialized_var(flags);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ local_irq_save(flags);
+ compound_lock(page);
+#endif
+ return flags;
+}
+
+static inline void compound_unlock_irqrestore(struct page *page,
+ unsigned long flags)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ compound_unlock(page);
+ local_irq_restore(flags);
+#endif
+}
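+
+/*
+ * The irqsave variants are for callers that may race with the lock
+ * being taken from interrupt context. A hypothetical caller:
+ *
+ *	unsigned long flags;
+ *
+ *	flags = compound_lock_irqsave(head);
+ *	... critical section, IRQs disabled on this CPU ...
+ *	compound_unlock_irqrestore(head, flags);
+ */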
+
static inline struct page *compound_head(struct page *page)
{
if (unlikely(PageTail(page)))
static inline void get_page(struct page *page)
{
- page = compound_head(page);
- VM_BUG_ON(atomic_read(&page->_count) == 0);
+ /*
+ * Getting a normal page or the head of a compound page
+ * requires page->_count to already be elevated. When getting
+ * a tail page, only the head page's _count needs to be
+ * elevated, so for tail pages the bugcheck merely verifies
+ * that page->_count isn't negative.
+ */
+ VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
atomic_inc(&page->_count);
+ /*
+ * Getting a tail page will elevate both the head and tail
+ * page->_count(s).
+ */
+ if (unlikely(PageTail(page))) {
+ /*
+ * This is safe only because
+ * __split_huge_page_refcount can't run under
+ * get_page().
+ */
+ VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+ atomic_inc(&page->first_page->_count);
+ }
}
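+
+/*
+ * Illustration of the pairing (sketch only, names hypothetical): a
+ * reference taken on a tail page is expected to be dropped on the
+ * same tail page, so the release path can rebalance both counts:
+ *
+ *	get_page(tail);		tail->_count and head->_count both rise
+ *	...
+ *	put_page(tail);		the release path drops both again
+ */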
static inline struct page *virt_to_head_page(const void *x)
page[1].lru.prev = (void *)order;
}
+/*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
+ * servicing faults for write access. In the normal case we always want
+ * pte_mkwrite, but get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_WRITE))
+ pte = pte_mkwrite(pte);
+ return pte;
+}
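+
+/*
+ * Typical use when servicing a write fault (sketch): make the pte
+ * dirty, and writable only if the vma actually permits writing:
+ *
+ *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ */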
+
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
VM_BUG_ON(PageSlab(page));
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
mapping = NULL;
return mapping;
}
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
/*
pte_unmap(pte); \
} while (0)
-#define pte_alloc_map(mm, pmd, address) \
- ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
- NULL: pte_offset_map(pmd, address))
+#define pte_alloc_map(mm, vma, pmd, address) \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
+ pmd, address))? \
+ NULL: pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
- ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \
+ pmd, address))? \
NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
- ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
NULL: pte_offset_kernel(pmd, address))
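+
+/*
+ * Sketch of a fault-path caller of the reworked pte_alloc_map()
+ * (illustrative only; the vma is now threaded through so that
+ * __pte_alloc() can see it, and NULL is passed where none exists,
+ * as in the _lock variant above):
+ *
+ *	pte = pte_alloc_map(mm, vma, pmd, address);
+ *	if (!pte)
+ *		return VM_FAULT_OOM;
+ */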
extern void free_area_init(unsigned long * zones_size);
#define FOLL_GET 0x04 /* do get_page on page */
#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
+#define FOLL_MLOCK 0x40 /* mark page as mlocked */
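+
+/*
+ * Sketch (flag combination illustrative): an mlock fault-in path can
+ * ask the follow_page()/get_user_pages() machinery to mark pages as
+ * mlocked while it walks them:
+ *
+ *	int gup_flags = FOLL_TOUCH | FOLL_GET | FOLL_MLOCK;
+ */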
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
extern void dump_page(struct page *page);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+extern void clear_huge_page(struct page *page,
+ unsigned long addr,
+ unsigned int pages_per_huge_page);
+extern void copy_user_huge_page(struct page *dst, struct page *src,
+ unsigned long addr, struct vm_area_struct *vma,
+ unsigned int pages_per_huge_page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
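+
+/*
+ * Sketch of intended use (illustrative; HPAGE_PMD_NR is assumed to be
+ * the number of base pages per transparent huge page): a huge-page
+ * fault handler clears or copies the whole compound page one base
+ * page at a time:
+ *
+ *	clear_huge_page(page, haddr, HPAGE_PMD_NR);
+ *
+ * and on copy-on-write:
+ *
+ *	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+ */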
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */