* Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
* allocate the first pmd adjacent to the pgd. This means that we can
* subtract a constant offset to get to it. The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full LP64
+ * arranged so that a single pmd covers 4GB (giving a full 64-bit
* process access to 8TB) so our lookups are effectively L2 for the
* first 4GB of the kernel (i.e. for all ILP32 processes and all the
* kernel for machines with under 4GB of memory) */
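/* A worked check of the arithmetic above (editorial note, not part of the
 * original comment): one pmd spanning 4GB times 8TB/4GB = 2048 top-level
 * entries gives the 8TB per-process limit, and any address space that stays
 * below 4GB never needs more than the single pmd pre-allocated next to the
 * pgd, which is why those lookups collapse to two levels. */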
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
* pmd entry may not be cleared. */
return actual_pgd;
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
return pmd;
}
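/* Illustrative sketch only, not part of this header: how the pgd allocation
 * and free above pair up.  The function name is invented for the example;
 * the real callers at this point in time are the generic mm code
 * (mm_init() and __mmdrop(), via mm_alloc_pgd()/mm_free_pgd()). */
static inline void pgd_alloc_free_example(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_alloc(mm);	/* NULL on allocation failure */

	if (!pgd)
		return;
	/* On 64-bit, pgd now points PTRS_PER_PGD entries past the start of
	 * the allocated block, with the pre-allocated first pmd occupying
	 * the space in front of it. */
	pgd_free(mm, pgd);		/* steps back by PTRS_PER_PGD, then free_pages() */
}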
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#endif
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
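/* Editorial note on the populate helpers: pmd_populate_kernel() takes a
 * kernel-virtual pte pointer and, as the comment above says, keeps
 * PxD_FLAG_ATTACHED set when it re-fills the permanent first pmd;
 * pmd_populate() is the same operation for a user pte table identified by
 * its struct page; the new pmd_pgtable() is the reverse lookup the generic
 * mm code uses to get that struct page (pgtable_t) back out of a pmd entry.
 * On this port pgtable_t is presumably just a struct page pointer, so
 * pmd_page() suffices. */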
-static inline struct page *
+static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
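	/* Editorial note: pgtable_page_ctor() below marks the new page as a
	 * page-table page for the core mm (at the time of this change it
	 * initialised the split page-table lock, where configured, and the
	 * NR_PAGETABLE accounting); the matching pgtable_page_dtor() call is
	 * in pte_free() further down. */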
+ if (page)
+ pgtable_page_ctor(page);
return page;
}
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-#define pte_free(page) pte_free_kernel(page_address(page))
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_page_dtor(pte);
+ pte_free_kernel(mm, page_address(pte));
+}
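/* Editorial note: pgtable_page_dtor() undoes the constructor work from
 * pte_alloc_one() before the page goes back to the allocator, which is why
 * the old one-line macro has become a real function (and, like the other
 * free routines here, now carries the mm argument). */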
#define check_pgt_cache() do { } while (0)
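/* Editorial note: check_pgt_cache() stays empty here; this port keeps no
 * per-cpu cache of page-table pages that would need periodic trimming. */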