#define pmd_none(pmd) (!pmd_val(pmd))
+/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
+ * very simple, it's just the physical address. PTE tables are of
+ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
+ * the top bits outside of the range of any physical address size we
+ * support are clear as well. We also validate the physical address itself.
+ */
+#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \
+                      !__kern_addr_valid(pmd_val(pmd)))
+
+#define pud_none(pud) (!pud_val(pud))
+
+#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \
+                      !__kern_addr_valid(pud_val(pud)))
+
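
For reference, a minimal user-space sketch of the check the new pmd_bad()/pud_bad() macros perform. The SKETCH_* constants and phys_addr_ok() below are illustrative stand-ins, not the kernel's real PAGE_MASK or __kern_addr_valid(); the 41-bit range bound is borrowed from the kern_addr_valid() test removed further down in this patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	13		/* sparc64 uses 8K base pages */
#define SKETCH_PAGE_SIZE	(1ULL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK	(~(SKETCH_PAGE_SIZE - 1))
#define SKETCH_MAX_PHYS_BITS	41		/* illustrative range limit only */

/* Stand-in for __kern_addr_valid(): accept only physical addresses
 * that fit inside the supported range.
 */
static bool phys_addr_ok(uint64_t paddr)
{
        return (paddr >> SKETCH_MAX_PHYS_BITS) == 0;
}

/* Same shape as the new pmd_bad(): the entry is just the physical
 * address of a PAGE_SIZE PTE table, so any sub-PAGE_SIZE bit or an
 * out-of-range address marks it bad.
 */
static bool sketch_pmd_bad(uint64_t pmd_val)
{
        return (pmd_val & ~SKETCH_PAGE_MASK) || !phys_addr_ok(pmd_val);
}

int main(void)
{
        printf("%d\n", sketch_pmd_bad(0x2000000000ULL));  /* aligned, in range -> 0 */
        printf("%d\n", sketch_pmd_bad(0x2000000040ULL));  /* sub-PAGE_SIZE bits set -> 1 */
        printf("%d\n", sketch_pmd_bad(1ULL << 45));        /* beyond phys range -> 1 */
        return 0;
}
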
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                       pmd_t *pmdp, pmd_t pmd);
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
-extern void paging_init(void);
-extern unsigned long find_ecache_flush_span(unsigned long size);
+void paging_init(void);
+unsigned long find_ecache_flush_span(unsigned long size);
struct seq_file;
-extern void mmu_info(struct seq_file *);
+void mmu_info(struct seq_file *);
struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-                                 pmd_t *pmd);
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                          pmd_t *pmd);
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                            pmd_t *pmdp);
+
#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                       pgtable_t pgtable);
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                                pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
/* Encode and de-code a swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/* File offset in PTE support. */
-extern unsigned long pte_file(pte_t);
+unsigned long pte_file(pte_t);
#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
-extern pte_t pgoff_to_pte(unsigned long);
+pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
-extern int page_in_phys_avail(unsigned long paddr);
-extern unsigned long sparc64_valid_addr_bitmap[];
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-static inline bool kern_addr_valid(unsigned long addr)
-{
-        unsigned long paddr = __pa(addr);
-
-        if ((paddr >> 41UL) != 0UL)
-                return false;
-        return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
-}
-
+int page_in_phys_avail(unsigned long paddr);
/*
* For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in