X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=include%2Fasm-xtensa%2Fpgtable.h;h=2d4b5db6ea6340406c62e99be1bc1398daa5770d;hb=edb16bec41db68b22799a5fbad82c3891e637565;hp=883ebc2d75d69090f0c044e2556b7df9b324d965;hpb=e3e3679cfc1c6689e035f6d69606253b1eda63ca;p=mv-sheeva.git

diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 883ebc2d75d..2d4b5db6ea6 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -1,5 +1,5 @@
 /*
- * linux/include/asm-xtensa/page.h
+ * linux/include/asm-xtensa/pgtable.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version2 as
@@ -14,45 +14,6 @@
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 
-/* Assertions. */
-
-#ifdef CONFIG_MMU
-
-
-#if (XCHAL_MMU_RINGS < 2)
-# error Linux build assumes at least 2 ring levels.
-#endif
-
-#if (XCHAL_MMU_CA_BITS != 4)
-# error We assume exactly four bits for CA.
-#endif
-
-#if (XCHAL_MMU_SR_BITS != 0)
-# error We have no room for SR bits.
-#endif
-
-/*
- * Use the first min-wired way for mapping page-table pages.
- * Page coloring requires a second min-wired way.
- */
-
-#if (XCHAL_DTLB_MINWIRED_SETS == 0)
-# error Need a min-wired way for mapping page-table pages
-#endif
-
-#define DTLB_WAY_PGTABLE XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY)
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-# if XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAYS) >= 2
-#  define DTLB_WAY_DCACHE_ALIAS0 (DTLB_WAY_PGTABLE + 1)
-#  define DTLB_WAY_DCACHE_ALIAS1 (DTLB_WAY_PGTABLE + 2)
-# else
-#  error Page coloring requires its own wired dtlb way!
-# endif
-#endif
-
-#endif /* CONFIG_MMU */
-
 /*
  * We only use two ring levels, user and kernel space.
  */
@@ -97,7 +58,7 @@
 #define PGD_ORDER		0
 #define PMD_ORDER		0
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS	XCHAL_SEG_MAPPABLE_VADDR
+#define FIRST_USER_ADDRESS	0
 #define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)
 
 /* virtual memory area. We keep a distance to other memory regions to be
@@ -218,7 +179,7 @@ extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
 /*
  * The pmd contains the kernel virtual address of the pte page.
  */
-#define pmd_page_kernel(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
 #define pmd_page(pmd) virt_to_page(pmd_val(pmd))
 
 /*
@@ -278,6 +239,8 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
 #endif
 }
 
+struct mm_struct;
+
 static inline void
 set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
 {
@@ -294,6 +257,7 @@ set_pmd(pmd_t *pmdp, pmd_t pmdval)
 #endif
 }
 
+struct vm_area_struct;
 
 static inline int
 ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
@@ -346,7 +310,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 /* Find an entry in the third-level page table.. */
 #define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir,addr)	\
-	((pte_t*) pmd_page_kernel(*(dir)) + pte_index(addr))
+	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
 #define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
 
@@ -441,11 +405,11 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 			     unsigned long address, pte_t pte);
 
 /*
- * remap a physical address `phys' of size `size' with page protection `prot'
+ * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
-#define io_remap_page_range(vma,from,phys,size,prot) \
-		remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+		remap_pfn_range(vma, from, pfn, size, prot)
 
 
 /* No page table caches to init */
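The third-level lookup after this patch goes pmd_page_vaddr() -> pte_index() -> pte_offset_kernel(). The stand-alone C sketch below models only that address arithmetic; PAGE_SHIFT, PTRS_PER_PTE, the pte_t/pmd_t typedefs and the fake pte table are assumed example definitions for illustration, not the real asm-xtensa ones.

/*
 * Stand-alone model of the pte lookup performed by the macros in the
 * hunks above.  PAGE_SHIFT, PTRS_PER_PTE and the pte_t/pmd_t types are
 * assumed example values, not the asm-xtensa definitions.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT   12                      /* assumed: 4 KiB pages */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define PTRS_PER_PTE 1024                    /* assumed entries per pte table */

typedef unsigned long pte_t;
typedef unsigned long pmd_t;
#define pmd_val(x) (x)

/* Same shape as the macros in the patch. */
#define pmd_page_vaddr(pmd)  ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pte_index(address)   (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))

int main(void)
{
	/* Fake, page-aligned pte table standing in for a real pte page. */
	pte_t *pte_table = aligned_alloc(PAGE_SIZE, PTRS_PER_PTE * sizeof(pte_t));
	if (!pte_table)
		return 1;

	pmd_t pmd = (pmd_t) pte_table;          /* pmd holds the table's vaddr */
	unsigned long vaddr = 0x12345678UL;

	pte_t *ptep = pte_offset_kernel(&pmd, vaddr);
	*ptep = 0xabc;                          /* write through the macro chain */

	printf("table %#lx  index %lu  entry %#lx\n",
	       pmd_page_vaddr(pmd), pte_index(vaddr),
	       pte_table[pte_index(vaddr)]);    /* reads back 0xabc */
	free(pte_table);
	return 0;
}

Built with any C11 compiler, it prints the page-aligned table address recovered by pmd_page_vaddr(), the slot index for the example virtual address, and the value written through the returned pte pointer.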
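The last hunk is the caller-visible side of the io_remap_page_range() -> io_remap_pfn_range() conversion: the `>> PAGE_SHIFT` moves out of the macro and callers pass a page frame number, matching remap_pfn_range() itself. A hypothetical driver mmap hook might change roughly as sketched below; mydev_mmap and MYDEV_MMIO_PHYS are invented names, and the fragment only builds inside a kernel tree of that era.

/* Hypothetical driver mmap hook; mydev_mmap and MYDEV_MMIO_PHYS are
 * invented names used only to show the caller-side difference. */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* old: the macro shifted the physical address internally
	 *   return io_remap_page_range(vma, vma->vm_start, MYDEV_MMIO_PHYS,
	 *                              size, vma->vm_page_prot);
	 */

	/* new: the caller passes a pfn, as remap_pfn_range() expects */
	return io_remap_pfn_range(vma, vma->vm_start,
				  MYDEV_MMIO_PHYS >> PAGE_SHIFT,
				  size, vma->vm_page_prot);
}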