Merge branch 'late/clksrc' into late/cleanup
[karo-tx-linux.git] / arch/arm/mm/mmu.c
index ce328c7f5c94556cdda6021e8aef1b950aba5aa2..78978945492a2a105f1a9a2d18e81cd76dc1e7b6 100644
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
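
Note: this hunk only adds the definitions; the matching declarations are expected to sit next to the existing pgprot externs in arch/arm/include/asm/pgtable.h. A minimal sketch of the assumed header side (not shown in this diff):

extern pgprot_t         pgprot_user;
extern pgprot_t         pgprot_kernel;
extern pgprot_t         pgprot_hyp_device;      /* Hyp-mode device mappings */
extern pgprot_t         pgprot_s2;              /* stage-2 normal memory */
extern pgprot_t         pgprot_s2_device;       /* stage-2 device memory */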
@@ -66,34 +69,46 @@ struct cachepolicy {
        unsigned int    cr_mask;
        pmdval_t        pmd;
        pteval_t        pte;
+       pteval_t        pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)      policy
+#else
+#define s2_policy(policy)      0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = L_PTE_MT_UNCACHED,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = L_PTE_MT_BUFFERABLE,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = L_PTE_MT_WRITETHROUGH,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = L_PTE_MT_WRITEBACK,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = L_PTE_MT_WRITEALLOC,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
        }
 };
 
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+       pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
        int cpu_arch = cpu_architecture();
        int i;
 
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
         */
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+       s2_pgprot = cp->pte_s2;
+       hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
        /*
         * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
                        user_pgprot |= L_PTE_SHARED;
                        kern_pgprot |= L_PTE_SHARED;
                        vecs_pgprot |= L_PTE_SHARED;
+                       s2_pgprot |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
                        mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | kern_pgprot);
+       pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+       pgprot_s2_device  = __pgprot(s2_device_pgprot);
+       pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
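
For context, a hedged sketch of how a consumer such as KVM/ARM might build its page protections on top of the three new globals initialised above. The macro and bit names below (_MOD_PROT, L_PTE_HYP, L_PTE_S2_RDONLY) follow the related KVM/ARM page-table patches and are assumptions here, not part of this diff:

/* Illustrative only: derive Hyp and stage-2 protections from the globals
 * that build_mem_type_table() fills in above. */
#define _MOD_PROT(p, b)         __pgprot(pgprot_val(p) | (b))

#define PAGE_HYP                _MOD_PROT(pgprot_kernel, L_PTE_HYP)
#define PAGE_HYP_DEVICE         _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2                 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE          _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)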
@@ -576,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-                                     unsigned long end, phys_addr_t phys,
-                                     const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+                       unsigned long end, phys_addr_t phys,
+                       const struct mem_type *type)
 {
-       pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
        /*
-        * Try a section mapping - end, addr and phys must all be aligned
-        * to a section boundary.  Note that PMDs refer to the individual
-        * L1 entries, whereas PGDs refer to a group of L1 entries making
-        * up one logical pointer to an L2 table.
+        * In classic MMU format, puds and pmds are folded into
+        * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+        * group of L1 entries making up one logical pointer to
+        * an L2 table (2MB), whereas PMDs refer to the individual
+        * L1 entries (1MB). Hence increment to get the correct
+        * offset for odd 1MB sections.
+        * (See arch/arm/include/asm/pgtable-2level.h)
         */
-       if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-               pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-               if (addr & SECTION_SIZE)
-                       pmd++;
+       if (addr & SECTION_SIZE)
+               pmd++;
 #endif
+       do {
+               *pmd = __pmd(phys | type->prot_sect);
+               phys += SECTION_SIZE;
+       } while (pmd++, addr += SECTION_SIZE, addr != end);
 
-               do {
-                       *pmd = __pmd(phys | type->prot_sect);
-                       phys += SECTION_SIZE;
-               } while (pmd++, addr += SECTION_SIZE, addr != end);
+       flush_pmd_entry(pmd);
+}
 
-               flush_pmd_entry(p);
-       } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+                                     unsigned long end, phys_addr_t phys,
+                                     const struct mem_type *type)
+{
+       pmd_t *pmd = pmd_offset(pud, addr);
+       unsigned long next;
+
+       do {
                /*
-                * No need to loop; pte's aren't interested in the
-                * individual L1 entries.
+                * With LPAE, we must loop over to map
+                * all the pmds for the given range.
                 */
-               alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-       }
+               next = pmd_addr_end(addr, end);
+
+               /*
+                * Try a section mapping - addr, next and phys must all be
+                * aligned to a section boundary.
+                */
+               if (type->prot_sect &&
+                               ((addr | next | phys) & ~SECTION_MASK) == 0) {
+                       map_init_section(pmd, addr, next, phys, type);
+               } else {
+                       alloc_init_pte(pmd, addr, next,
+                                               __phys_to_pfn(phys), type);
+               }
+
+               phys += next - addr;
+
+       } while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -619,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
        do {
                next = pud_addr_end(addr, end);
-               alloc_init_section(pud, addr, next, phys, type);
+               alloc_init_pmd(pud, addr, next, phys, type);
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
 }
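
For context, the rework above makes alloc_init_pmd() walk the range one pmd at a time via pmd_addr_end(), which matters under LPAE where each pmd entry covers 2 MiB, while classic 2-level tables fold the pud/pmd levels into the pgd. A minimal userspace sketch of the boundary arithmetic (constants illustrative for the LPAE case, not taken from this diff):

#include <stdio.h>

#define PMD_SHIFT       21                      /* LPAE: one pmd entry maps 2 MiB */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))

/* Same shape as the kernel's generic pmd_addr_end(): advance to the next
 * pmd boundary, but never past the end of the requested range. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

        return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0xc0100000UL, end = 0xc0500000UL, next;

        /* alloc_init_pmd()-style walk: one iteration per pmd entry. */
        do {
                next = pmd_addr_end(addr, end);
                printf("pmd entry covers %#lx..%#lx\n", addr, next);
        } while (addr = next, addr != end);

        return 0;
}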
@@ -757,21 +800,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
        struct map_desc *md;
        struct vm_struct *vm;
+       struct static_vm *svm;
 
        if (!nr)
                return;
 
-       vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+       svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
        for (md = io_desc; nr; md++, nr--) {
                create_mapping(md);
+
+               vm = &svm->vm;
                vm->addr = (void *)(md->virtual & PAGE_MASK);
                vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
                vm->phys_addr = __pfn_to_phys(md->pfn);
                vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
                vm->flags |= VM_ARM_MTYPE(md->type);
                vm->caller = iotable_init;
-               vm_area_add_early(vm++);
+               add_static_vm_early(svm++);
        }
 }
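
The struct static_vm and add_static_vm_early() used above come from the same series that replaces open-coded vmlist walks for static mappings. A hedged sketch of their assumed shape (the real definitions live in arch/arm/mm/mm.h and arch/arm/mm/ioremap.c):

/* Assumed helper type: a vm_struct plus a node on the address-sorted
 * static_vmlist used for early, statically created mappings. */
struct static_vm {
        struct vm_struct vm;
        struct list_head list;
};

extern struct list_head static_vmlist;

/* Sketch of add_static_vm_early(): register the area with the early
 * vmalloc bookkeeping, then insert it into static_vmlist so the list
 * stays sorted by virtual address. */
void __init add_static_vm_early(struct static_vm *svm)
{
        struct static_vm *curr_svm;
        struct vm_struct *vm = &svm->vm;
        void *vaddr;

        vm_area_add_early(vm);
        vaddr = vm->addr;

        list_for_each_entry(curr_svm, &static_vmlist, list) {
                vm = &curr_svm->vm;
                if (vm->addr > vaddr)
                        break;
        }
        list_add_tail(&svm->list, &curr_svm->list);
}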
 
@@ -779,13 +825,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
                                  void *caller)
 {
        struct vm_struct *vm;
+       struct static_vm *svm;
+
+       svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm = &svm->vm;
        vm->addr = (void *)addr;
        vm->size = size;
        vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
        vm->caller = caller;
-       vm_area_add_early(vm);
+       add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +859,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+       struct static_vm *svm;
        struct vm_struct *vm;
        unsigned long addr, next = 0;
        pmd_t *pmd;
 
-       /* we're still single threaded hence no lock needed here */
-       for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-                       continue;
+       list_for_each_entry(svm, &static_vmlist, list) {
+               vm = &svm->vm;
                addr = (unsigned long)vm->addr;
                if (addr < next)
                        continue;
@@ -857,19 +905,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-       struct vm_struct *vm;
-       unsigned long addr;
+       struct static_vm *svm;
 
-       /* we're still single threaded hence no lock needed here */
-       for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-                       continue;
-               addr = (unsigned long)vm->addr;
-               addr &= ~(SZ_2M - 1);
-               if (addr == PCI_IO_VIRT_BASE)
-                       return;
+       svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+       if (svm)
+               return;
 
-       }
        vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
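
find_static_vm_vaddr() is introduced by the same static_vm series; the open-coded scan it replaces here (rounding each static mapping down to 2 MiB and comparing against PCI_IO_VIRT_BASE) becomes a lookup of the mapping covering that address. A sketch of the assumed implementation from arch/arm/mm/ioremap.c:

/* Sketch: return the static mapping whose [addr, addr + size) range
 * contains vaddr, if any. */
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
        struct static_vm *svm;
        struct vm_struct *vm;

        list_for_each_entry(svm, &static_vmlist, list) {
                vm = &svm->vm;

                /* static_vmlist is sorted in ascending address order */
                if (vm->addr > vaddr)
                        break;

                if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                        return svm;
        }

        return NULL;
}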