[ARM] spelling fixes

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 32139800d939affee71890f792d07960eaa638a6..02e050ae59f6871e89d20481c66e251063486412 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -92,7 +92,7 @@ static struct cachepolicy cache_policies[] __initdata = {
 };
 
 /*
- * These are useful for identifing cache coherency
+ * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
  * writebuffer to be turned off.  (Note: the write
  * buffer should not be on and the cache off).
@@ -176,28 +176,42 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-struct mem_type {
-       unsigned int    prot_pte;
-       unsigned int    prot_l1;
-       unsigned int    prot_sect;
-       unsigned int    domain;
-};
+#define PROT_PTE_DEVICE                L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
+#define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
 
-static struct mem_type mem_types[] __initdata = {
-       [MT_DEVICE] = {
-               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-                               L_PTE_WRITE,
-               .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
-                               PMD_SECT_AP_WRITE,
-               .domain    = DOMAIN_IO,
+static struct mem_type mem_types[] = {
+       [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
+               .prot_pte       = PROT_PTE_DEVICE,
+               .prot_l1        = PMD_TYPE_TABLE,
+               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+               .domain         = DOMAIN_IO,
+       },
+       [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
+               .prot_pte       = PROT_PTE_DEVICE,
+               .prot_pte_ext   = PTE_EXT_TEX(2),
+               .prot_l1        = PMD_TYPE_TABLE,
+               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+               .domain         = DOMAIN_IO,
+       },
+       [MT_DEVICE_CACHED] = {    /* ioremap_cached */
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+               .prot_l1        = PMD_TYPE_TABLE,
+               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
+               .domain         = DOMAIN_IO,
+       },      
+       [MT_DEVICE_IXP2000] = {   /* IXP2400 requires XCB=101 for on-chip I/O */
+               .prot_pte       = PROT_PTE_DEVICE,
+               .prot_l1        = PMD_TYPE_TABLE,
+               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
+                                 PMD_SECT_TEX(1),
+               .domain         = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
@@ -213,30 +227,20 @@ static struct mem_type mem_types[] __initdata = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+               .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
-               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-                               L_PTE_WRITE,
-               .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
-                               PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
-                               PMD_SECT_TEX(1),
-               .domain    = DOMAIN_IO,
-       },
-       [MT_NONSHARED_DEVICE] = {
-               .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
-                               PMD_SECT_AP_WRITE,
-               .domain    = DOMAIN_IO,
-       }
 };
 
+const struct mem_type *get_mem_type(unsigned int type)
+{
+       return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
+}
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
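
Editor's note: the hunk above drops the file-local struct mem_type, while the new initialisers set a prot_pte_ext field and get_mem_type() hands entries out by const pointer, so the definition has presumably moved into a shared mm header where ioremap and friends can reach it. A minimal sketch of what the rest of the diff appears to rely on (the header location and comments are assumptions; the field names come from the initialisers above):

	struct mem_type {
		unsigned int	prot_pte;	/* L_PTE_* bits for the Linux PTE */
		unsigned int	prot_pte_ext;	/* extra bits passed to set_pte_ext() */
		unsigned int	prot_l1;	/* L1 descriptor bits for a page-table pointer */
		unsigned int	prot_sect;	/* L1 descriptor bits for a section mapping */
		unsigned int	domain;		/* DOMAIN_IO, DOMAIN_KERNEL, ... */
	};

	/* Hypothetical ioremap-style caller: look up the device type and
	 * build a pgprot from it; returns an empty pgprot for bad types. */
	static pgprot_t example_device_prot(void)
	{
		const struct mem_type *mtype = get_mem_type(MT_DEVICE);

		return mtype ? __pgprot(mtype->prot_pte) : __pgprot(0);
	}
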
@@ -262,20 +266,23 @@ static void __init build_mem_type_table(void)
        }
 
        /*
-        * Xscale must not have PMD bit 4 set for section mappings.
+        * ARMv5 and lower, bit 4 must be set for page tables.
+        * (was: cache "update-able on write" bit on ARM610)
+        * However, Xscale cores require this bit to be cleared.
         */
-       if (cpu_is_xscale())
-               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+       if (cpu_is_xscale()) {
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
-
-       /*
-        * ARMv5 and lower, excluding Xscale, bit 4 must be set for
-        * page tables.
-        */
-       if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
-               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                       mem_types[i].prot_l1 &= ~PMD_BIT4;
+               }
+       } else if (cpu_arch < CPU_ARCH_ARMv6) {
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
+                       if (mem_types[i].prot_sect)
+                               mem_types[i].prot_sect |= PMD_BIT4;
+               }
+       }
 
        cp = &cache_policies[cachepolicy];
        kern_pgprot = user_pgprot = cp->pte;
@@ -295,13 +302,6 @@ static void __init build_mem_type_table(void)
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
-               /*
-                * bit 4 becomes XN which we must clear for the
-                * kernel memory mapping.
-                */
-               mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
-               mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
-
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
@@ -380,45 +380,56 @@ static void __init build_mem_type_table(void)
 
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
-/*
- * Create a SECTION PGD between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  This operates on half-
- * pgdir entry increments.
- */
-static inline void
-alloc_init_section(unsigned long virt, unsigned long phys, int prot)
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+                                 unsigned long end, unsigned long pfn,
+                                 const struct mem_type *type)
 {
-       pmd_t *pmdp = pmd_off_k(virt);
+       pte_t *pte;
 
-       if (virt & (1 << 20))
-               pmdp++;
+       if (pmd_none(*pmd)) {
+               pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+               __pmd_populate(pmd, __pa(pte) | type->prot_l1);
+       }
 
-       *pmdp = __pmd(phys | prot);
-       flush_pmd_entry(pmdp);
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
+                           type->prot_pte_ext);
+               pfn++;
+       } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-/*
- * Add a PAGE mapping between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  Note that due to the
- * way we map the PTEs, we must allocate two PTE_SIZE'd
- * blocks - one for the Linux pte table, and one for
- * the hardware pte table.
- */
-static inline void
-alloc_init_page(unsigned long virt, unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+                                     unsigned long end, unsigned long phys,
+                                     const struct mem_type *type)
 {
-       pmd_t *pmdp = pmd_off_k(virt);
-       pte_t *ptep;
+       pmd_t *pmd = pmd_offset(pgd, addr);
 
-       if (pmd_none(*pmdp)) {
-               ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
-                                              sizeof(pte_t));
+       /*
+        * Try a section mapping - end, addr and phys must all be aligned
+        * to a section boundary.  Note that PMDs refer to the individual
+        * L1 entries, whereas PGDs refer to a group of L1 entries making
+        * up one logical pointer to an L2 table.
+        */
+       if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+               pmd_t *p = pmd;
 
-               __pmd_populate(pmdp, __pa(ptep) | type->prot_l1);
-       }
-       ptep = pte_offset_kernel(pmdp, virt);
+               if (addr & SECTION_SIZE)
+                       pmd++;
+
+               do {
+                       *pmd = __pmd(phys | type->prot_sect);
+                       phys += SECTION_SIZE;
+               } while (pmd++, addr += SECTION_SIZE, addr != end);
 
-       set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(type->prot_pte)), 0);
+               flush_pmd_entry(p);
+       } else {
+               /*
+                * No need to loop; pte's aren't interested in the
+                * individual L1 entries.
+                */
+               alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
+       }
 }
 
 static void __init create_36bit_mapping(struct map_desc *md,
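
Editor's note: the new alloc_init_section() replaces the old per-0xfffff checks with a single test: OR-ing addr, end and phys and masking with ~SECTION_MASK is zero only when all three are 1MB aligned, in which case whole section descriptors are written and the PTE path is skipped. A small standalone sketch of that test, using constants that mirror the kernel's 1MB sections but are renamed to avoid clashing with the real ones:

	#define EX_SECTION_SIZE	(1UL << 20)		/* 1MB ARM sections */
	#define EX_SECTION_MASK	(~(EX_SECTION_SIZE - 1))

	/* Returns 1 when a range can be covered by section mappings alone. */
	static int uses_sections(unsigned long addr, unsigned long end,
				 unsigned long phys)
	{
		return ((addr | end | phys) & ~EX_SECTION_MASK) == 0;
	}

	/*
	 * uses_sections(0xe0000000, 0xe0200000, 0x48000000) -> 1 (two sections)
	 * uses_sections(0xe0001000, 0xe0200000, 0x48000000) -> 0 (PTE tables)
	 */
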
@@ -488,9 +499,9 @@ static void __init create_36bit_mapping(struct map_desc *md,
  */
 void __init create_mapping(struct map_desc *md)
 {
-       unsigned long virt, length;
-       unsigned long off = (u32)__pfn_to_phys(md->pfn);
+       unsigned long phys, addr, length, end;
        const struct mem_type *type;
+       pgd_t *pgd;
 
        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
@@ -516,41 +527,27 @@ void __init create_mapping(struct map_desc *md)
                return;
        }
 
-       virt   = md->virtual;
-       off   -= virt;
-       length = md->length;
+       addr = md->virtual;
+       phys = (unsigned long)__pfn_to_phys(md->pfn);
+       length = PAGE_ALIGN(md->length);
 
-       if (type->prot_l1 == 0 &&
-           (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+       if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
-                      __pfn_to_phys(md->pfn), md->virtual);
+                      __pfn_to_phys(md->pfn), addr);
                return;
        }
 
-       while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
-               alloc_init_page(virt, virt + off, type);
-
-               virt   += PAGE_SIZE;
-               length -= PAGE_SIZE;
-       }
-
-       /*
-        * A section mapping covers half a "pgdir" entry.
-        */
-       while (length >= (PGDIR_SIZE / 2)) {
-               alloc_init_section(virt, virt + off, type->prot_sect);
-
-               virt   += (PGDIR_SIZE / 2);
-               length -= (PGDIR_SIZE / 2);
-       }
+       pgd = pgd_offset_k(addr);
+       end = addr + length;
+       do {
+               unsigned long next = pgd_addr_end(addr, end);
 
-       while (length >= PAGE_SIZE) {
-               alloc_init_page(virt, virt + off, type);
+               alloc_init_section(pgd, addr, next, phys, type);
 
-               virt   += PAGE_SIZE;
-               length -= PAGE_SIZE;
-       }
+               phys += next - addr;
+               addr = next;
+       } while (pgd++, addr != end);
 }
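
Editor's note: for context, create_mapping() is normally reached from a machine's map_io hook via iotable_init(), one struct map_desc per static mapping; the fields used are exactly the ones consumed above. A hedged example, not part of this patch (the board name and both addresses are invented):

	static struct map_desc example_io_desc[] __initdata = {
		{
			.virtual = 0xf8000000,			/* made-up VA */
			.pfn	 = __phys_to_pfn(0x10000000),	/* made-up PA */
			.length	 = SZ_1M,
			.type	 = MT_DEVICE,
		},
	};

	static void __init example_map_io(void)
	{
		iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
	}

Since the virtual address, physical address and length are all 1MB aligned, the rewritten create_mapping() covers this entry with a single section descriptor instead of allocating a PTE table.
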
 
 /*