]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
s390/mm,vmemmap: use 1MB frames for vmemmap
authorHeiko Carstens <heiko.carstens@de.ibm.com>
Wed, 17 Oct 2012 10:18:05 +0000 (12:18 +0200)
committerMartin Schwidefsky <schwidefsky@de.ibm.com>
Wed, 17 Oct 2012 15:31:27 +0000 (17:31 +0200)
Use 1MB frames for vmemmap if EDAT1 is available in order to
reduce TLB pressure.
Always use a 1MB frame even if it's only partially needed for
struct pages. Otherwise we would end up with a mix of large
frame and page mappings, because vmemmap_populate gets called
for each section (256MB -> 3.5MB memmap) separately.
Worst case is that we would waste 512KB.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/mm/vmem.c

index bf37a094a46bc8dc24b8bc1491b5f68a9ee7bb59..383a8ee09b3434bfb86cba674ec0cd6407c68ff3 100644 (file)
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
        start_addr = (unsigned long) start;
        end_addr = (unsigned long) (start + nr);
 
-       for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+       for (address = start_addr; address < end_addr;) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
@@ -224,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
+#ifdef CONFIG_64BIT
+                       /* Use 1MB frames for vmemmap if available. We always
+                        * use large frames even if they are only partially
+                        * used.
+                        * Otherwise we would have also page tables since
+                        * vmemmap_populate gets called for each section
+                        * separately. */
+                       if (MACHINE_HAS_EDAT1 && !(address & ~PMD_MASK)) {
+                               void *new_page;
+
+                               new_page = vmemmap_alloc_block(PMD_SIZE, node);
+                               if (!new_page)
+                                       goto out;
+                               pte = mk_pte_phys(__pa(new_page), PAGE_RW);
+                               pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+                               pmd_val(*pm_dir) = pte_val(pte);
+                               address += PMD_SIZE;
+                               continue;
+                       }
+#endif
                        pt_dir = vmem_pte_alloc(address);
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
+               } else if (pmd_large(*pm_dir)) {
+                       address = (address + PMD_SIZE) & PMD_MASK;
+                       continue;
                }
 
                pt_dir = pte_offset_kernel(pm_dir, address);
@@ -240,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                        pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
                        *pt_dir = pte;
                }
+               address += PAGE_SIZE;
        }
        memset(start, 0, nr * sizeof(struct page));
        ret = 0;