From 523d6e9fae9333a0e2a7baf4d11c8bcca544790e Mon Sep 17 00:00:00 2001
From: "zhichang.yuan"
Date: Tue, 9 Dec 2014 07:26:47 +0000
Subject: [PATCH] arm64:mm: free the useless initial page table

For a 64K page system, after mapping a PMD section, the corresponding
initial page table is not needed any more. That page can be freed.

Signed-off-by: Zhichang Yuan
[catalin.marinas@arm.com: added BUG_ON() to catch late memblock freeing]
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/pgtable.h |  3 +++
 arch/arm64/mm/mmu.c              | 15 ++++++++++++---
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 59079248529d..67f6ede39474 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -342,9 +342,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define pud_sect(pud)		(0)
+#define pud_table(pud)		(1)
 #else
 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
 				 PUD_TYPE_SECT)
+#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
+				 PUD_TYPE_TABLE)
 #endif
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 155cbb0a74b6..eb293febfb56 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -153,8 +153,14 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
 			 */
-			if (!pmd_none(old_pmd))
+			if (!pmd_none(old_pmd)) {
 				flush_tlb_all();
+				if (pmd_table(old_pmd)) {
+					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+					BUG_ON(alloc != early_alloc);
+					memblock_free(table, PAGE_SIZE);
+				}
+			}
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
 				       prot, alloc);
@@ -209,9 +215,12 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
 			 * Look up the old pmd table and free it.
 			 */
 			if (!pud_none(old_pud)) {
-				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
-				memblock_free(table, PAGE_SIZE);
 				flush_tlb_all();
+				if (pud_table(old_pud)) {
+					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+					BUG_ON(alloc != early_alloc);
+					memblock_free(table, PAGE_SIZE);
+				}
 			}
 		} else {
 			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
-- 
2.39.5
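
Note for readers outside the kernel tree: the sketch below is a small,
self-contained user-space model of the check this patch adds, not kernel
code. The names desc_none(), desc_table() and fake_memblock_free() are
hypothetical stand-ins for pmd_none()/pud_none(), pmd_table()/pud_table()
and memblock_free(), and the descriptor values are made up. It only
illustrates why the type check matters: a page can be given back solely
when the old entry was a table descriptor (type bits 0b11) pointing at a
next-level page-table page, not when it was already a section/block
mapping (type bits 0b01).

/* Minimal user-space model (assumed names, simplified descriptors). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_TYPE_MASK		0x3ULL
#define DESC_TYPE_TABLE		0x3ULL	/* points at a next-level table page */
#define DESC_TYPE_BLOCK		0x1ULL	/* block (section) mapping, no table page */

static bool desc_none(uint64_t desc)  { return desc == 0; }
static bool desc_table(uint64_t desc) { return (desc & DESC_TYPE_MASK) == DESC_TYPE_TABLE; }

/* Stand-in for memblock_free(): just report which page would be returned. */
static void fake_memblock_free(uint64_t pa)
{
	printf("freeing old table page at 0x%llx\n", (unsigned long long)pa);
}

int main(void)
{
	/* Old entries being replaced by a new section mapping. */
	uint64_t old_entries[] = {
		0x40201000ULL | DESC_TYPE_TABLE,	/* was a table: its page can be freed   */
		0x40200000ULL | DESC_TYPE_BLOCK,	/* was a section: nothing extra to free */
		0,					/* was empty: nothing to flush or free  */
	};

	for (unsigned int i = 0; i < sizeof(old_entries) / sizeof(old_entries[0]); i++) {
		uint64_t old = old_entries[i];

		if (desc_none(old))
			continue;
		/* The kernel flushes the TLB at this point (flush_tlb_all()). */
		if (desc_table(old))
			/* Simplified: treat everything above bits [11:0] as the table page address. */
			fake_memblock_free(old & ~0xfffULL);
	}
	return 0;
}

Built with something like "cc -std=c99 model.c", only the entry that was a
table descriptor reports a page to free, which mirrors the pmd_table()/
pud_table() guards added above.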