Merge remote-tracking branch 'nfs/linux-next'

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index cab7a5be40aa85cbd933635d48208d2af54bcf1c..66c246871d2e360f36748375d1a0b9e59cd3024e 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
 #include <linux/memblock.h>
 #include <linux/start_kernel.h>
 
+#include <asm/mmu_context.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -32,7 +35,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
        if (pmd_none(*pmd))
                pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
 
-       pte = pte_offset_kernel(pmd, addr);
+       pte = pte_offset_kimg(pmd, addr);
        do {
                next = addr + PAGE_SIZE;
                set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -50,7 +53,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
        if (pud_none(*pud))
                pud_populate(&init_mm, pud, kasan_zero_pmd);
 
-       pmd = pmd_offset(pud, addr);
+       pmd = pmd_offset_kimg(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                kasan_early_pte_populate(pmd, addr, next);
@@ -67,7 +70,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
        if (pgd_none(*pgd))
                pgd_populate(&init_mm, pgd, kasan_zero_pud);
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset_kimg(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                kasan_early_pmd_populate(pud, addr, next);
@@ -96,6 +99,21 @@ asmlinkage void __init kasan_early_init(void)
        kasan_map_early_shadow();
 }
 
+/*
+ * Copy the current shadow region into a new pgdir.
+ */
+void __init kasan_copy_shadow(pgd_t *pgdir)
+{
+       pgd_t *pgd, *pgd_new, *pgd_end;
+
+       pgd = pgd_offset_k(KASAN_SHADOW_START);
+       pgd_end = pgd_offset_k(KASAN_SHADOW_END);
+       pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+       do {
+               set_pgd(pgd_new, *pgd);
+       } while (pgd++, pgd_new++, pgd != pgd_end);
+}
+
 static void __init clear_pgds(unsigned long start,
                        unsigned long end)
 {
@@ -108,20 +126,15 @@ static void __init clear_pgds(unsigned long start,
                set_pgd(pgd_offset_k(start), __pgd(0));
 }
 
-static void __init cpu_set_ttbr1(unsigned long ttbr1)
-{
-       asm(
-       "       msr     ttbr1_el1, %0\n"
-       "       isb"
-       :
-       : "r" (ttbr1));
-}
-
 void __init kasan_init(void)
 {
+       u64 kimg_shadow_start, kimg_shadow_end;
        struct memblock_region *reg;
        int i;
 
+       kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
+       kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
+
        /*
         * We are going to perform proper setup of shadow memory.
         * At first we should unmap early shadow (clear_pgds() call below).
@@ -130,13 +143,30 @@ void __init kasan_init(void)
         * setup will be finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
-       cpu_set_ttbr1(__pa(tmp_pg_dir));
-       flush_tlb_all();
+       dsb(ishst);
+       cpu_replace_ttbr1(tmp_pg_dir);
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
+       vmemmap_populate(kimg_shadow_start, kimg_shadow_end, NUMA_NO_NODE);
+
+       /*
+        * vmemmap_populate() has populated the shadow region that covers the
+        * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
+        * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
+        * kasan_populate_zero_shadow() from replacing the PMD block mappings
+        * with PMD table mappings at the edges of the shadow region for the
+        * kernel image.
+        */
+       if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+               kimg_shadow_start = round_down(kimg_shadow_start,
+                                              SWAPPER_BLOCK_SIZE);
+               kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+       }
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
-                       kasan_mem_to_shadow((void *)MODULES_VADDR));
+                                  (void *)kimg_shadow_start);
+       kasan_populate_zero_shadow((void *)kimg_shadow_end,
+                                  kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
        for_each_memblock(memory, reg) {
                void *start = (void *)__phys_to_virt(reg->base);
@@ -165,8 +195,7 @@ void __init kasan_init(void)
                        pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
 
        memset(kasan_zero_page, 0, PAGE_SIZE);
-       cpu_set_ttbr1(__pa(swapper_pg_dir));
-       flush_tlb_all();
+       cpu_replace_ttbr1(swapper_pg_dir);
 
        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
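
The rounding described in the comment added above can be checked with a small
standalone sketch. All constants below (the shadow offset, the example
_text/_end addresses, and the 2 MiB SWAPPER_BLOCK_SIZE of the 4K-page,
section-mapped configuration) are assumptions chosen only to illustrate the
arithmetic; mem_to_shadow() mirrors the generic KASAN formula
shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET used by
kasan_mem_to_shadow().

	/*
	 * Illustrative sketch only: shadow offset, addresses and block size
	 * are assumed values, not taken from a real kernel configuration.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define KASAN_SHADOW_SCALE_SHIFT	3			/* 1 shadow byte per 8 bytes */
	#define KASAN_SHADOW_OFFSET		0xdfff200000000000ULL	/* illustrative only */
	#define SWAPPER_BLOCK_SIZE		(2ULL << 20)		/* assumed 2 MiB section */

	#define round_down(x, a)	((x) & ~((uint64_t)(a) - 1))
	#define round_up(x, a)		round_down((x) + (a) - 1, (a))

	/* Mirror of kasan_mem_to_shadow(): shadow = (addr >> scale) + offset. */
	static uint64_t mem_to_shadow(uint64_t addr)
	{
		return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
	}

	int main(void)
	{
		uint64_t text = 0xffff000008080000ULL;	/* example _text address */
		uint64_t end  = 0xffff000008e00000ULL;	/* example _end address */
		uint64_t s = mem_to_shadow(text);
		uint64_t e = mem_to_shadow(end);

		/*
		 * Align to the block size so the block mappings created by
		 * vmemmap_populate() at the edges of the image shadow are not
		 * split when the surrounding zero shadow is populated.
		 */
		printf("kimg shadow: [%#llx, %#llx)\n",
		       (unsigned long long)round_down(s, SWAPPER_BLOCK_SIZE),
		       (unsigned long long)round_up(e, SWAPPER_BLOCK_SIZE));
		return 0;
	}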