/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 and arm implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static const struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
	{ KASAN_SHADOW_START,		"Kasan shadow start" },
	{ KASAN_SHADOW_END,		"Kasan shadow end" },
#endif
	{ MODULES_VADDR,		"Modules start" },
	{ MODULES_END,			"Modules end" },
	{ VMALLOC_START,		"vmalloc() Area" },
	{ VMALLOC_END,			"vmalloc() End" },
	{ FIXADDR_START,		"Fixmap start" },
	{ FIXADDR_TOP,			"Fixmap end" },
	{ PCI_IO_START,			"PCI I/O start" },
	{ PCI_IO_END,			"PCI I/O end" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	{ VMEMMAP_START,		"vmemmap start" },
	{ VMEMMAP_START + VMEMMAP_SIZE,	"vmemmap end" },
#endif
	{ PAGE_OFFSET,			"Linear Mapping" },
	{ -1,				NULL },
};

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
};

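/*
 * Illustrative sketch of the resulting output (addresses and attribute
 * strings depend on the kernel configuration; this is not verbatim output):
 *
 *	---[ Modules start ]---
 *	0xffffff8000000000-0xffffff8000200000	2M PMD ...
 *
 * A "---[ ... ]---" banner is emitted each time the walk crosses one of the
 * address_markers boundaries above.
 */
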
struct prot_bits {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
};

static const struct prot_bits pte_bits[] = {
	{
		.mask	= PTE_TABLE_BIT,
		.val	= PTE_TABLE_BIT,
		.set	= "   ",
		.clear	= "BLK",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
		.set	= "DEVICE/nGnRnE",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRE),
		.set	= "DEVICE/nGnRE",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_DEVICE_GRE),
		.set	= "DEVICE/GRE",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_NORMAL_NC),
		.set	= "MEM/NORMAL-NC",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_NORMAL),
		.set	= "MEM/NORMAL",
	}
};

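/*
 * dump_prot() below prints .set when the masked descriptor bits equal .val
 * and .clear otherwise; a NULL string prints nothing. The PTE_ATTRINDX
 * entries decode the MAIR attribute index into a memory-type name.
 */
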
struct pg_level {
	const struct prot_bits *bits;
	const char *name;
	size_t num;
	u64 mask;
};

static struct pg_level pg_level[] = {
	{	/* level 0: unused */
	}, {	/* pgd */
		.name	= "PGD",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	}, {	/* pud */
		.name	= (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	}, {	/* pmd */
		.name	= (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	}, {	/* pte */
		.name	= "PTE",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	},
};

static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
			size_t num)
{
	unsigned i;

	for (i = 0; i < num; i++, bits++) {
		const char *s;

		if ((st->current_prot & bits->mask) == bits->val)
			s = bits->set;
		else
			s = bits->clear;

		if (s)
			seq_printf(st->seq, " %s", s);
	}
}

static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
				u64 val)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		st->level = level;
		st->current_prot = prot;
		st->start_address = addr;
		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			seq_printf(st->seq, "0x%016lx-0x%016lx   ",
				   st->start_address, addr);

			/* Scale the range size down to the largest whole unit. */
			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			seq_printf(st->seq, "%9lu%c %s", delta, *unit,
				   pg_level[st->level].name);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits,
					  pg_level[st->level].num);
			seq_puts(st->seq, "\n");
		}

		if (addr >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		}

		st->start_address = addr;
		st->current_prot = prot;
		st->level = level;
	}

	if (addr >= st->marker[1].start_address) {
		st->marker++;
		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
}

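/*
 * Worked example of the unit scaling above: a 2MiB block gives
 * delta = 0x200000 >> 10 = 2048; 2048 is a multiple of 1024, so one more
 * shift yields delta = 2 with unit 'M', and the range prints as "2M".
 */
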
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 4, pte_val(*pte));
	}
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
			note_page(st, addr, 3, pmd_val(*pmd));
		} else {
			BUG_ON(pmd_bad(*pmd));
			walk_pte(st, pmd, addr);
		}
	}
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (pud_none(*pud) || pud_sect(*pud)) {
			note_page(st, addr, 2, pud_val(*pud));
		} else {
			BUG_ON(pud_bad(*pud));
			walk_pmd(st, pud, addr);
		}
	}
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = start + i * PGDIR_SIZE;
		if (pgd_none(*pgd)) {
			note_page(st, addr, 1, pgd_val(*pgd));
		} else {
			BUG_ON(pgd_bad(*pgd));
			walk_pud(st, pgd, addr);
		}
	}
}

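/*
 * The walk descends pgd -> pud -> pmd -> pte; the level passed to
 * note_page() (1 through 4) indexes the matching entry in pg_level[].
 * A section mapping or an empty entry ends the descent early, so block
 * mappings are reported at the level where they actually occur.
 */
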
static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
	};

	walk_pgd(&st, &init_mm, VA_START);

	/* Flush out the description of the final range. */
	note_page(&st, 0, 0, 0);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ptdump_init(void)
{
	struct dentry *pe;
	unsigned i, j;

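	/*
	 * Precompute each level's mask as the union of all prot_bits masks,
	 * so note_page() can reduce a raw descriptor to just the bits that
	 * affect the printed description.
	 */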
	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].bits[j].mask;

	pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
				 &ptdump_fops);
	return pe ? 0 : -ENOMEM;
}
device_initcall(ptdump_init);
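
/*
 * Usage sketch, assuming debugfs is mounted at its conventional location:
 *
 *	# mount -t debugfs none /sys/kernel/debug	(if not already mounted)
 *	# cat /sys/kernel/debug/kernel_page_tables
 *
 * The file is created with mode 0400 at the debugfs root, so reading it
 * requires root.
 */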