/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>
/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
};
struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};
/* indices for address_markers; keep sync'd w/ address_markers below */
enum address_markers_idx {
	USER_SPACE_NR = 0,
#ifdef CONFIG_X86_64
	KERNEL_SPACE_NR,
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
# ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
# endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
#else
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
# ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
# endif
	FIXADDR_START_NR,
#endif
};
/* Address space markers hints */
static struct addr_marker address_markers[] = {
	{ 0, "User Space" },
#ifdef CONFIG_X86_64
	{ 0x8000000000000000UL, "Kernel Space" },
	{ 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
	{ 0/* VMALLOC_START */, "vmalloc() Area" },
	{ 0/* VMEMMAP_START */, "Vmemmap" },
#ifdef CONFIG_KASAN
	{ KASAN_SHADOW_START,	"KASAN shadow" },
	{ KASAN_SHADOW_END,	"KASAN shadow end" },
#endif
# ifdef CONFIG_X86_ESPFIX64
	{ ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
# endif
# ifdef CONFIG_EFI
	{ EFI_VA_END,		"EFI Runtime Services" },
# endif
	{ __START_KERNEL_map,   "High Kernel Mapping" },
	{ MODULES_VADDR,        "Modules" },
	{ MODULES_END,          "End Modules" },
#else
	{ PAGE_OFFSET,          "Kernel Mapping" },
	{ 0/* VMALLOC_START */, "vmalloc() Area" },
	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
# ifdef CONFIG_HIGHMEM
	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
# endif
	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
#endif
	{ -1, NULL }		/* End of list */
};
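/*
 * For orientation, the resulting dump (e.g. via
 * /sys/kernel/debug/kernel_page_tables) looks roughly like this, one line
 * per run of identically-mapped pages, bracketed by the marker names above
 * (exact column layout comes from note_page()/printk_prot() below):
 *
 *   ---[ High Kernel Mapping ]---
 *   0xffffffff80000000-0xffffffff81000000          16M                               pmd
 *   0xffffffff81000000-0xffffffff81e00000          14M     ro         PSE     GLB x  pmd
 */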
/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
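/*
 * Worked example (x86_64, 4K pages, 4-level paging: PTRS_PER_PTE ==
 * PTRS_PER_PMD == PTRS_PER_PUD == 512, PTRS_PER_P4D == 1):
 *
 *   PTE_LEVEL_MULT =         4 KiB  (one pte maps one page)
 *   PMD_LEVEL_MULT = 512 *   4 KiB =   2 MiB per pmd entry
 *   PUD_LEVEL_MULT = 512 *   2 MiB =   1 GiB per pud entry
 *   P4D_LEVEL_MULT = 512 *   1 GiB = 512 GiB per p4d entry
 *   PGD_LEVEL_MULT =   1 * 512 GiB = 512 GiB per pgd entry (p4d folded)
 *
 * With CONFIG_X86_5LEVEL, PTRS_PER_P4D is 512 and each pgd entry instead
 * covers 256 TiB.
 */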
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})
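/*
 * Note that both macros fall through silently when m is NULL and to_dmesg
 * is false; the W+X checker below deliberately runs the walk that way, so
 * only its WARN_ONCE()/pr_info() diagnostics are produced.
 */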
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!pgprot_val(prot)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
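/*
 * Example: a present, kernel-only pte with _PAGE_RW, _PAGE_GLOBAL and
 * _PAGE_NX set prints roughly as "    RW                 GLB NX pte";
 * unset attributes are padded with spaces so the columns line up across
 * lines.
 */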
/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;

	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}
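/*
 * Worked example (4-level paging, __VIRTUAL_MASK_SHIFT == 47, so
 * shift == 16): the walker computes raw offsets such as
 * 0x0000880000000000, which has bit 47 set.  Shifting left then
 * arithmetically right by 16 replicates bit 47 upward, yielding the
 * canonical address 0xffff880000000000.
 */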
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, int level)
{
	pgprotval_t prot, cur;
	static const char units[] = "BKMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;
		pgprotval_t pr = pgprot_val(st->current_prot);

		if (st->check_wx && (pr & _PAGE_RW) && !(pr & _PAGE_NX)) {
			WARN_ONCE(1,
				  "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
				  (void *)st->start_address,
				  (void *)st->start_address);
			st->wx_pages += (st->current_address -
					 st->start_address) / PAGE_SIZE;
		}

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}
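/*
 * Worked example for the unit scaling above: a 2 MiB run has
 * delta == 0x200000 bytes.  That is divisible by 1024 twice
 * (2097152 -> 2048 -> 2), so the loop stops at unit 'M' and the size
 * column is printed as "        2M ".
 */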
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, unsigned long P)
{
	int i;
	pte_t *start;
	pgprotval_t prot;

	start = (pte_t *)pmd_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		prot = pte_flags(*start);
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		note_page(m, st, __pgprot(prot), 5);
		start++;
	}
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_zero_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_zero_pmd) ||
#ifdef CONFIG_X86_5LEVEL
	    __pa(pt) == __pa(kasan_zero_p4d) ||
#endif
	    __pa(pt) == __pa(kasan_zero_pud)) {
		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
		note_page(m, st, __pgprot(prot), 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	return false;
}
#endif
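/*
 * Scale of the saving, assuming the default 4-level KASAN layout: the
 * shadow region spans 16 TiB (0xffffec0000000000 - 0xfffffc0000000000),
 * i.e. roughly 4 billion pte slots; short-circuiting at the shared zero
 * tables skips nearly all of them.
 */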
#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			if (pmd_large(*start) || !pmd_present(*start)) {
				prot = pmd_flags(*start);
				note_page(m, st, __pgprot(prot), 4);
			} else if (!kasan_page_table(m, st, pmd_start)) {
				walk_pte_level(m, st, *start,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 4);
		start++;
	}
}

#else
#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot;
	pud_t *prev_pud = NULL;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			if (pud_large(*start) || !pud_present(*start)) {
				prot = pud_flags(*start);
				note_page(m, st, __pgprot(prot), 3);
			} else if (!kasan_page_table(m, st, pud_start)) {
				walk_pmd_level(m, st, *start,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 3);

		prev_pud = start;
		start++;
	}
}

#else
#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(p4d_val(a)),p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif
#if PTRS_PER_P4D > 1

static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot;

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			if (p4d_large(*start) || !p4d_present(*start)) {
				prot = p4d_flags(*start);
				note_page(m, st, __pgprot(prot), 2);
			} else if (!kasan_page_table(m, st, p4d_start)) {
				walk_pud_level(m, st, *start,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 2);

		start++;
	}
}

#else
#define walk_p4d_level(m,s,a,p) walk_pud_level(m,s,__p4d(pgd_val(a)),p)
#define pgd_large(a) p4d_large(__p4d(pgd_val(a)))
#define pgd_none(a)  p4d_none(__p4d(pgd_val(a)))
#endif
static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * ffff800000000000 - ffff87ffffffffff is reserved for
	 * the hypervisor.
	 */
	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
		(idx <  pgd_index(__PAGE_OFFSET));
#else
	return false;
#endif
}
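/*
 * Worked example: with 4-level paging each pgd entry spans 512 GiB, so
 * the 16 entries below pgd_index(__PAGE_OFFSET) cover 16 * 512 GiB =
 * 8 TiB, i.e. exactly the ffff800000000000 - ffff87ffffffffff window
 * noted above.
 */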
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx)
{
#ifdef CONFIG_X86_64
	pgd_t *start = (pgd_t *) &init_top_pgt;
#else
	pgd_t *start = swapper_pg_dir;
#endif
	pgprotval_t prot;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = true;
	}

	st.check_wx = checkwx;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			if (pgd_large(*start) || !pgd_present(*start)) {
				prot = pgd_flags(*start);
				note_page(m, &st, __pgprot(prot), 1);
			} else {
				walk_p4d_level(m, &st, *start,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(m, &st, __pgprot(0), 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
	ptdump_walk_pgd_level_core(m, pgd, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level);

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true);
}
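/*
 * Callers live elsewhere in the tree: the debugfs interface
 * (debug_pagetables.c) feeds a seq_file into ptdump_walk_pgd_level(),
 * while the CONFIG_DEBUG_WX check run after mark_rodata_ro() calls
 * ptdump_walk_pgd_level_checkwx().  The latter walks with m == NULL, so
 * the per-range output is suppressed and only the W+X diagnostics appear.
 */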
static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
#endif
	return 0;
}
__initcall(pt_dump_init);