/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

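/*
 * Worked example (values illustrative): with an 8KB THREAD_SIZE, an
 * object at stack + 0x1000 with len 0x200 lies entirely within
 * [stack, stackend) and, on architectures that cannot walk stack frames
 * (where arch_within_stack_frames() returns NOT_STACK), is classified
 * GOOD_STACK; the same len at stack + 0x1f00 would cross stackend and
 * be rejected as BAD_STACK.
 */
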
/*
 * If this function is reached, then CONFIG_HARDENED_USERCOPY has found an
 * unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, copies must be within the object size.
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

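/*
 * For reference, a tripped check reports a line like the following
 * before the BUG() oops (values illustrative; the name and detail
 * strings come from the caller):
 *
 *   usercopy: Kernel memory exposure attempt detected from SLUB object
 *   'task_struct' (offset 64, size 4096)!
 */
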
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
		     unsigned long high)
{
	unsigned long check_low = (uintptr_t)ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

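/*
 * Example: overlaps((void *)0x1000, 0x100, 0x10ff, 0x2000) is true:
 * the copy covers [0x1000,0x1100) and shares byte 0x10ff with the
 * half-open range [0x10ff,0x2000); with low = 0x1100 (one past the
 * copy's last byte) there is no overlap.
 */
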
/* Is this address range in the kernel text area? */
static inline const char *check_kernel_text_object(const void *ptr,
						   unsigned long n)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		return "<kernel text>";

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the reverse of __va(). This
	 * can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return NULL;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		return "<linear kernel text>";

	return NULL;
}

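/*
 * Concretely (illustrative): on an architecture like arm64, where the
 * kernel image and the linear RAM map occupy distinct virtual ranges,
 * lm_alias() (i.e. __va(__pa_symbol(x))) yields the linear-map alias of
 * the text, so a copy sourced through either alias is rejected.
 */
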
static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
	/* Reject if object wraps past end of memory. */
	if ((unsigned long)ptr + n < (unsigned long)ptr)
		return "<wrapped address>";

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		return "<null>";

	return NULL;
}

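/*
 * Example of the wrap check on a 64-bit kernel (values illustrative):
 * ptr = (void *)(~0UL - 8) with n = 32 makes "ptr + n" wrap around to a
 * value below "ptr", so the copy is rejected as "<wrapped address>"
 * before anything is dereferenced.
 */
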
/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline const char *check_page_span(const void *ptr, unsigned long n,
					  struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			return "<rodata>";
		return NULL;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return NULL;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return NULL;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return NULL;

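	/*
	 * Example with 4KB pages (values illustrative): a ptr ending in
	 * 0x...ff8 with n = 16 gives an end ending in 0x...007 on the
	 * next page; the PAGE_MASK'd addresses differ, so the object
	 * crosses a page boundary and the checks below must run.
	 */
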
	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return NULL;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		return "<spans multiple pages>";

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			return "<spans Reserved and non-Reserved pages>";
		if (is_cma && !is_migrate_cma_page(page))
			return "<spans CMA and non-CMA pages>";
	}
#endif

	return NULL;
}

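/*
 * Example (illustrative): a multi-page buffer allocated as a compound
 * page (__GFP_COMP) shares a single head page, so the endpage
 * comparison above accepts it; two independently allocated order-0
 * pages that happen to be virtually contiguous share no head page and
 * are rejected as "<spans multiple pages>".
 */
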
static inline const char *check_heap_object(const void *ptr, unsigned long n,
					    bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return NULL;

	page = virt_to_head_page(ptr);

	/* Check slab allocator for flags and size. */
	if (PageSlab(page))
		return __check_heap_object(ptr, n, page);

	/* Verify object does not incorrectly span multiple pages. */
	return check_page_span(ptr, n, page, to_user);
}

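/*
 * Note: __check_heap_object() is implemented by the slab allocator in
 * use (SLAB and SLUB each provide one under CONFIG_HARDENED_USERCOPY),
 * since only the allocator knows an object's size and position within
 * its page; this file only routes the check.
 */
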
/*
 * Validates that the given object is:
 * - not a bogus address
 * - known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	const char *err;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	err = check_bogus_address(ptr, n);
	if (err)
		goto report;

	/* Check for bad heap object. */
	err = check_heap_object(ptr, n, to_user);
	if (err)
		goto report;

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		err = "<process stack>";
		goto report;
	}

	/* Check for object in kernel to avoid text exposure. */
	err = check_kernel_text_object(ptr, n);
	if (!err)
		return;

report:
	usercopy_abort(err, NULL, to_user, 0, n);
}
EXPORT_SYMBOL(__check_object_size);

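/*
 * How this entry point is reached (sketch, based on this era's
 * include/linux/thread_info.h): the copy_{to,from}_user() paths do
 * roughly
 *
 *	if (!__builtin_constant_p(n))
 *		__check_object_size(ptr, n, to_user);
 *
 * so only runtime-sized copies pay for these checks; constant-size
 * copies can instead be validated at compile time.
 */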