// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
                                             unsigned int nr_entries)
{
        unsigned int i;

        for (i = 0; i < nr_entries; i++) {
                if (in_irqentry_text(entries[i])) {
                        /* Include the irqentry function in the stack trace. */
                        return i + 1;
                }
        }
        return nr_entries;
}
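
/*
 * For example, an allocation made from a softirq might produce a raw
 * trace roughly like:
 *
 *      save_stack -> kasan hook -> kmem_cache_alloc -> driver callback
 *      -> __do_softirq -> irq_exit -> <frames of the interrupted task>
 *
 * Everything below the softirq entry belongs to whichever task happened
 * to be interrupted and would make otherwise-identical traces unique,
 * so the trace is truncated just after the irq entry function.
 */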

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}
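
/*
 * For reference, kasan_mem_to_shadow() maps each shadow granule
 * (KASAN_SHADOW_SCALE_SIZE bytes: 8 in generic mode, 16 in software
 * tag-based mode) to a single shadow byte:
 *
 *      shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
 *
 * so, for example, poisoning a 64-byte object in generic mode writes
 * 64 / 8 == 8 shadow bytes.
 */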

void kasan_unpoison_shadow(const void *address, size_t size)
{
        u8 tag = get_tag(address);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        kasan_poison_shadow(address, size, tag);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
                        *shadow = size & KASAN_SHADOW_MASK;
        }
}
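
/*
 * Worked example, assuming generic mode (tag == 0x00, 8-byte granules):
 * unpoisoning 13 bytes memsets one full granule's shadow byte to 0
 * (fully accessible) and then sets the trailing shadow byte to
 * 13 & KASAN_SHADOW_MASK == 5, meaning only the first 5 bytes of the
 * last granule are accessible.
 */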

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address.  Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                return 0;

        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
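
/*
 * For example, an object_size of 100 fails the first two tests
 * (100 > 64 - 16 and 100 > 128 - 32) and passes the third
 * (100 <= 512 - 64), so such objects get a 64-byte redzone.
 */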

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                        max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}
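
/*
 * The resulting layout, sketched for a generic-mode cache that gets both
 * metadata structs (actual sizes are config-dependent):
 *
 *      | object | kasan_alloc_meta | kasan_free_meta | padding |
 *                \_______________ *size - object_size _______/
 *
 * Note that the metadata itself counts toward the redzone budget:
 * redzone_adjust only pads *size out further if the metadata is smaller
 * than optimal_redzone(object_size).
 */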

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

static void kasan_set_free_info(struct kmem_cache *cache,
                void *object, u8 tag)
{
        struct kasan_alloc_meta *alloc_meta;
        u8 idx = 0;

        alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
        idx = alloc_meta->free_track_idx;
        alloc_meta->free_pointer_tag[idx] = tag;
        alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

        set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly, since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next
 *    to each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}
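
/*
 * The decision above, summarized (software tag-based mode):
 *
 *      keep_tag                        -> tag already in the pointer
 *      !ctor && !RCU, init             -> KASAN_TAG_KERNEL (0xff)
 *      !ctor && !RCU, !init            -> random_tag()
 *      ctor || RCU, SLAB               -> tag derived from the object index
 *      ctor || RCU, SLUB, init         -> random_tag(), reused on later allocs
 *      ctor || RCU, SLUB, !init        -> the tag preassigned at slab creation
 */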

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                object = set_tag(object,
                                assign_tag(cache, object, true, false));

        return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

        /* else CONFIG_KASAN_SW_TAGS: */
        if ((u8)shadow_byte == KASAN_TAG_INVALID)
                return true;
        if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
                return true;

        return false;
}
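
/*
 * Examples: in generic mode, a shadow byte of 0x00..0x07 means the
 * granule is at least partially accessible, so the free is plausible;
 * 0xfb (KASAN_KMALLOC_FREE) fails the check and gets reported as a
 * double-free. In tag-based mode, 0xfe (KASAN_TAG_INVALID) or a shadow
 * tag that doesn't match the pointer tag likewise flags an invalid free.
 */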

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        s8 shadow_byte;
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
                        unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        kasan_set_free_info(cache, object, tag);

        quarantine_put(get_free_info(cache, object), cache);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
        kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}
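
/*
 * Worked example, generic mode: for a cache with object_size == 128 and
 * a kmalloc() request of size == 100 at granule-aligned object address a,
 * [a, a + 100) is unpoisoned (twelve shadow bytes of 0 plus a trailing
 * shadow byte of 100 & 7 == 4), redzone_start == a + 104 and
 * redzone_end == a + 128, so [a + 104, a + 128) is marked
 * KASAN_KMALLOC_REDZONE.
 */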

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                        gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}

#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}
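
/*
 * Sizing example: a module of 12288 bytes (three 4K pages) needs
 * (12288 + 7) >> 3 == 1536 bytes of shadow, which round_up() turns into
 * a single 4K shadow page backing the whole module mapping.
 */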

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}
#endif

extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);

void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
        unsigned long flags = user_access_save();
        __kasan_report(addr, size, is_write, ip);
        user_access_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(): the first one is
         * arch-specific and the latter depends on HUGETLB_PAGE. So let's
         * abuse pud_bad(): if the pud is bad, it's bad because it's huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If the shadow is already mapped, then it must have been
                 * mapped during boot. This can happen when onlining
                 * previously offlined memory.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free the shadow.
                 * A non-NULL result from find_vm_area() tells us that it
                 * was indeed the latter case.
                 *
                 * Currently it's not possible to free shadow mapped
                 * during boot by kasan_init(), because the code to do
                 * that hasn't been written yet. So we'll just leak the
                 * memory.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}
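
/*
 * Scale example, assuming 4K pages and a 128M hotplug block:
 * nr_pages == 32768, so nr_shadow_pages == 32768 >> 3 == 4096 and
 * shadow_size == 16M -- the usual 1/8 shadow-to-memory ratio of
 * generic KASAN.
 */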

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                      void *unused)
{
        unsigned long page;
        pte_t pte;

        if (likely(!pte_none(*ptep)))
                return 0;

        page = __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
        pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

        spin_lock(&init_mm.page_table_lock);
        if (likely(pte_none(*ptep))) {
                set_pte_at(&init_mm, addr, ptep, pte);
                page = 0;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (page)
                free_page(page);
        return 0;
}
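
/*
 * Note the check / allocate / re-check pattern above: the shadow page is
 * allocated outside init_mm.page_table_lock, since GFP_KERNEL may sleep,
 * and the pte is tested again under the lock in case another CPU
 * populated the same shadow pte in the meantime; the loser simply frees
 * its now-unneeded page.
 */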

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
        unsigned long shadow_start, shadow_end;
        int ret;

        if (!is_vmalloc_or_module_addr((void *)addr))
                return 0;

        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
        shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
        shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
        shadow_end = ALIGN(shadow_end, PAGE_SIZE);

        ret = apply_to_page_range(&init_mm, shadow_start,
                                  shadow_end - shadow_start,
                                  kasan_populate_vmalloc_pte, NULL);
        if (ret)
                return ret;

        flush_cache_vmap(shadow_start, shadow_end);

        /*
         * We need to be careful about inter-cpu effects here. Consider:
         *
         *   CPU#0                                CPU#1
         * WRITE_ONCE(p, vmalloc(100));         while (x = READ_ONCE(p)) ;
         *                                      p[99] = 1;
         *
         * With compiler instrumentation, that ends up looking like this:
         *
         *   CPU#0                                CPU#1
         * // vmalloc() allocates memory
         * // let a = area->addr
         * // we reach kasan_populate_vmalloc
         * // and call kasan_unpoison_shadow:
         * STORE shadow(a), unpoison_val
         * ...
         * STORE shadow(a+99), unpoison_val     x = LOAD p
         * // rest of vmalloc process           <data dependency>
         * STORE p, a                           LOAD shadow(x+99)
         *
         * If there is no barrier between the end of unpoisoning the shadow
         * and the store of the result to p, the stores could be committed
         * in a different order by CPU#0, and CPU#1 could erroneously observe
         * poison in the shadow.
         *
         * We need some sort of barrier between the stores.
         *
         * In the vmalloc() case, this is provided by a smp_wmb() in
         * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
         * get_vm_area() and friends, the caller gets shadow allocated but
         * doesn't have any pages mapped into the virtual address space that
         * has been reserved. Mapping those pages in will involve taking and
         * releasing a page-table lock, which will provide the barrier.
         */

        return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        kasan_unpoison_shadow(start, size);
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                        void *unused)
{
        unsigned long page;

        page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

        spin_lock(&init_mm.page_table_lock);

        if (likely(!pte_none(*ptep))) {
                pte_clear(&init_mm, addr, ptep);
                free_page(page);
        }
        spin_unlock(&init_mm.page_table_lock);

        return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end)
{
        void *shadow_start, *shadow_end;
        unsigned long region_start, region_end;
        unsigned long size;

        region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
        region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        free_region_start = ALIGN(free_region_start,
                                  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        if (start != region_start &&
            free_region_start < region_start)
                region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

        free_region_end = ALIGN_DOWN(free_region_end,
                                     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        if (end != region_end &&
            free_region_end > region_end)
                region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

        shadow_start = kasan_mem_to_shadow((void *)region_start);
        shadow_end = kasan_mem_to_shadow((void *)region_end);

        if (shadow_end > shadow_start) {
                size = shadow_end - shadow_start;
                apply_to_existing_page_range(&init_mm,
                                             (unsigned long)shadow_start,
                                             size, kasan_depopulate_vmalloc_pte,
                                             NULL);
                flush_tlb_kernel_range((unsigned long)shadow_start,
                                       (unsigned long)shadow_end);
        }
}
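
/*
 * Concretely, with 4K pages and 8-byte granules a single shadow page
 * covers PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE == 32K of vmalloc address
 * space, so a shadow page is freed only when a fully aligned 32K chunk
 * of that address space is known to be free.
 */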
#endif