asedeno.scripts.mit.edu Git - linux.git/commitdiff
arm64: compat: Refactor aarch32_alloc_vdso_pages()
authorVincenzo Frascino <vincenzo.frascino@arm.com>
Mon, 15 Apr 2019 09:49:36 +0000 (10:49 +0100)
committerWill Deacon <will.deacon@arm.com>
Tue, 23 Apr 2019 17:01:58 +0000 (18:01 +0100)
aarch32_alloc_vdso_pages() needs to be refactored to make it
easier to disable kuser helpers.

Divide the function into aarch32_alloc_kuser_vdso_page() and
aarch32_alloc_sigreturn_vdso_page().

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
[will: Inlined sigpage allocation to simplify error paths]
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/kernel/vdso.c

index 6bb7038dc96c8912f41d78455bcc0f3e048ccbd9..41f4d75bbc1437357cb5600938fc3b61d93eb59f 100644 (file)
@@ -68,43 +68,43 @@ static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
        },
 };
 
-static int __init aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_kuser_vdso_page(void)
 {
        extern char __kuser_helper_start[], __kuser_helper_end[];
-       extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
-       int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-       unsigned long vdso_pages[C_PAGES];
-
-       vdso_pages[C_VECTORS] = get_zeroed_page(GFP_ATOMIC);
-       if (!vdso_pages[C_VECTORS])
-               return -ENOMEM;
+       unsigned long vdso_page;
 
-       vdso_pages[C_SIGPAGE] = get_zeroed_page(GFP_ATOMIC);
-       if (!vdso_pages[C_SIGPAGE]) {
-               free_page(vdso_pages[C_VECTORS]);
+       vdso_page = get_zeroed_page(GFP_ATOMIC);
+       if (!vdso_page)
                return -ENOMEM;
-       }
 
-       /* kuser helpers */
-       memcpy((void *)(vdso_pages[C_VECTORS] + 0x1000 - kuser_sz),
-              __kuser_helper_start,
+       memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
+       aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
+       flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+       return 0;
+}
 
-       /* sigreturn code */
-       memcpy((void *)vdso_pages[C_SIGPAGE], __aarch32_sigret_code_start,
-              sigret_sz);
+static int __init aarch32_alloc_vdso_pages(void)
+{
+       extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+       int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+       unsigned long sigpage;
+       int ret;
 
-       flush_icache_range(vdso_pages[C_VECTORS],
-                          vdso_pages[C_VECTORS] + PAGE_SIZE);
-       flush_icache_range(vdso_pages[C_SIGPAGE],
-                          vdso_pages[C_SIGPAGE] + PAGE_SIZE);
+       sigpage = get_zeroed_page(GFP_ATOMIC);
+       if (!sigpage)
+               return -ENOMEM;
 
-       aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_pages[C_VECTORS]);
-       aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_pages[C_SIGPAGE]);
+       memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+       aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
+       flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
 
-       return 0;
+       ret = aarch32_alloc_kuser_vdso_page();
+       if (ret)
+               free_page(sigpage);
+
+       return ret;
 }
 arch_initcall(aarch32_alloc_vdso_pages);