// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

/*
 * The LSB of the random hyp VA tag or 0 if no randomization is used.
 */
static u8 tag_lsb;
/*
 * The random hyp VA tag value with the region bit if hyp randomization is used
 */
static u64 tag_val;
static u64 va_mask;

__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;
	int kva_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));
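
	/*
	 * kva_msb is the 1-based index of the most significant bit in
	 * which the start and the end of the kernel linear map differ;
	 * all linear map addresses share the bits above it, so bits
	 * [vabits_actual - 2 : kva_msb] are free to carry a random tag.
	 */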

	if (kva_msb == (vabits_actual - 1)) {
		/*
		 * No space in the address, let's compute the mask so
		 * that it covers (vabits_actual - 1) bits, and the region
		 * bit. The tag stays set to zero.
		 */
		va_mask  = BIT(vabits_actual - 1) - 1;
		va_mask |= hyp_va_msb;
	} else {
		/*
		 * We do have some free bits to insert a random tag.
		 * Hyp VAs are now created from kernel linear map VAs
		 * using the following formula (with V == vabits_actual):
		 *
		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
		 *  ---------------------------------------------------------
		 * | 0000000 | hyp_va_msb |    random tag  |  kern linear VA |
		 */
		tag_lsb = kva_msb;
		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
		tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
		tag_val |= hyp_va_msb;
		tag_val >>= tag_lsb;
	}
}

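/*
 * kern_hyp_va() is patched at runtime as a five-instruction sequence:
 * AND with va_mask, then (when a random tag is in use) ROR down by
 * tag_lsb, two ADDs inserting 12 bits of the tag each, and a final ROR
 * back up by (64 - tag_lsb). compute_instruction() generates the n-th
 * instruction of that sequence for the registers decoded from the
 * original code.
 */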
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

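/*
 * Callback for the kern_hyp_va() alternative: rewrite the five
 * placeholder instructions at the patch site with the sequence built
 * by compute_instruction(), or with NOPs where no translation (or no
 * tag insertion) is needed.
 */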
void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if we don't have any spare bits in
		 * the address, NOP everything after masking that
		 * particular VA.
		 */
		if (has_vhe() || (!tag_lsb && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

void *__kvm_bp_vect_base;
int __kvm_harden_el2_vector_slot;

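/*
 * Callback for the hardened EL2 vector alternative: turn the
 * placeholder slot into "stp x0, x1; build the hyp VA of the matching
 * __kvm_hyp_vector entry in x0; br x0".
 */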
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 5);

	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
		return;
	}

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
	addr &= va_mask;
	addr |= tag_val << tag_lsb;

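	/*
	 * Each vector table entry is 0x80 bytes, so the 16 entries span
	 * address bits [10:7]; copying those bits from the patch site
	 * picks the entry matching the one being patched.
	 */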
	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

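	/*
	 * The five instructions generated below: save x0/x1, load the
	 * 48-bit target address into x0 with one movz and two movk, and
	 * branch to it.
	 */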
	/* stp x0, x1, [sp, #-16]! */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}