arm64/cpufeatures: Introduce ESR_ELx_SYS64_ISS_RT()
author    Anshuman Khandual <anshuman.khandual@arm.com>
          Thu, 20 Sep 2018 04:06:19 +0000 (09:36 +0530)
committer Catalin Marinas <catalin.marinas@arm.com>
          Fri, 21 Sep 2018 10:05:25 +0000 (11:05 +0100)
Extracting the target register from the ESR.ISS encoding is already needed in
multiple places. Turn it into a macro definition and replace all the existing
open-coded uses.
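
For reference, a minimal standalone sketch of the extraction the macro wraps,
assuming the usual ESR_ELx ISS layout for trapped system instructions in which
Rt occupies ISS bits [9:5] (shift 5, 5-bit mask); the #defines below mirror the
kernel names but are redefined locally so the example builds in userspace:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field layout: Rt lives in ISS bits [9:5] of the ESR. */
    #define ESR_ELx_SYS64_ISS_RT_SHIFT	5
    #define ESR_ELx_SYS64_ISS_RT_MASK	(0x1fUL << ESR_ELx_SYS64_ISS_RT_SHIFT)
    #define ESR_ELx_SYS64_ISS_RT(esr) \
    	(((esr) & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT)

    int main(void)
    {
    	uint32_t esr = 13U << ESR_ELx_SYS64_ISS_RT_SHIFT;	/* fake ESR with Rt = x13 */

    	/* Mask and shift out bits [9:5]; prints "rt = 13". */
    	printf("rt = %lu\n", ESR_ELx_SYS64_ISS_RT(esr));
    	return 0;
    }

This is the same mask-and-shift sequence that the patch below removes from its
open-coded call sites in kvm_emulate.h and traps.c.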

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kernel/traps.c

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index ce70c3ffb99368efc8adb6d1baa4fa9b249ea327..cc2d9e7bafe613019c4ad46fe4bde59d0f4693ab 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
 
 #define ESR_ELx_SYS64_ISS_SYS_OP_MASK  (ESR_ELx_SYS64_ISS_SYS_MASK | \
                                         ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_RT(esr) \
+       (((esr) & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT)
 /*
  * User space cache operations have the following sysreg encoding
  * in System instructions.
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6106a85ae0be70f91f8ad7b64bd7723e236843e5..21247870def7bf77d34ce403255b9d73c71542f1 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -335,7 +335,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
        u32 esr = kvm_vcpu_get_hsr(vcpu);
-       return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+       return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ce29973b9bfe90042aceb1064c31958541f981cb..abfb304e10255cdf6be2a18662c0b7d1bda84883 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -438,7 +438,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
        unsigned long address;
-       int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+       int rt = ESR_ELx_SYS64_ISS_RT(esr);
        int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
        int ret = 0;
 
@@ -473,7 +473,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 
 static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
 {
-       int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+       int rt = ESR_ELx_SYS64_ISS_RT(esr);
        unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
 
        pt_regs_write_reg(regs, rt, val);
@@ -483,7 +483,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
 
 static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
 {
-       int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+       int rt = ESR_ELx_SYS64_ISS_RT(esr);
 
        pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
@@ -491,7 +491,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
 
 static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
 {
-       int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+       int rt = ESR_ELx_SYS64_ISS_RT(esr);
 
        pt_regs_write_reg(regs, rt, arch_timer_get_rate());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);