On Thu, 20 May 2021 13:55:48 +0100,
Zenghui Yu <yuzenghui@xxxxxxxxxx> wrote:
> 
> On 2021/5/11 0:58, Marc Zyngier wrote:
> > From: Jintack Lim <jintack.lim@xxxxxxxxxx>
> > 
> > Support injecting exceptions and performing exception returns to and
> > from virtual EL2. This must be done entirely in software except when
> > taking an exception from vEL0 to vEL2 when the virtual HCR_EL2.{E2H,TGE}
> > == {1,1} (a VHE guest hypervisor).
> > 
> > Signed-off-by: Jintack Lim <jintack.lim@xxxxxxxxxx>
> > Signed-off-by: Christoffer Dall <christoffer.dall@xxxxxxx>
> > [maz: switch to common exception injection framework]
> > Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
> > ---
> >  arch/arm64/include/asm/kvm_arm.h     |  17 +++
> >  arch/arm64/include/asm/kvm_emulate.h |  10 ++
> >  arch/arm64/kvm/Makefile              |   2 +-
> >  arch/arm64/kvm/emulate-nested.c      | 176 +++++++++++++++++++++++++++
> >  arch/arm64/kvm/hyp/exception.c       |  45 +++++--
> >  arch/arm64/kvm/inject_fault.c        |  63 ++++++++--
> >  arch/arm64/kvm/trace_arm.h           |  59 +++++++++
> >  7 files changed, 354 insertions(+), 18 deletions(-)
> >  create mode 100644 arch/arm64/kvm/emulate-nested.c
> 
> [...]
> 
> >  static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
> >  {
> >  	unsigned long cpsr = *vcpu_cpsr(vcpu);
> >  	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
> >  	u32 esr = 0;
> >  
> > -	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
> > -			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
> > -			     KVM_ARM64_PENDING_EXCEPTION);
> > -
> > -	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
> > +	pend_sync_exception(vcpu);
> >  
> >  	/*
> >  	 * Build an {i,d}abort, depending on the level and the
> > @@ -45,16 +79,22 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
> >  	if (!is_iabt)
> >  		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
> >  
> > -	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
> > +	esr |= ESR_ELx_FSC_EXTABT;
> > +
> > +	if (vcpu->arch.flags & KVM_ARM64_EXCEPT_AA64_EL1) {
> 
> This isn't the right way to pick between EL1 and EL2 since
> KVM_ARM64_EXCEPT_AA64_EL1 is (0 << 11), we will not be able
> to inject abort to EL1 that way.

Indeed, well observed.
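To spell out why that test can never select EL1: KVM_ARM64_EXCEPT_AA64_EL1
is the all-zeroes encoding of the target-EL field, and ANDing anything with
0 yields 0, so the EL1 branch is dead code. A minimal standalone sketch of
the pitfall and of the masked-compare fix (hypothetical user-space C; only
the three flag values are taken from kvm_host.h, everything else is made up
for the demonstration):

#include <stdio.h>

#define KVM_ARM64_EXCEPT_AA64_EL1	(0UL << 11)	/* all-zeroes encoding */
#define KVM_ARM64_EXCEPT_AA64_EL2	(1UL << 11)
#define KVM_ARM64_EXCEPT_AA64_EL_MASK	(1UL << 11)

int main(void)
{
	unsigned long flags = KVM_ARM64_EXCEPT_AA64_EL1; /* target is EL1 */

	/* Broken test: flags & 0 is always 0, so this never fires. */
	if (flags & KVM_ARM64_EXCEPT_AA64_EL1)
		printf("bitwise test says EL1\n");	/* unreachable */

	/* Fixed test: isolate the field, then compare encodings. */
	if ((flags & KVM_ARM64_EXCEPT_AA64_EL_MASK) == KVM_ARM64_EXCEPT_AA64_EL1)
		printf("masked compare says EL1\n");	/* prints */

	return 0;
}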
I'm squashing the following fix in this patch.

Thanks,

	M.

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 27397ecf9a23..fe781557e42c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -524,6 +524,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
 #define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
 #define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
+#define KVM_ARM64_EXCEPT_AA64_EL_MASK	(1 << 11)
 
 /*
  * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 06df0bb848ca..5dcf3f8b08b8 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -52,6 +52,11 @@ static void pend_sync_exception(struct kvm_vcpu *vcpu)
 	}
 }
 
+static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
+{
+	return (vcpu->arch.flags & KVM_ARM64_EXCEPT_AA64_EL_MASK) == target;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -81,7 +86,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 	esr |= ESR_ELx_FSC_EXTABT;
 
-	if (vcpu->arch.flags & KVM_ARM64_EXCEPT_AA64_EL1) {
+	if (match_target_el(vcpu, KVM_ARM64_EXCEPT_AA64_EL1)) {
 		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 	} else {
@@ -103,7 +108,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	if (kvm_vcpu_trap_il_is32bit(vcpu))
 		esr |= ESR_ELx_IL;
 
-	if (vcpu->arch.flags & KVM_ARM64_EXCEPT_AA64_EL1)
+	if (match_target_el(vcpu, KVM_ARM64_EXCEPT_AA64_EL1))
 		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 	else
 		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);

-- 
Without deviation from the norm, progress is not possible.