For the time being, pretend that NV and SVE are incompatible. Things
will shortly change... Or not.

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
 arch/arm64/kvm/sys_regs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e711dde4511c..94affa43e86c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1324,7 +1324,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
 
-	if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
+	if (id == SYS_ID_AA64PFR0_EL1 &&
+	    (!vcpu_has_sve(vcpu) || nested_virt_in_use(vcpu))) {
 		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
 		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
-- 
2.20.1

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
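
[Editorial note, not part of the patch: below is a minimal standalone sketch of
the masking idea the hunk applies -- clearing the 4-bit SVE field (bits [35:32])
of a sanitised ID_AA64PFR0_EL1 value so the guest sees SVE as absent when SVE is
not enabled for the vcpu or when nested virt is in use. The two boolean stand-ins
replace vcpu_has_sve()/nested_virt_in_use() and exist only for this example.]

	/*
	 * Illustrative sketch of the ID-register masking done in
	 * read_id_reg(); compiles as a plain userspace program.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ID_AA64PFR0_SVE_SHIFT	32

	/* Assumed stand-ins for vcpu_has_sve() / nested_virt_in_use(). */
	static bool guest_has_sve = true;
	static bool guest_uses_nv = true;

	static uint64_t read_id_aa64pfr0(uint64_t sanitised)
	{
		uint64_t val = sanitised;

		/* Hide SVE if the guest lacks it, or (for now) if NV is in use. */
		if (!guest_has_sve || guest_uses_nv)
			val &= ~(0xfULL << ID_AA64PFR0_SVE_SHIFT);

		return val;
	}

	int main(void)
	{
		/* Host value advertising SVE level 1 in bits [35:32]. */
		uint64_t host_val = 0x1ULL << ID_AA64PFR0_SVE_SHIFT;

		printf("guest sees ID_AA64PFR0_EL1 = %#llx\n",
		       (unsigned long long)read_id_aa64pfr0(host_val));
		return 0;
	}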