From: Marc Zyngier <maz@xxxxxxxxxx>

When dealing with a guest with SVE enabled, make sure the host SVE
state is pinned at EL2 S1, and that the shadow state is correctly
initialised (and then unpinned on teardown).

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c |  9 ++++----
 arch/arm64/kvm/hyp/nvhe/pkvm.c     | 33 ++++++++++++++++++++++++++++++
 2 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 5d6cee7436f4..1e39dc7eab4d 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -416,8 +416,7 @@ static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
 	if (host_flags & KVM_ARM64_PKVM_STATE_DIRTY)
 		__flush_vcpu_state(shadow_state);
 
-	shadow_vcpu->arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
-	shadow_vcpu->arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+	shadow_vcpu->arch.flags = host_flags;
 
 	shadow_vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS & ~(HCR_RW | HCR_TWI | HCR_TWE);
 	shadow_vcpu->arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2);
@@ -488,8 +487,10 @@ static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state,
 		BUG();
 	}
 
-	host_flags = READ_ONCE(host_vcpu->arch.flags) &
-		~(KVM_ARM64_PENDING_EXCEPTION | KVM_ARM64_INCREMENT_PC);
+	host_flags = shadow_vcpu->arch.flags;
+	if (shadow_state_is_protected(shadow_state))
+		host_flags &= ~(KVM_ARM64_PENDING_EXCEPTION | KVM_ARM64_INCREMENT_PC);
+
 	WRITE_ONCE(host_vcpu->arch.flags, host_flags);
 	shadow_state->exit_code = exit_reason;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 51da5c1d7e0d..9feeb0b5433a 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -372,7 +372,19 @@ static void unpin_host_vcpus(struct kvm_shadow_vcpu_state *shadow_vcpu_states,
 
 	for (i = 0; i < nr_vcpus; i++) {
 		struct kvm_vcpu *host_vcpu = shadow_vcpu_states[i].host_vcpu;
+		struct kvm_vcpu *shadow_vcpu = &shadow_vcpu_states[i].shadow_vcpu;
+		size_t sve_state_size;
+		void *sve_state;
+
 		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
+
+		if (!test_bit(KVM_ARM_VCPU_SVE, shadow_vcpu->arch.features))
+			continue;
+
+		sve_state = shadow_vcpu->arch.sve_state;
+		sve_state = kern_hyp_va(sve_state);
+		sve_state_size = vcpu_sve_state_size(shadow_vcpu);
+		hyp_unpin_shared_mem(sve_state, sve_state + sve_state_size);
 	}
 }
 
@@ -448,6 +460,27 @@ static int init_shadow_structs(struct kvm *kvm, struct kvm_shadow_vm *vm,
 		if (ret)
 			return ret;
 
+		if (test_bit(KVM_ARM_VCPU_SVE, shadow_vcpu->arch.features)) {
+			size_t sve_state_size;
+			void *sve_state;
+
+			shadow_vcpu->arch.sve_state = READ_ONCE(host_vcpu->arch.sve_state);
+			shadow_vcpu->arch.sve_max_vl = READ_ONCE(host_vcpu->arch.sve_max_vl);
+
+			sve_state = kern_hyp_va(shadow_vcpu->arch.sve_state);
+			sve_state_size = vcpu_sve_state_size(shadow_vcpu);
+
+			if (!shadow_vcpu->arch.sve_state || !sve_state_size ||
+			    hyp_pin_shared_mem(sve_state,
+					       sve_state + sve_state_size)) {
+				clear_bit(KVM_ARM_VCPU_SVE,
+					  shadow_vcpu->arch.features);
+				shadow_vcpu->arch.sve_state = NULL;
+				shadow_vcpu->arch.sve_max_vl = 0;
+				return -EINVAL;
+			}
+		}
+
 		pkvm_vcpu_init_traps(shadow_vcpu, host_vcpu);
 		kvm_reset_pvm_sys_regs(shadow_vcpu);
 	}
-- 
2.36.1.124.g0e6072fb45-goog
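
P.S. As a reading aid only (not part of the patch), the pin/unpin
lifecycle the two pkvm.c hunks implement can be condensed as below.
The helper names pkvm_pin_vcpu_sve() and pkvm_unpin_vcpu_sve() are
hypothetical; kern_hyp_va(), vcpu_sve_state_size(),
hyp_pin_shared_mem() and hyp_unpin_shared_mem() are the helpers the
patch itself uses:

	/* Illustrative sketch only; mirrors the init_shadow_structs() hunk. */
	static int pkvm_pin_vcpu_sve(struct kvm_vcpu *shadow_vcpu)
	{
		/* Translate the host kernel VA of the SVE buffer to a hyp VA. */
		void *sve_state = kern_hyp_va(shadow_vcpu->arch.sve_state);
		size_t sz = vcpu_sve_state_size(shadow_vcpu);

		/* Reject a missing buffer or a bogus vector length... */
		if (!shadow_vcpu->arch.sve_state || !sz)
			return -EINVAL;

		/*
		 * ...and fail unless the pages are already shared with EL2;
		 * on success a reference is taken so the host cannot unshare
		 * them while the shadow vcpu is live.
		 */
		return hyp_pin_shared_mem(sve_state, sve_state + sz);
	}

	/* Teardown counterpart; mirrors the unpin_host_vcpus() hunk. */
	static void pkvm_unpin_vcpu_sve(struct kvm_vcpu *shadow_vcpu)
	{
		void *sve_state = kern_hyp_va(shadow_vcpu->arch.sve_state);

		hyp_unpin_shared_mem(sve_state,
				     sve_state + vcpu_sve_state_size(shadow_vcpu));
	}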