Check the task's FP state in the host and update the vcpu's flags before calling into hyp. This keeps the synchronization symmetrical around the call into hyp. kvm_arch_vcpu_ctxsync_fp() is renamed to kvm_arch_vcpu_sync_fp_after_hyp() so that its name can pair with the new kvm_arch_vcpu_sync_fp_before_hyp(). If the system doesn't support FPSIMD, avoid setting any of the vcpu's FPSIMD flags to match the previous behavior. Signed-off-by: Andrew Scull <ascull@xxxxxxxxxx> Cc: Dave Martin <Dave.Martin@xxxxxxx> --- arch/arm64/include/asm/kvm_host.h | 3 ++- arch/arm64/kvm/arm.c | 4 +++- arch/arm64/kvm/fpsimd.c | 26 ++++++++++++++++++++++++- arch/arm64/kvm/hyp/include/hyp/switch.h | 19 ------------------ arch/arm64/kvm/hyp/nvhe/switch.c | 3 +-- arch/arm64/kvm/hyp/vhe/switch.c | 3 +-- 6 files changed, 32 insertions(+), 26 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6b33f720ce9c..f6a478d3a902 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -726,7 +726,8 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); -void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_sync_fp_before_hyp(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_sync_fp_after_hyp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fc4c95dd2d26..26ccc369cf11 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -738,6 +738,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) local_irq_disable(); + kvm_arch_vcpu_sync_fp_before_hyp(vcpu); + kvm_vgic_flush_hwstate(vcpu); /* @@ -825,7 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if 
(static_branch_unlikely(&userspace_irqchip_in_use)) kvm_timer_sync_user(vcpu); - kvm_arch_vcpu_ctxsync_fp(vcpu); + kvm_arch_vcpu_sync_fp_after_hyp(vcpu); /* * We may have taken a host interrupt in HYP mode (ie diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 3e081d556e81..0c5e79be34d5 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -63,8 +63,13 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) BUG_ON(!current->mm); vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | + KVM_ARM64_FP_HOST | KVM_ARM64_HOST_SVE_IN_USE | KVM_ARM64_HOST_SVE_ENABLED); + + if (!system_supports_fpsimd()) + return; + vcpu->arch.flags |= KVM_ARM64_FP_HOST; if (test_thread_flag(TIF_SVE)) @@ -74,13 +79,32 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; } + +/* + * If TIF_FOREIGN_FPSTATE is set, the FPSIMD regs do not contain the state of + * current or the guest. However, the state will have been saved where it was + * needed. This means the guest's state will have to be loaded if it is needed, + * without saving the FPSIMD regs. + */ +void kvm_arch_vcpu_sync_fp_before_hyp(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(!irqs_disabled()); + + if (!system_supports_fpsimd()) + return; + + if (test_thread_flag(TIF_FOREIGN_FPSTATE)) + vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | + KVM_ARM64_FP_HOST); +} + /* * If the guest FPSIMD state was loaded, update the host's context * tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu * so that they will be written back if the kernel clobbers them due to * kernel-mode NEON before re-entry into the guest. 
*/ -void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) +void kvm_arch_vcpu_sync_fp_after_hyp(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(!irqs_disabled()); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 54f4860cd87c..8eb1f87f9119 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -28,31 +28,12 @@ #include <asm/fpsimd.h> #include <asm/debug-monitors.h> #include <asm/processor.h> -#include <asm/thread_info.h> extern const char __hyp_panic_string[]; extern struct exception_table_entry __start___kvm_ex_table; extern struct exception_table_entry __stop___kvm_ex_table; -/* Check whether the FP regs were dirtied while in the host-side run loop: */ -static inline bool update_fp_enabled(struct kvm_vcpu *vcpu) -{ - /* - * When the system doesn't support FP/SIMD, we cannot rely on - * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an - * abort on the very first access to FP and thus we should never - * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always - * trap the accesses. 
- */ - if (!system_supports_fpsimd() || - vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) - vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | - KVM_ARM64_FP_HOST); - - return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED); -} - /* Save the 32-bit only FPSIMD system register state */ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index f3d0e9eca56c..6fc1e0a5adaa 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -26,7 +26,6 @@ #include <asm/fpsimd.h> #include <asm/debug-monitors.h> #include <asm/processor.h> -#include <asm/thread_info.h> /* Non-VHE specific context */ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); @@ -42,7 +41,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) val = CPTR_EL2_DEFAULT; val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM; - if (!update_fp_enabled(vcpu)) { + if (!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED)) { val |= CPTR_EL2_TFP; __activate_traps_fpsimd32(vcpu); } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index af8e940d0f03..f6f60a537b3e 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -25,7 +25,6 @@ #include <asm/fpsimd.h> #include <asm/debug-monitors.h> #include <asm/processor.h> -#include <asm/thread_info.h> const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; @@ -55,7 +54,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) val |= CPTR_EL2_TAM; - if (update_fp_enabled(vcpu)) { + if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { if (vcpu_has_sve(vcpu)) val |= CPACR_EL1_ZEN; } else { -- 2.30.1.766.gb4fecdf3b7-goog _______________________________________________ kvmarm mailing list kvmarm@xxxxxxxxxxxxxxxxxxxxx https://lists.cs.columbia.edu/mailman/listinfo/kvmarm