Re: [PATCH 6.13 8/8] KVM: arm64: Eagerly switch ZCR_EL{1,2}

[ Sasha's backport helper bot ]

Hi,

Summary of potential issues:
ℹ️ This is part 8/8 of a series
⚠️ Found a matching upstream commit, but the patch is missing a proper reference to it

Found matching upstream commit: 59419f10045bc955d2229819c7cf7a8b0b9c5b59
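(For reference: stable backports conventionally identify the upstream commit
with either a "commit <sha> upstream." line at the top of the changelog, i.e.
for this patch:

    commit 59419f10045bc955d2229819c7cf7a8b0b9c5b59 upstream.

or with a "(cherry picked from commit <sha>)" trailer. The range-diff below
shows this backport carrying the cherry-pick trailer form.)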

WARNING: Author mismatch between patch and found commit:
Backport author: Mark Brown <broonie@xxxxxxxxxx>
Commit author: Mark Rutland <mark.rutland@xxxxxxx>

Note: The patch differs from the upstream commit:
---
1:  59419f10045bc ! 1:  5c95d50c2deb9 KVM: arm64: Eagerly switch ZCR_EL{1,2}
    @@ Commit message
         Reviewed-by: Oliver Upton <oliver.upton@xxxxxxxxx>
         Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@xxxxxxx
         Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
    +    (cherry picked from commit 59419f10045bc955d2229819c7cf7a8b0b9c5b59)
    +    Signed-off-by: Mark Brown <broonie@xxxxxxxxxx>
     
      ## arch/arm64/kvm/fpsimd.c ##
     @@ arch/arm64/kvm/fpsimd.c: void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
    @@ arch/arm64/kvm/hyp/nvhe/hyp-main.c
      #include <asm/pgtable-types.h>
      #include <asm/kvm_asm.h>
     @@ arch/arm64/kvm/hyp/nvhe/hyp-main.c: static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
    - 
      		sync_hyp_vcpu(hyp_vcpu);
    + 		pkvm_put_hyp_vcpu(hyp_vcpu);
      	} else {
     +		struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);
     +
      		/* The host is fully trusted, run its vCPU directly. */
    --		ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
    +-		ret = __kvm_vcpu_run(host_vcpu);
     +		fpsimd_lazy_switch_to_guest(vcpu);
     +		ret = __kvm_vcpu_run(vcpu);
     +		fpsimd_lazy_switch_to_host(vcpu);
      	}
    + 
      out:
    - 	cpu_reg(host_ctxt, 1) =  ret;
     @@ arch/arm64/kvm/hyp/nvhe/hyp-main.c: void handle_trap(struct kvm_cpu_context *host_ctxt)
      	case ESR_ELx_EC_SMC64:
      		handle_host_smc(host_ctxt);
      		break;
     -	case ESR_ELx_EC_SVE:
    --		cpacr_clear_set(0, CPACR_EL1_ZEN);
    +-		cpacr_clear_set(0, CPACR_ELx_ZEN);
     -		isb();
     -		sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
     -				       SYS_ZCR_EL2);
    @@ arch/arm64/kvm/hyp/nvhe/hyp-main.c: void handle_trap(struct kvm_cpu_context *hos
     
      ## arch/arm64/kvm/hyp/nvhe/switch.c ##
     @@ arch/arm64/kvm/hyp/nvhe/switch.c: static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
    - 
    - static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
      {
    --	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
    --
    + 	u64 val = CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
    + 
    ++	if (!guest_owns_fp_regs())
    ++		__activate_traps_fpsimd32(vcpu);
    ++
      	if (has_hvhe()) {
    - 		u64 val = CPACR_EL1_FPEN;
    + 		val |= CPACR_ELx_TTA;
      
    --		if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
    -+		if (cpus_have_final_cap(ARM64_SVE))
    - 			val |= CPACR_EL1_ZEN;
    - 		if (cpus_have_final_cap(ARM64_SME))
    - 			val |= CPACR_EL1_SMEN;
    -@@ arch/arm64/kvm/hyp/nvhe/switch.c: static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
    +@@ arch/arm64/kvm/hyp/nvhe/switch.c: static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
    + 			if (vcpu_has_sve(vcpu))
    + 				val |= CPACR_ELx_ZEN;
    + 		}
    ++
    ++		write_sysreg(val, cpacr_el1);
      	} else {
    - 		u64 val = CPTR_NVHE_EL2_RES1;
    + 		val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
    + 
    +@@ arch/arm64/kvm/hyp/nvhe/switch.c: static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
      
    --		if (kvm_has_sve(kvm) && guest_owns_fp_regs())
    + 		if (!guest_owns_fp_regs())
    + 			val |= CPTR_EL2_TFP;
    ++
    ++		write_sysreg(val, cptr_el2);
    + 	}
    ++}
    + 
    +-	if (!guest_owns_fp_regs())
    +-		__activate_traps_fpsimd32(vcpu);
    ++static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
    ++{
    ++	if (has_hvhe()) {
    ++		u64 val = CPACR_ELx_FPEN;
    ++
    ++		if (cpus_have_final_cap(ARM64_SVE))
    ++			val |= CPACR_ELx_ZEN;
    ++		if (cpus_have_final_cap(ARM64_SME))
    ++			val |= CPACR_ELx_SMEN;
    ++
    ++		write_sysreg(val, cpacr_el1);
    ++	} else {
    ++		u64 val = CPTR_NVHE_EL2_RES1;
    ++
     +		if (!cpus_have_final_cap(ARM64_SVE))
    - 			val |= CPTR_EL2_TZ;
    - 		if (!cpus_have_final_cap(ARM64_SME))
    - 			val |= CPTR_EL2_TSM;
    ++			val |= CPTR_EL2_TZ;
    ++		if (!cpus_have_final_cap(ARM64_SME))
    ++			val |= CPTR_EL2_TSM;
    + 
    +-	kvm_write_cptr_el2(val);
    ++		write_sysreg(val, cptr_el2);
    ++	}
    + }
    + 
    + static void __activate_traps(struct kvm_vcpu *vcpu)
    +@@ arch/arm64/kvm/hyp/nvhe/switch.c: static void __deactivate_traps(struct kvm_vcpu *vcpu)
    + 
    + 	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
    + 
    +-	kvm_reset_cptr_el2(vcpu);
    ++	__deactivate_cptr_traps(vcpu);
    + 	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
    + }
    + 
     
      ## arch/arm64/kvm/hyp/vhe/switch.c ##
     @@ arch/arm64/kvm/hyp/vhe/switch.c: static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
---
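
Note: for readers tracking the restructuring, two pieces of the backported
code are fully visible in the hunks above and can be read consolidated. In
handle___kvm_vcpu_run(), the host vCPU run path becomes (surrounding context
from the kernel tree assumed):

    	} else {
    		struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);

    		/* The host is fully trusted, run its vCPU directly. */
    		fpsimd_lazy_switch_to_guest(vcpu);
    		ret = __kvm_vcpu_run(vcpu);
    		fpsimd_lazy_switch_to_host(vcpu);
    	}

and nvhe/switch.c gains a dedicated __deactivate_cptr_traps() helper,
assembled verbatim from the additions above (has_hvhe(),
cpus_have_final_cap(), write_sysreg() and the CPACR/CPTR bit macros are
existing kernel-tree definitions, not introduced here):

    static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
    {
    	if (has_hvhe()) {
    		/* hVHE: EL2 traps are programmed via the CPACR_EL1 layout */
    		u64 val = CPACR_ELx_FPEN;

    		if (cpus_have_final_cap(ARM64_SVE))
    			val |= CPACR_ELx_ZEN;
    		if (cpus_have_final_cap(ARM64_SME))
    			val |= CPACR_ELx_SMEN;

    		write_sysreg(val, cpacr_el1);
    	} else {
    		/* nVHE: program CPTR_EL2 directly */
    		u64 val = CPTR_NVHE_EL2_RES1;

    		if (!cpus_have_final_cap(ARM64_SVE))
    			val |= CPTR_EL2_TZ;
    		if (!cpus_have_final_cap(ARM64_SME))
    			val |= CPTR_EL2_TSM;

    		write_sysreg(val, cptr_el2);
    	}
    }

__deactivate_traps() then calls this helper in place of kvm_reset_cptr_el2(),
in line with the subject's eager ZCR_EL{1,2}/CPTR switching.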

NOTE: These results are for this patch alone. Full series testing will be
performed when all parts are received.

Results of testing on various branches:

| Branch                    | Patch Apply | Build Test |
|---------------------------|-------------|------------|
| stable/linux-6.13.y       |  Success    |  Success   |



