Re: [PATCH 6.13 1/8] KVM: arm64: Calculate cptr_el2 traps on activating traps

[ Sasha's backport helper bot ]

Hi,

✅ All tests passed successfully. No issues detected.
No action required from the submitter.

The upstream commit SHA1 provided is correct: 2fd5b4b0e7b440602455b79977bfa64dea101e6c

WARNING: Author mismatch between patch and upstream commit:
Backport author: Mark Brown <broonie@xxxxxxxxxx>
Commit author: Fuad Tabba <tabba@xxxxxxxxxx>

Note: The patch differs from the upstream commit:
---
1:  2fd5b4b0e7b44 ! 1:  06e6cdf67becb KVM: arm64: Calculate cptr_el2 traps on activating traps
    @@ Metadata
      ## Commit message ##
         KVM: arm64: Calculate cptr_el2 traps on activating traps
     
    +    [ Upstream commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c ]
    +
         Similar to VHE, calculate the value of cptr_el2 from scratch on
         activate traps. This removes the need to store cptr_el2 in every
         vcpu structure. Moreover, some traps, such as whether the guest
    @@ Commit message
         Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
         Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@xxxxxxxxxx
         Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
    +    Signed-off-by: Mark Brown <broonie@xxxxxxxxxx>
     
      ## arch/arm64/include/asm/kvm_host.h ##
     @@ arch/arm64/include/asm/kvm_host.h: struct kvm_vcpu_arch {
    @@ arch/arm64/kvm/arm.c: static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *
      	 * Handle the "start in power-off" case.
     
      ## arch/arm64/kvm/hyp/nvhe/pkvm.c ##
    -@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
    - 	vcpu->arch.hcr_el2 = val;
    - }
    - 
    --static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
    --{
    --	struct kvm *kvm = vcpu->kvm;
    --	u64 val = vcpu->arch.cptr_el2;
    --
    --	if (!has_hvhe()) {
    --		val |= CPTR_NVHE_EL2_RES1;
    --		val &= ~(CPTR_NVHE_EL2_RES0);
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
    + 	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
    + 	u64 hcr_set = HCR_RW;
    + 	u64 hcr_clear = 0;
    +-	u64 cptr_set = 0;
    +-	u64 cptr_clear = 0;
    + 
    + 	/* Protected KVM does not support AArch32 guests. */
    + 	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
    + 	/* Trap AMU */
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
    + 		hcr_clear |= HCR_AMVOFFEN;
    +-		cptr_set |= CPTR_EL2_TAM;
     -	}
     -
    --	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
    --		val |= CPTR_EL2_TAM;
    --
    --	/* SVE can be disabled by userspace even if supported. */
    --	if (!vcpu_has_sve(vcpu)) {
    +-	/* Trap SVE */
    +-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
     -		if (has_hvhe())
    --			val &= ~(CPACR_ELx_ZEN);
    +-			cptr_clear |= CPACR_ELx_ZEN;
     -		else
    --			val |= CPTR_EL2_TZ;
    --	}
    --
    --	/* No SME support in KVM. */
    --	BUG_ON(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP));
    --	if (has_hvhe())
    --		val &= ~(CPACR_ELx_SMEN);
    --	else
    --		val |= CPTR_EL2_TSM;
    --
    --	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) {
    +-			cptr_set |= CPTR_EL2_TZ;
    + 	}
    + 
    + 	vcpu->arch.hcr_el2 |= hcr_set;
    + 	vcpu->arch.hcr_el2 &= ~hcr_clear;
    +-	vcpu->arch.cptr_el2 |= cptr_set;
    +-	vcpu->arch.cptr_el2 &= ~cptr_clear;
    + }
    + 
    + /*
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
    + 	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
    + 	u64 mdcr_set = 0;
    + 	u64 mdcr_clear = 0;
    +-	u64 cptr_set = 0;
    + 
    + 	/* Trap/constrain PMU */
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
    + 		mdcr_set |= MDCR_EL2_TTRF;
    + 
    +-	/* Trap Trace */
    +-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
     -		if (has_hvhe())
    --			val |= CPACR_EL1_TTA;
    +-			cptr_set |= CPACR_EL1_TTA;
     -		else
    --			val |= CPTR_EL2_TTA;
    +-			cptr_set |= CPTR_EL2_TTA;
     -	}
     -
    --	vcpu->arch.cptr_el2 = val;
    --}
    --
    - static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
    - {
    - 	struct kvm *kvm = vcpu->kvm;
    -@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
    - 	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
    - 	int ret;
    + 	/* Trap External Trace */
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
    + 		mdcr_clear |= MDCR_EL2_E2TB_MASK;
    + 
    + 	vcpu->arch.mdcr_el2 |= mdcr_set;
    + 	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
    +-	vcpu->arch.cptr_el2 |= cptr_set;
    + }
      
    + /*
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
    + 	/* Clear res0 and set res1 bits to trap potential new features. */
    + 	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
    + 	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
    +-	if (!has_hvhe()) {
    +-		vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
    +-		vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
    +-	}
    + }
    + 
    + static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
    +  */
    + static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
    + {
     -	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
      	vcpu->arch.mdcr_el2 = 0;
      
      	pkvm_vcpu_reset_hcr(vcpu);
    -@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
    - 		return ret;
    - 
    - 	pvm_init_traps_hcr(vcpu);
    --	pvm_init_traps_cptr(vcpu);
    - 	pvm_init_traps_mdcr(vcpu);
    - 
    - 	return 0;
     @@ arch/arm64/kvm/hyp/nvhe/pkvm.c: int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
      		return ret;
      	}
---
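For context on what the backport changes functionally: the commit message above describes moving from a cached per-vCPU cptr_el2 value (adjusted piecemeal by the pvm_init_traps_*() helpers removed in the hunks above) to recomputing the trap configuration from live vCPU state each time traps are activated. The sketch below is a minimal, self-contained illustration of that pattern only, not the upstream code; the struct, the SKETCH_CPTR_* bits, and compute_cptr_traps() are invented for this example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical trap bits, loosely modelled on CPTR_EL2 fields. */
#define SKETCH_CPTR_TAM   (1u << 30)	/* trap AMU accesses            */
#define SKETCH_CPTR_TTA   (1u << 28)	/* trap trace register accesses */
#define SKETCH_CPTR_TSM   (1u << 12)	/* trap SME (unsupported)       */
#define SKETCH_CPTR_TFP   (1u << 10)	/* trap FP/SIMD                 */
#define SKETCH_CPTR_TZ    (1u << 8)	/* trap SVE                     */

struct sketch_vcpu {
	bool has_sve;		/* SVE enabled for this guest          */
	bool owns_fp_regs;	/* guest currently owns FP/SIMD state  */
};

/*
 * Recompute the trap value from current vCPU state on every activation,
 * instead of caching it in the vCPU structure and patching it up in
 * several init-time helpers.
 */
static uint32_t compute_cptr_traps(const struct sketch_vcpu *vcpu)
{
	/* Unconditional traps: AMU, trace, and SME (no KVM support). */
	uint32_t val = SKETCH_CPTR_TAM | SKETCH_CPTR_TTA | SKETCH_CPTR_TSM;

	/* Trap SVE unless the guest has it and owns the FP registers. */
	if (!vcpu->has_sve || !vcpu->owns_fp_regs)
		val |= SKETCH_CPTR_TZ;

	/* Trap FP/SIMD until the guest first touches it (lazy restore). */
	if (!vcpu->owns_fp_regs)
		val |= SKETCH_CPTR_TFP;

	return val;
}

int main(void)
{
	struct sketch_vcpu vcpu = { .has_sve = true, .owns_fp_regs = false };

	printf("traps before guest uses FP: 0x%08x\n",
	       (unsigned)compute_cptr_traps(&vcpu));

	vcpu.owns_fp_regs = true;	/* e.g. after handling an FP trap */
	printf("traps after guest owns FP:  0x%08x\n",
	       (unsigned)compute_cptr_traps(&vcpu));

	return 0;
}

The point of the pattern (and the motivation stated in the commit message) is that trap state which depends on transient conditions, such as lazy FP/SVE switching, is always derived from the live vCPU state, so no stale cached value needs to be kept in sync in the vCPU structure.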

Results of testing on various branches:

| Branch                    | Patch Apply | Build Test |
|---------------------------|-------------|------------|
| stable/linux-6.13.y       |  Success    |  Success   |



