Define sstateen0 and add sstateen0 save/restore for guest VCPUs.

Signed-off-by: Mayuresh Chitale <mchitale@xxxxxxxxxxxxxxxx>
Reviewed-by: Andrew Jones <ajones@xxxxxxxxxxxxxxxx>
---
 arch/riscv/include/asm/csr.h      |  1 +
 arch/riscv/include/asm/kvm_host.h |  8 ++++++++
 arch/riscv/kvm/vcpu.c             | 12 ++++++++++++
 3 files changed, 21 insertions(+)

diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index b52270278733..5168f37d8e75 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -286,6 +286,7 @@
 #define CSR_STVEC		0x105
 #define CSR_SCOUNTEREN		0x106
 #define CSR_SENVCFG		0x10a
+#define CSR_SSTATEEN0		0x10c
 #define CSR_SSCRATCH		0x140
 #define CSR_SEPC		0x141
 #define CSR_SCAUSE		0x142
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index c3cc0cb39cf8..c9837772b109 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -170,6 +170,10 @@ struct kvm_vcpu_config {
 	u64 hstateen0;
 };
 
+struct kvm_vcpu_smstateen_csr {
+	unsigned long sstateen0;
+};
+
 struct kvm_vcpu_arch {
 	/* VCPU ran at least once */
 	bool ran_atleast_once;
@@ -190,6 +194,7 @@ struct kvm_vcpu_arch {
 	unsigned long host_stvec;
 	unsigned long host_scounteren;
 	unsigned long host_senvcfg;
+	unsigned long host_sstateen0;
 
 	/* CPU context of Host */
 	struct kvm_cpu_context host_context;
@@ -200,6 +205,9 @@ struct kvm_vcpu_arch {
 	/* CPU CSR context of Guest VCPU */
 	struct kvm_vcpu_csr guest_csr;
 
+	/* CPU Smstateen CSR context of Guest VCPU */
+	struct kvm_vcpu_smstateen_csr smstateen_csr;
+
 	/* CPU context upon Guest VCPU reset */
 	struct kvm_cpu_context guest_reset_context;
 
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 5764d22efa29..1f8e8b5f659b 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -1131,16 +1131,28 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 
 static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
+						     smcsr->sstateen0);
 }
 
 static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
+					    vcpu->arch.host_sstateen0);
 }
 
 /*
-- 
2.34.1
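
For context, the swap logic above follows a simple pattern: on guest entry the host's sstateen0 is saved and the guest's copy installed, and on guest exit the swap is reversed, in both cases only when the Smstateen extension is present and hstateen0 already grants the guest access to sstateen0. Below is a minimal, self-contained user-space sketch of that pattern; all names (mock_csr_swap, vcpu_state, and so on) are illustrative stand-ins rather than kernel code, and the SMSTATEEN0_SSTATEEN0 bit position is taken from the Smstateen spec.

#include <stdbool.h>
#include <stdio.h>

/* Bit 63 of hstateen0 gates guest access to sstateen0 (Smstateen spec). */
#define SMSTATEEN0_SSTATEEN0	(1ULL << 63)

static unsigned long hw_sstateen0;	/* stand-in for the real CSR */

/* Mock of csr_swap(): install a new value, return the previous one. */
static unsigned long mock_csr_swap(unsigned long new_val)
{
	unsigned long old = hw_sstateen0;

	hw_sstateen0 = new_val;
	return old;
}

struct vcpu_state {
	bool has_smstateen;		/* host implements Smstateen */
	unsigned long hstateen0;	/* access bits granted to the guest */
	unsigned long guest_sstateen0;	/* saved guest copy */
	unsigned long host_sstateen0;	/* saved host copy */
};

/* Guest entry: stash the host value, install the guest value. */
static void swap_in_guest_state(struct vcpu_state *v)
{
	if (v->has_smstateen && (v->hstateen0 & SMSTATEEN0_SSTATEEN0))
		v->host_sstateen0 = mock_csr_swap(v->guest_sstateen0);
}

/* Guest exit: stash the guest value, restore the host value. */
static void swap_in_host_state(struct vcpu_state *v)
{
	if (v->has_smstateen && (v->hstateen0 & SMSTATEEN0_SSTATEEN0))
		v->guest_sstateen0 = mock_csr_swap(v->host_sstateen0);
}

int main(void)
{
	struct vcpu_state v = {
		.has_smstateen = true,
		.hstateen0 = SMSTATEEN0_SSTATEEN0,
		.guest_sstateen0 = 0x1,
	};

	hw_sstateen0 = 0xf;		/* pretend this is the host's value */

	swap_in_guest_state(&v);
	printf("in guest: csr=%#lx saved host=%#lx\n",
	       hw_sstateen0, v.host_sstateen0);

	swap_in_host_state(&v);
	printf("in host:  csr=%#lx saved guest=%#lx\n",
	       hw_sstateen0, v.guest_sstateen0);
	return 0;
}

If either condition fails, nothing is swapped and the guest simply never sees a sstateen0 of its own, which mirrors how the patch keeps the extra csr_swap() off the hot path on hardware without Smstateen.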