Create a struct for the hypervisor state from the related fields in
vcpu_arch. This is needed in future patches to reduce the scope of
functions from the vcpu as a whole to only the relevant state, via this
newly created struct.

Create a new instance of this struct in vcpu_arch and fix the accessors
to use the new fields. Remove the existing fields from vcpu_arch.

Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h | 35 ++++++++++++++++++++---------------
 arch/arm64/kernel/asm-offsets.c   |  2 +-
 2 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3e5c173d2360..dc4b5e133d86 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -269,27 +269,35 @@ struct vcpu_reset_state {
 	bool reset;
 };
 
+/* Holds the hyp-relevant data of a vcpu.*/
+struct vcpu_hyp_state {
+	/* HYP configuration */
+	u64 hcr_el2;
+	u32 mdcr_el2;
+
+	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
+	u64 vsesr_el2;
+
+	/* Exception Information */
+	struct kvm_vcpu_fault_info fault;
+
+	/* Miscellaneous vcpu state flags */
+	u64 flags;
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
 	void *sve_state;
 	unsigned int sve_max_vl;
 
+	struct vcpu_hyp_state hyp_state;
+
 	/* Stage 2 paging state used by the hardware on next switch */
 	struct kvm_s2_mmu *hw_mmu;
 
-	/* HYP configuration */
-	u64 hcr_el2;
-	u32 mdcr_el2;
-
-	/* Exception Information */
-	struct kvm_vcpu_fault_info fault;
-
 	/* State of various workarounds, see kvm_asm.h for bit assignment */
 	u64 workaround_flags;
 
-	/* Miscellaneous vcpu state flags */
-	u64 flags;
-
 	/*
 	 * We maintain more than a single set of debug registers to support
 	 * debugging the guest from the host and to maintain separate host and
@@ -356,9 +364,6 @@ struct kvm_vcpu_arch {
 	/* Detect first run of a vcpu */
 	bool has_run_once;
 
-	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
-	u64 vsesr_el2;
-
 	/* Additional reset state */
 	struct vcpu_reset_state reset_state;
 
@@ -373,7 +378,7 @@ struct kvm_vcpu_arch {
 	} steal;
 };
 
-#define hyp_state(vcpu) ((vcpu)->arch)
+#define hyp_state(vcpu) ((vcpu)->arch.hyp_state)
 
 /* Accessors for hyp_state parameters related to the hypervistor state. */
 #define hyp_state_hcr_el2(hyps) (hyps)->hcr_el2
@@ -633,7 +638,7 @@ void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
-#define kvm_call_hyp_nvhe(f, ...)					\
+#define kvm_call_hyp_nvhe(f, ...)						\
 	({								\
 		struct arm_smccc_res res;				\
 									\
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index c2cc3a2813e6..1776efc3cc9d 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -107,7 +107,7 @@ int main(void)
   BLANK();
 #ifdef CONFIG_KVM
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
-  DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
+  DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.hyp_state.fault.disr_el1));
   DEFINE(VCPU_WORKAROUND_FLAGS,	offsetof(struct kvm_vcpu, arch.workaround_flags));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_cpu_context, regs));
   DEFINE(CPU_APIAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
-- 
2.33.0.685.g46640cef36-goog
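
For illustration only, the snippet below is a self-contained userspace
sketch (not kernel code) of the accessor pattern this patch introduces.
The struct members and the hyp_state()/hyp_state_hcr_el2() macros mirror
the diff above; the fixed-width stdint types, the set_hcr_trap_bit()
helper and the main() harness are hypothetical, added only to show how a
function can now take just the hyp state rather than the whole vcpu.

/*
 * Sketch of the vcpu_hyp_state accessor pattern. The simplified types,
 * the set_hcr_trap_bit() helper and main() are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct vcpu_hyp_state {
	uint64_t hcr_el2;	/* HYP configuration */
	uint32_t mdcr_el2;
	uint64_t vsesr_el2;	/* Virtual SError ESR */
	uint64_t flags;		/* Miscellaneous vcpu state flags */
};

struct kvm_vcpu_arch {
	struct vcpu_hyp_state hyp_state;
	/* other, non-hyp fields elided */
};

struct kvm_vcpu {
	struct kvm_vcpu_arch arch;
};

/* Mirror the accessors added/updated in the patch. */
#define hyp_state(vcpu)		((vcpu)->arch.hyp_state)
#define hyp_state_hcr_el2(hyps)	((hyps)->hcr_el2)

/*
 * Hypothetical helper: it only needs hyp state, so it takes a
 * struct vcpu_hyp_state * rather than a struct kvm_vcpu *, which is the
 * scope reduction the commit message refers to.
 */
static void set_hcr_trap_bit(struct vcpu_hyp_state *hyps, uint64_t bit)
{
	hyp_state_hcr_el2(hyps) |= bit;
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	/* Set an arbitrary example bit in the vcpu's HCR_EL2 shadow. */
	set_hcr_trap_bit(&hyp_state(&vcpu), 1ULL << 31);
	printf("hcr_el2 = %#llx\n",
	       (unsigned long long)hyp_state_hcr_el2(&hyp_state(&vcpu)));
	return 0;
}

Passing only the vcpu_hyp_state pointer keeps such helpers from touching
the rest of the vcpu, which, as the commit message notes, is what later
patches in the series rely on.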