With the transition to kvm_arch_vcpu_run_pid_change() to handle the
"run once" activities, it becomes obvious that has_run_once is now an
exact shadow of vcpu->pid.

Replace vcpu->arch.has_run_once with a new vcpu_has_run_once() helper
that directly checks for vcpu->pid, and get rid of the now unused
field.

Reviewed-by: Andrew Jones <drjones@xxxxxxxxxx>
Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h | 5 ++---
 arch/arm64/kvm/arm.c              | 8 +++-----
 arch/arm64/kvm/vgic/vgic-init.c   | 2 +-
 3 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d7107d627c54..e0de761ecbf2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -366,9 +366,6 @@ struct kvm_vcpu_arch {
 	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
-	/* Detect first run of a vcpu */
-	bool has_run_once;
-
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
@@ -605,6 +602,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
+#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
+
 #ifndef __KVM_NVHE_HYPERVISOR__
 #define kvm_call_hyp_nvhe(f, ...)					\
 	({								\
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a52f6a76485d..222aabd57147 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -350,7 +350,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		static_branch_dec(&userspace_irqchip_in_use);
 
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -608,7 +608,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	if (likely(vcpu->arch.has_run_once))
+	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
 
 	kvm_arm_vcpu_init_debug(vcpu);
@@ -639,8 +639,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		static_branch_inc(&userspace_irqchip_in_use);
 	}
 
-	vcpu->arch.has_run_once = true;
-
 	return ret;
 }
 
@@ -1123,7 +1121,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	 * need to invalidate the I-cache though, as FWB does *not*
 	 * imply CTR_EL0.DIC.
 	 */
-	if (vcpu->arch.has_run_once) {
+	if (vcpu_has_run_once(vcpu)) {
 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 			stage2_unmap_vm(vcpu->kvm);
 		else
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 340c51d87677..8d89ca15f613 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -91,7 +91,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 		return ret;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once)
+		if (vcpu_has_run_once(vcpu))
 			goto out_unlock;
 	}
 	ret = 0;
-- 
2.30.2
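
A note for readers wondering why vcpu->pid can stand in for
has_run_once: the guarantee comes from the generic KVM_RUN ioctl path
in virt/kvm/kvm_main.c. The snippet below is a condensed paraphrase of
that path, for illustration only, not a verbatim quote. vcpu->pid
starts out NULL and is only assigned after
kvm_arch_vcpu_run_pid_change() succeeds, so a non-NULL pid is exactly
equivalent to "this vcpu has entered KVM_RUN at least once", which is
what the new vcpu_has_run_once() helper relies on.

	case KVM_RUN: {
		struct pid *oldpid;

		/*
		 * Condensed paraphrase of the generic KVM_RUN handling.
		 * vcpu->pid is NULL until the first KVM_RUN, so
		 * rcu_access_pointer(vcpu->pid) doubles as a
		 * "has this vcpu ever run?" predicate.
		 */
		oldpid = rcu_access_pointer(vcpu->pid);
		if (unlikely(oldpid != task_pid(current))) {
			/* First run, or the vcpu thread changed */
			struct pid *newpid;

			r = kvm_arch_vcpu_run_pid_change(vcpu);
			if (r)
				break;

			newpid = get_task_pid(current, PIDTYPE_PID);
			rcu_assign_pointer(vcpu->pid, newpid);
			if (oldpid)
				synchronize_rcu();
			put_pid(oldpid);
		}
		r = kvm_arch_vcpu_ioctl_run(vcpu);
		break;
	}

Since vcpu->pid is an RCU-managed pointer that can be swapped when the
vcpu migrates to another thread, the helper uses rcu_access_pointer()
rather than a plain load; it only tests the pointer for NULL-ness and
never dereferences it, so no RCU read-side critical section is needed.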