Like the ASID allocator, we copy the active_vmids into the
reserved_vmids on a rollover. But it is unlikely that every CPU will
have a vCPU as its current task, so we may end up unnecessarily
reserving VMID space. Hence, clear active_vmids when scheduling out
a vCPU.

Suggested-by: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h | 1 +
 arch/arm64/kvm/arm.c              | 1 +
 arch/arm64/kvm/vmid.c             | 6 ++++++
 3 files changed, 8 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bb993bce1363..d93141cb8d16 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -687,6 +687,7 @@ extern unsigned int kvm_arm_vmid_bits;
 int kvm_arm_vmid_alloc_init(void);
 void kvm_arm_vmid_alloc_free(void);
 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 077e55a511a9..b134a1b89c84 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -435,6 +435,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_timer_vcpu_put(vcpu);
 	kvm_vgic_put(vcpu);
 	kvm_vcpu_pmu_restore_host(vcpu);
+	kvm_arm_vmid_clear_active();
 
 	vcpu->cpu = -1;
 }
diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
index 5584e84aed95..5fd51f5445c1 100644
--- a/arch/arm64/kvm/vmid.c
+++ b/arch/arm64/kvm/vmid.c
@@ -116,6 +116,12 @@ static u64 new_vmid(struct kvm_vmid *kvm_vmid)
 	return idx2vmid(vmid) | generation;
 }
 
+/* Call with preemption disabled */
+void kvm_arm_vmid_clear_active(void)
+{
+	atomic64_set(this_cpu_ptr(&active_vmids), 0);
+}
+
 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
 	unsigned long flags;
-- 
2.17.1
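
For context, the rollover behaviour the commit message refers to ("we copy
the active_vmids into the reserved_vmids on a rollover") works roughly as
sketched below. This is an illustrative sketch only, not the exact flush
path in arch/arm64/kvm/vmid.c; the per-CPU variables and helpers
(active_vmids, reserved_vmids, vmid_map, vmid2idx()) are assumed from the
ASID-style allocator this series is modelled on:

/*
 * Sketch of the rollover path: each CPU's active VMID (or, failing
 * that, its previously reserved VMID) is marked in the bitmap and
 * carried over into the new generation via reserved_vmids.
 */
static void flush_context(void)
{
	int cpu;
	u64 vmid;

	bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		/* Atomically take (and clear) this CPU's active VMID. */
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/*
		 * If it was already 0 (e.g. cleared by
		 * kvm_arm_vmid_clear_active() when the vCPU was scheduled
		 * out), fall back to the previously reserved VMID.
		 */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);

		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}
}

With active_vmids cleared on vcpu_put, a CPU that is not currently running
a vCPU no longer contributes a live VMID here, which is what avoids the
unnecessary reservation described above.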