Factor out the code to update the NX hugepages state for an individual
VM. This will be expanded in future commits to allow per-VM control of
NX hugepages.

No functional change intended.

Reviewed-by: David Matlack <dmatlack@xxxxxxxxxx>
Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 69a30d6d1e2b..caaa610b7878 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6144,6 +6144,15 @@ static void __set_nx_huge_pages(bool val)
 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
 }
 
+static void kvm_update_nx_huge_pages(struct kvm *kvm)
+{
+	mutex_lock(&kvm->slots_lock);
+	kvm_mmu_zap_all_fast(kvm);
+	mutex_unlock(&kvm->slots_lock);
+
+	wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+}
+
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 {
 	bool old_val = nx_huge_pages;
@@ -6166,13 +6175,9 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 
 		mutex_lock(&kvm_lock);
 
-		list_for_each_entry(kvm, &vm_list, vm_list) {
-			mutex_lock(&kvm->slots_lock);
-			kvm_mmu_zap_all_fast(kvm);
-			mutex_unlock(&kvm->slots_lock);
+		list_for_each_entry(kvm, &vm_list, vm_list)
+			kvm_update_nx_huge_pages(kvm);
 
-			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
-		}
 		mutex_unlock(&kvm_lock);
 	}
 
-- 
2.35.1.1178.g4f1659d476-goog
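
Note for context: the commit message says the new helper will later be
used for per-VM control of NX hugepages. A rough sketch of what that
could look like, built on the helper factored out here, is below. The
kvm->arch.disable_nx_huge_pages field and the two functions are
hypothetical illustrations, not part of this patch or a claim about
what the follow-up commits actually do.

/*
 * Illustrative sketch only: per-VM opt-out layered on top of the
 * module-wide nx_huge_pages knob. Names here are assumptions.
 */
static bool kvm_nx_huge_pages_enabled(struct kvm *kvm)
{
	/* A per-VM opt-out would override the module-wide setting. */
	return nx_huge_pages && !kvm->arch.disable_nx_huge_pages;
}

static void kvm_vm_disable_nx_huge_pages(struct kvm *kvm)
{
	kvm->arch.disable_nx_huge_pages = true;

	/*
	 * Reuse the helper introduced by this patch to zap any SPTEs
	 * installed under the old setting and kick the recovery thread,
	 * exactly as the module param path does for every VM.
	 */
	kvm_update_nx_huge_pages(kvm);
}

The point of the refactor is visible in the sketch: once the zap and
wakeup are wrapped in a helper that takes a single struct kvm, a
per-VM path can invoke the same logic without duplicating the
slots_lock dance from set_nx_huge_pages().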