Factor out the code to update the NX hugepages state for an individual
VM. This will be expanded in future commits to allow per-VM control of
NX hugepages.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3b8da8b0745e..1b59b56642f1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6195,6 +6195,15 @@ static void __set_nx_huge_pages(bool val)
 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
 }
 
+static void kvm_update_nx_huge_pages(struct kvm *kvm)
+{
+	mutex_lock(&kvm->slots_lock);
+	kvm_mmu_zap_all_fast(kvm);
+	mutex_unlock(&kvm->slots_lock);
+
+	wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+}
+
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 {
 	bool old_val = nx_huge_pages;
@@ -6217,13 +6226,8 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 
 		mutex_lock(&kvm_lock);
 
-		list_for_each_entry(kvm, &vm_list, vm_list) {
-			mutex_lock(&kvm->slots_lock);
-			kvm_mmu_zap_all_fast(kvm);
-			mutex_unlock(&kvm->slots_lock);
-
-			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
-		}
+		list_for_each_entry(kvm, &vm_list, vm_list)
+			kvm_update_nx_huge_pages(kvm);
 
 		mutex_unlock(&kvm_lock);
 	}
-- 
2.35.1.894.gb6a874cedc-goog