Plumb the mmu_notifier_event enum all the way down to the SEV function so
that the enum can provide proper information for SEV/SEV-ES VMs to do the
cache flush when necessary.

Signed-off-by: Jacky Li <jackyli@xxxxxxxxxx>
Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/svm/sev.c          |  3 ++-
 arch/x86/kvm/svm/svm.h          |  3 ++-
 arch/x86/kvm/x86.c              |  5 +++--
 include/linux/kvm_host.h        |  3 ++-
 virt/kvm/kvm_main.c             | 14 +++++++++-----
 6 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d7036982332e..c026e171a8c8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1741,7 +1741,8 @@ struct kvm_x86_ops {
 	int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
 	int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
 	int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
-	void (*guest_memory_reclaimed)(struct kvm *kvm);
+	void (*guest_memory_reclaimed)(struct kvm *kvm,
+				       unsigned int mmu_notifier_event);
 
 	int (*get_msr_feature)(struct kvm_msr_entry *entry);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 7fbcb7dea2c0..8d30f6c5e872 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2329,7 +2329,8 @@ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
 	wbinvd_on_all_cpus();
 }
 
-void sev_guest_memory_reclaimed(struct kvm *kvm)
+void sev_guest_memory_reclaimed(struct kvm *kvm,
+				unsigned int mmu_notifier_event)
 {
 	if (!sev_guest(kvm))
 		return;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be67ab7fdd10..c8a911a02509 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -676,7 +676,8 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
 				  struct kvm_enc_region *range);
 int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
 int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
-void sev_guest_memory_reclaimed(struct kvm *kvm);
+void sev_guest_memory_reclaimed(struct kvm *kvm,
+				unsigned int mmu_notifier_event);
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 void __init sev_set_cpu_caps(void);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c924075f6f1..2cde9a836bf7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10592,9 +10592,10 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
 
-void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm,
+				     unsigned int mmu_notifier_event)
 {
-	static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+	static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm, mmu_notifier_event);
 }
 
 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4944136efaa2..8984414c5b7a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2270,7 +2270,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 }
 #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
 
-void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm,
+				     unsigned int mmu_notifier_event);
 
 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 486800a7024b..18526e198993 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -154,7 +154,8 @@ static unsigned long long kvm_active_vms;
 
 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
 
-__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm,
+					    unsigned int mmu_notifier_event)
 {
 }
 
@@ -396,7 +397,7 @@ void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
 static void kvm_flush_shadow_all(struct kvm *kvm)
 {
 	kvm_arch_flush_shadow_all(kvm);
-	kvm_arch_guest_memory_reclaimed(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm, MMU_NOTIFY_RELEASE);
 }
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
@@ -546,11 +547,13 @@ typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
 			     unsigned long end);
-typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+typedef void (*on_unlock_fn_t)(struct kvm *kvm,
+			       unsigned int mmu_notifier_event);
 
 struct kvm_hva_range {
 	unsigned long start;
 	unsigned long end;
+	unsigned int event;
 	union kvm_mmu_notifier_arg arg;
 	hva_handler_t handler;
 	on_lock_fn_t on_lock;
@@ -647,7 +650,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 	if (locked) {
 		KVM_MMU_UNLOCK(kvm);
 		if (!IS_KVM_NULL_FN(range->on_unlock))
-			range->on_unlock(kvm);
+			range->on_unlock(kvm, range->event);
 	}
 
 	srcu_read_unlock(&kvm->srcu, idx);
@@ -774,6 +777,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	const struct kvm_hva_range hva_range = {
 		.start		= range->start,
 		.end		= range->end,
+		.event		= range->event,
 		.handler	= kvm_unmap_gfn_range,
 		.on_lock	= kvm_mmu_invalidate_begin,
 		.on_unlock	= kvm_arch_guest_memory_reclaimed,
@@ -1769,7 +1773,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
 	 *	- kvm_is_visible_gfn (mmu_check_root)
 	 */
 	kvm_arch_flush_shadow_memslot(kvm, old);
-	kvm_arch_guest_memory_reclaimed(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm, MMU_NOTIFY_UNMAP);
 
 	/* Was released by kvm_swap_active_memslots(), reacquire. */
 	mutex_lock(&kvm->slots_arch_lock);
-- 
2.43.0.rc0.421.g78406f8d94-goog