Add an mmu_notifier for protected VMs. The callback function is
triggered when the mm is torn down, and will attempt to convert all
protected vCPUs to non-protected. This allows the mm teardown to use
the destroy page UVC instead of export.

Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
---
 arch/s390/include/asm/kvm_host.h |  1 +
 arch/s390/kvm/pv.c               | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a604d51acfc8..8b288cff2a3a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -921,6 +921,7 @@ struct kvm_s390_pv {
 	u64 guest_len;
 	unsigned long stor_base;
 	void *stor_var;
+	struct mmu_notifier mmu_notifier;
 };
 
 struct kvm_arch{
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 59da54bd6b21..111f85158cce 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -195,6 +195,21 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 	return -EIO;
 }
 
+static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
+					     struct mm_struct *mm)
+{
+	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
+	u16 dummy;
+
+	mutex_lock(&kvm->lock);
+	kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
+	mutex_unlock(&kvm->lock);
+}
+
+static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
+	.release = kvm_s390_pv_mmu_notifier_release,
+};
+
 int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 {
 	struct uv_cb_cgc uvcb = {
@@ -236,6 +251,11 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 		return -EIO;
 	}
 	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
+	/* Add the notifier only once. No races because we hold kvm->lock */
+	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
+		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
+		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
+	}
 	return 0;
 }
-- 
2.31.1
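
For reference, the release callback above relies on container_of() to map
the embedded mmu_notifier back to its enclosing struct kvm. Below is a
minimal, self-contained userspace sketch of that pattern; the names
(my_notifier, my_vm, my_release) are hypothetical and it is an illustration
only, not kernel code.

/*
 * Sketch of the container_of pattern: given a pointer to an embedded
 * member, recover a pointer to the enclosing structure.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_notifier {
	void (*release)(struct my_notifier *subscription);
};

struct my_vm {
	int id;
	struct my_notifier notifier;	/* embedded, like arch.pv.mmu_notifier */
};

static void my_release(struct my_notifier *subscription)
{
	/* Map the member pointer back to the enclosing VM structure. */
	struct my_vm *vm = container_of(subscription, struct my_vm, notifier);

	printf("releasing vm %d\n", vm->id);
}

int main(void)
{
	struct my_vm vm = { .id = 42 };

	vm.notifier.release = my_release;
	/* The teardown path would invoke the callback roughly like this: */
	vm.notifier.release(&vm.notifier);
	return 0;
}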