The mmu_notifier_retry() function, used to test whether any page
invalidations are in progress, currently takes a vcpu pointer, though
the code only needs the VM's struct kvm pointer.  Forthcoming patches
to the powerpc Book3S HV code will need to test for retry within a VM
ioctl, where a struct kvm pointer is available but a struct kvm_vcpu
pointer isn't.  Therefore this creates a variant of mmu_notifier_retry()
called kvm_mmu_notifier_retry() that takes a struct kvm pointer, and
implements mmu_notifier_retry() in terms of it.

Signed-off-by: Paul Mackerras <paulus@xxxxxxxxx>
---
 include/linux/kvm_host.h | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6afc5be..1cc1e1d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -841,9 +841,9 @@ extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+static inline int kvm_mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
-	if (unlikely(vcpu->kvm->mmu_notifier_count))
+	if (unlikely(kvm->mmu_notifier_count))
 		return 1;
 	/*
 	 * Ensure the read of mmu_notifier_count happens before the read
@@ -856,10 +856,15 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 	 * can't rely on kvm->mmu_lock to keep things ordered.
 	 */
 	smp_rmb();
-	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+	if (kvm->mmu_notifier_seq != mmu_seq)
 		return 1;
 	return 0;
 }
+
+static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+{
+	return kvm_mmu_notifier_retry(vcpu->kvm, mmu_seq);
+}
 #endif
 
 #ifdef KVM_CAP_IRQ_ROUTING
-- 
1.7.10.4
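
As a usage sketch for reviewers: with this change, a VM-scope ioctl
handler can follow the usual snapshot/translate/lock/retry pattern
without a vcpu in hand.  The handler name, gfn argument, and -EAGAIN
policy below are invented for illustration and are not part of this
patch:

	/*
	 * Illustrative only: a hypothetical VM-scope ioctl handler showing
	 * the intended calling pattern for kvm_mmu_notifier_retry().
	 */
	static int kvm_vm_ioctl_map_example(struct kvm *kvm, gfn_t gfn)
	{
		unsigned long mmu_seq;
		pfn_t pfn;

		/* Snapshot the notifier sequence before translating the gfn. */
		mmu_seq = kvm->mmu_notifier_seq;
		smp_rmb();

		pfn = gfn_to_pfn(kvm, gfn);
		if (is_error_pfn(pfn))
			return -EFAULT;

		spin_lock(&kvm->mmu_lock);
		if (kvm_mmu_notifier_retry(kvm, mmu_seq)) {
			/* An invalidation ran concurrently; drop the pfn and retry. */
			spin_unlock(&kvm->mmu_lock);
			kvm_release_pfn_clean(pfn);
			return -EAGAIN;
		}
		/* ... install the gfn->pfn translation under mmu_lock ... */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return 0;
	}

A vcpu-based caller is unchanged, since mmu_notifier_retry() keeps its
existing signature and simply forwards vcpu->kvm to the new variant.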