The wrappers make it less clear that the position of the call
to kvm_arch_async_page_present depends on the architecture, and
that only one of the two call sites will actually be active.
Remove them.

Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 virt/kvm/async_pf.c | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)

diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index d8ef708a2ef6..15e5b037f92d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -17,21 +17,6 @@
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
-static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
-					       struct kvm_async_pf *work)
-{
-#ifdef CONFIG_KVM_ASYNC_PF_SYNC
-	kvm_arch_async_page_present(vcpu, work);
-#endif
-}
-static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
-						struct kvm_async_pf *work)
-{
-#ifndef CONFIG_KVM_ASYNC_PF_SYNC
-	kvm_arch_async_page_present(vcpu, work);
-#endif
-}
-
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
@@ -80,7 +65,8 @@ static void async_pf_execute(struct work_struct *work)
 	if (locked)
 		up_read(&mm->mmap_sem);
 
-	kvm_async_page_present_sync(vcpu, apf);
+	if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+		kvm_arch_async_page_present(vcpu, apf);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
@@ -157,7 +143,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->async_pf.lock);
 
 	kvm_arch_async_page_ready(vcpu, work);
-	kvm_async_page_present_async(vcpu, work);
+	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+		kvm_arch_async_page_present(vcpu, work);
 
 	list_del(&work->queue);
 	vcpu->async_pf.queued--;
-- 
1.8.3.1
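
An aside for readers less familiar with IS_ENABLED(): unlike code hidden
behind #ifdef, it expands to a constant-expression 1 or 0 (see
<linux/kconfig.h>), so both call sites above stay visible to the compiler
and the dead branch is simply discarded. A minimal standalone sketch of
the trick follows, simplified from the kernel's macro; CONFIG_DEMO_SYNC
and the two demo functions are made-up stand-ins, not the real KVM code:

	#include <stdio.h>

	/*
	 * Simplified form of the IS_ENABLED() machinery from
	 * <linux/kconfig.h>.  Enabled config options are defined to 1;
	 * the placeholder macro turns "defined to 1" into a constant 1
	 * and anything else (including "not defined at all") into 0.
	 */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x) ___is_defined(x)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define IS_ENABLED(option) __is_defined(option)

	#define CONFIG_DEMO_SYNC 1	/* comment this out to flip the branches */

	static void page_present_sync(void)  { puts("present: sync path");  }
	static void page_present_async(void) { puts("present: async path"); }

	int main(void)
	{
		/*
		 * Both branches are parsed and type-checked on every
		 * build; the compiler eliminates the one whose
		 * condition is the constant 0.
		 */
		if (IS_ENABLED(CONFIG_DEMO_SYNC))
			page_present_sync();

		if (!IS_ENABLED(CONFIG_DEMO_SYNC))
			page_present_async();

		return 0;
	}

Built with plain gcc this prints "present: sync path"; dropping the
define flips it to the async branch, with no preprocessor conditionals
around the call sites themselves, which is exactly the readability
property the patch is after.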