On Mon, Nov 01, 2010 at 05:02:35PM +0800, Xiao Guangrong wrote: > If there is no need to inject an async #PF into the PV guest, we can handle > more completed apfs at one time, so we can retry the guest #PF > as early as possible > > Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxx> > --- > arch/x86/include/asm/kvm_host.h | 3 ++- > arch/x86/kvm/x86.c | 8 ++++++-- > virt/kvm/async_pf.c | 28 ++++++++++++++++------------ > 3 files changed, 24 insertions(+), 15 deletions(-) > > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > index 1be0058..c95b3ff 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -818,7 +818,8 @@ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); > > void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, > struct kvm_async_pf *work); > -void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, > +/* return true if we can handle more completed apfs, false otherwise */ > +bool kvm_arch_async_page_present(struct kvm_vcpu *vcpu, > struct kvm_async_pf *work); > void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, > struct kvm_async_pf *work); > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 4da8485..189664a 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -6265,7 +6265,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, > } > } > > -void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, > +bool kvm_arch_async_page_present(struct kvm_vcpu *vcpu, > struct kvm_async_pf *work) > { > trace_kvm_async_pf_ready(work->arch.token, work->gva); > @@ -6274,13 +6274,17 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, > else > kvm_del_async_pf_gfn(vcpu, work->arch.gfn); > > + vcpu->arch.apf.halted = false; > + > if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && > !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { > vcpu->arch.fault.error_code = 0; > vcpu->arch.fault.address = work->arch.token; > 
kvm_inject_page_fault(vcpu); > + return false; > } > - vcpu->arch.apf.halted = false; > + > + return true; > } > > bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) > diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c > index 60df9e0..d57ec92 100644 > --- a/virt/kvm/async_pf.c > +++ b/virt/kvm/async_pf.c > @@ -123,25 +123,29 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) > void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) > { > struct kvm_async_pf *work; > + bool ret; > > if (list_empty_careful(&vcpu->async_pf.done) || > !kvm_arch_can_inject_async_page_present(vcpu)) > return; > > - spin_lock(&vcpu->async_pf.lock); > - work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link); > - list_del(&work->link); > - spin_unlock(&vcpu->async_pf.lock); > + do { > + spin_lock(&vcpu->async_pf.lock); > + work = list_first_entry(&vcpu->async_pf.done, typeof(*work), > + link); > + list_del(&work->link); > + spin_unlock(&vcpu->async_pf.lock); > > - if (work->page) > - kvm_arch_async_page_ready(vcpu, work); > - kvm_arch_async_page_present(vcpu, work); > + if (work->page) > + kvm_arch_async_page_ready(vcpu, work); > + ret = kvm_arch_async_page_present(vcpu, work); > > - list_del(&work->queue); > - vcpu->async_pf.queued--; > - if (work->page) > - put_page(work->page); > - kmem_cache_free(async_pf_cache, work); > + list_del(&work->queue); > + vcpu->async_pf.queued--; > + if (work->page) > + put_page(work->page); > + kmem_cache_free(async_pf_cache, work); > + } while (ret && !list_empty_careful(&vcpu->async_pf.done)); > } > There is no need to change kvm_arch_async_page_present() to return anything. You can do a while loop like this: while (!list_empty_careful(&vcpu->async_pf.done) && kvm_arch_can_inject_async_page_present(vcpu)) { } If the kvm_arch_async_page_present() call injects an exception, kvm_arch_can_inject_async_page_present() will return false on the next iteration. -- Gleb. 
-- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html