On Mon, Oct 04, 2010 at 05:56:32PM +0200, Gleb Natapov wrote: > If async page fault is received by idle task or when preempt_count is > not zero guest cannot reschedule, so do sti; hlt and wait for page to be > ready. vcpu can still process interrupts while it waits for the page to > be ready. > > Acked-by: Rik van Riel <riel@xxxxxxxxxx> > Signed-off-by: Gleb Natapov <gleb@xxxxxxxxxx> > --- > arch/x86/kernel/kvm.c | 40 ++++++++++++++++++++++++++++++++++------ > 1 files changed, 34 insertions(+), 6 deletions(-) > > diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c > index 36fb3e4..f73946f 100644 > --- a/arch/x86/kernel/kvm.c > +++ b/arch/x86/kernel/kvm.c > @@ -37,6 +37,7 @@ > #include <asm/cpu.h> > #include <asm/traps.h> > #include <asm/desc.h> > +#include <asm/tlbflush.h> > > #define MMU_QUEUE_SIZE 1024 > > @@ -78,6 +79,8 @@ struct kvm_task_sleep_node { > wait_queue_head_t wq; > u32 token; > int cpu; > + bool halted; > + struct mm_struct *mm; > }; > > static struct kvm_task_sleep_head { > @@ -106,6 +109,11 @@ void kvm_async_pf_task_wait(u32 token) > struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; > struct kvm_task_sleep_node n, *e; > DEFINE_WAIT(wait); > + int cpu, idle; > + > + cpu = get_cpu(); > + idle = idle_cpu(cpu); > + put_cpu(); > > spin_lock(&b->lock); > e = _find_apf_task(b, token); > @@ -119,19 +127,33 @@ void kvm_async_pf_task_wait(u32 token) > > n.token = token; > n.cpu = smp_processor_id(); > + n.mm = current->active_mm; > + n.halted = idle || preempt_count() > 1; > + atomic_inc(&n.mm->mm_count); Can't see why this mm_count reference (the atomic_inc(&n.mm->mm_count) above) is needed. -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html