On Thu, May 31, 2012 at 5:21 AM, Marc Zyngier <marc.zyngier at arm.com> wrote:
> It is important to handle the exit code on the same CPU as the guest,
> especially as we're accessing resources that are per-CPU (caches,
> for example).
>
> To achieve this, make the preemption-disabled section encompass both
> __kvm_vcpu_run() and handle_exit().
>
> user_mem_abort() can sleep though (as it calls gfn_to_pfn()), so
> preemption has to be re-enabled at that stage.
>
> Reported-by: Will Deacon <will.deacon at arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier at arm.com>
> ---
>  arch/arm/kvm/arm.c |    2 ++
>  arch/arm/kvm/mmu.c |    2 ++
>  2 files changed, 4 insertions(+), 0 deletions(-)
>
> diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
> index 13681a1..b96462b 100644
> --- a/arch/arm/kvm/arm.c
> +++ b/arch/arm/kvm/arm.c
> @@ -522,6 +522,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
>
>                 update_vttbr(vcpu->kvm);
>
> +               preempt_disable();
>                 local_irq_disable();
>                 kvm_guest_enter();
>                 vcpu->mode = IN_GUEST_MODE;
> @@ -540,6 +541,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
>                 trace_kvm_exit(vcpu->arch.regs.pc);
>
>                 ret = handle_exit(vcpu, run, ret);
> +               preempt_enable();
>                 if (ret) {
>                         kvm_err("Error in handle_exit\n");
>                         break;
> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> index 992d39a..ae38c21 100644
> --- a/arch/arm/kvm/mmu.c
> +++ b/arch/arm/kvm/mmu.c
> @@ -419,7 +419,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>        pfn_t pfn;
>        int ret;
>
> +       preempt_enable();
>        pfn = gfn_to_pfn(vcpu->kvm, gfn);
> +       preempt_disable();
>
>        if (is_error_pfn(pfn)) {
>                put_page(pfn_to_page(pfn));
> --
> 1.7.3.4
>

thanks, applied. (Nice catch!) I added some commenting:

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 56968e5..a9f209b 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -490,13 +490,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 if (vcpu->arch.wait_for_interrupts)
                         kvm_vcpu_block(vcpu);

-                /*
+                /**************************************************************
                  * Enter the guest
                  */
                 trace_kvm_entry(vcpu->arch.regs.pc);

                 update_vttbr(vcpu->kvm);

+                /*
+                 * Make sure preemption is disabled while calling handle_exit
+                 * as exit handling touches CPU-specific resources, such as
+                 * caches, and we must stay on the same CPU.
+                 *
+                 * Code that might sleep must re-enable preemption and must
+                 * therefore be done accessing CPU-specific resources first.
+                 */
                 preempt_disable();
                 local_irq_disable();
                 kvm_guest_enter();
@@ -509,6 +517,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 local_irq_enable();
                 trace_kvm_exit(vcpu->arch.regs.pc);

+                /*
+                 * Back from guest
+                 *************************************************************/
                 ret = handle_exit(vcpu, run, ret);
                 preempt_enable();

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7b068b2..e5c45a6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -324,6 +324,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         pfn_t pfn;
         int ret;

+        /* preemption disabled for handle_exit, gfn_to_pfn may sleep */
         preempt_enable();
         pfn = gfn_to_pfn(vcpu->kvm, gfn);
         preempt_disable();

-Christoffer
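
For reference, the pattern being applied boils down to the sketch below. It
reuses the functions from the diffs above (__kvm_vcpu_run, handle_exit,
gfn_to_pfn), but the wrapper names (run_vcpu_once, stage2_gfn_to_pfn) are made
up for illustration and the surrounding kernel context is omitted, so treat it
as a sketch of the idea rather than the actual KVM/ARM code:

#include <linux/preempt.h>
#include <linux/kvm_host.h>

/*
 * Keep exit handling on the CPU that ran the guest: preemption is held
 * off from just before __kvm_vcpu_run() until handle_exit() returns.
 */
static int run_vcpu_once(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int exit_code;

        preempt_disable();      /* stay on this CPU...                  */
        local_irq_disable();

        exit_code = __kvm_vcpu_run(vcpu);

        local_irq_enable();

        exit_code = handle_exit(vcpu, run, exit_code);
        preempt_enable();       /* ...until the exit has been handled   */

        return exit_code;
}

/*
 * Reached from handle_exit() with preemption disabled; gfn_to_pfn() may
 * sleep, so preemption is re-enabled only around that call, and nothing
 * CPU-specific is touched while it is enabled.
 */
static pfn_t stage2_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        pfn_t pfn;

        preempt_enable();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
        preempt_disable();

        return pfn;
}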