Hi Marc, On Thu, May 31, 2012 at 05:17:41AM +0100, Marc Zyngier wrote: > diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c > index bf3e5f5..803d945 100644 > --- a/arch/arm/kvm/arm.c > +++ b/arch/arm/kvm/arm.c > @@ -33,6 +33,7 @@ > #include <asm/ptrace.h> > #include <asm/mman.h> > #include <asm/tlbflush.h> > +#include <asm/cacheflush.h> > #include <asm/cputype.h> > #include <asm/kvm_arm.h> > #include <asm/kvm_asm.h> > @@ -462,6 +463,17 @@ static inline int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, > return arm_exit_handlers[hsr_ec](vcpu, run); > } > > +/* > + * Check whether this vcpu requires the cache to be flushed on this > + * physical CPU. This is a consequence of doing dcache operations by > + * set/way on this vcpu. > + */ > +static bool kvm_needs_dcache_flush(struct kvm_vcpu *vcpu) > +{ > + return cpumask_test_and_clear_cpu(smp_processor_id(), > + &vcpu->arch.require_dcache_flush); > +} > + > /** > * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code > * @vcpu: The VCPU pointer > @@ -487,6 +499,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) > if (vcpu->sigset_active) > sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); > > + if (kvm_needs_dcache_flush(vcpu)) > + flush_cache_all(); /* We'd really want v7_flush_dcache_all()... */ > + I'd probably just inline the check since it has the side-effect of clearing the mask. However, I think this code is called in a preemptible context so it's not safe to use smp_processor_id(). Perhaps this code would be better off in kvm_arch_vcpu_load, which hangs off the preempt notifiers during sched-in (and takes the cpu as an argument). You'll still need to disable preemption during the cache flush/cpumask update.
> diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c > index 3ceab47..55b4726 100644 > --- a/arch/arm/kvm/emulate.c > +++ b/arch/arm/kvm/emulate.c > @@ -255,6 +255,36 @@ static bool read_actlr(struct kvm_vcpu *vcpu, > return true; > } > > +static bool write_dcsw(struct kvm_vcpu *vcpu, > + const struct coproc_params *p, > + unsigned long cp15_reg) > +{ > + cpumask_var_t tmpmask; > + > + if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) > + return false; > + > + switch(p->CRm) { > + case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ > + case 14: /* DCCISW */ > + asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (p->Rt1)); > + break; > + > + case 10: /* DCCSW */ > + asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (p->Rt1)); > + break; > + } > + > + cpumask_complement(tmpmask, cpumask_of(smp_processor_id())); > + cpumask_or(&vcpu->arch.require_dcache_flush, > + &vcpu->arch.require_dcache_flush, > + tmpmask); Is this not the same as cpumask_clear_cpu? In that case, you can lose the temporary mask. > + free_cpumask_var(tmpmask); > + > + return true; > +} I'm guessing the emulation is called in a non-preemptible context, but I haven't checked myself (not familiar with the kvm code :). Cheers, Will