On Fri, Apr 20, 2012 at 04:17:47PM +0800, Xiao Guangrong wrote:
> Introduce a common function to abstract spte write-protect to
> cleanup the code
>
> Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu.c |   60 ++++++++++++++++++++++++++++++---------------------
>  1 files changed, 35 insertions(+), 25 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 4a3cc18..e70ff38 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1041,6 +1041,34 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
>          rmap_remove(kvm, sptep);
>  }
>
> +/* Return true if the spte is dropped. */
> +static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool large,
> +                               bool *flush)
> +{
> +        u64 spte = *sptep;
> +
> +        if (!is_writable_pte(spte))
> +                return false;
> +
> +        *flush |= true;
> +
> +        if (large) {
> +                pgprintk("rmap_write_protect(large): spte %p %llx\n",
> +                         sptep, *sptep);
> +                BUG_ON(!is_large_pte(spte));
> +
> +                drop_spte(kvm, sptep);
> +                --kvm->stat.lpages;
> +                return true;
> +        }
> +
> +        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
> +        spte = spte & ~PT_WRITABLE_MASK;
> +        mmu_spte_update(sptep, spte);
> +
> +        return false;
> +}
> +
>  static bool
>  __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
>  {
> @@ -1050,24 +1078,13 @@ __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
>
>          for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
>                  BUG_ON(!(*sptep & PT_PRESENT_MASK));
> -                rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
> -
> -                if (!is_writable_pte(*sptep)) {
> -                        sptep = rmap_get_next(&iter);
> -                        continue;
> -                }
> -
> -                if (level == PT_PAGE_TABLE_LEVEL) {
> -                        mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
> -                        sptep = rmap_get_next(&iter);
> -                } else {
> -                        BUG_ON(!is_large_pte(*sptep));
> -                        drop_spte(kvm, sptep);
> -                        --kvm->stat.lpages;

It is preferable to remove all large sptes, including read-only ones
(the current behaviour), than to verify that no read->write transition
can occur in the fault paths, which keep increasing in number.
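
To make that concrete, here is an untested sketch (not a patch, and only
reusing the helpers already visible above: is_large_pte, drop_spte,
mmu_spte_update, is_writable_pte) of a spte_write_protect() that keeps
removing every large spte, read-only or not, by handling the large case
before the writability check:

/*
 * Untested sketch, not a patch: same contract as the posted
 * spte_write_protect() (return true if the spte is dropped), but large
 * sptes are removed before the writability check, so read-only large
 * mappings are dropped as well and no read->write transition on them
 * has to be audited in the fault paths.
 */
static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool large,
                               bool *flush)
{
        u64 spte = *sptep;

        if (large) {
                /* Drop every large spte, writable or not. */
                BUG_ON(!is_large_pte(spte));
                drop_spte(kvm, sptep);
                --kvm->stat.lpages;
                *flush = true;
                return true;
        }

        if (!is_writable_pte(spte))
                return false;

        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
        mmu_spte_update(sptep, spte & ~PT_WRITABLE_MASK);
        *flush = true;

        return false;
}

The only change relative to the posted version is the order of the two
checks; the signature and return contract stay the same, so the caller
in __rmap_write_protect() would not need to change.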