On 04/10/2012 09:05 PM, Takuya Yoshikawa wrote:

> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 29ad6f9..a50f7ba 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3930,16 +3930,30 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
>  	kvm_flush_remote_tlbs(kvm);
>  }
>
> -void kvm_mmu_zap_all(struct kvm *kvm)
> +/**
> + * kvm_mmu_zap_all - zap all shadows which have mappings into a given slot
> + * @kvm: the kvm instance
> + * @slot: id of the target slot
> + *
> + * If @slot is -1, zap all shadow pages.
> + */
> +void kvm_mmu_zap_all(struct kvm *kvm, int slot)
>  {
>  	struct kvm_mmu_page *sp, *node;
>  	LIST_HEAD(invalid_list);
> +	int zapped;
>
>  	spin_lock(&kvm->mmu_lock);
>  restart:
> -	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
> -		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
> -			goto restart;
> +	zapped = 0;
> +	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
> +		if ((slot >= 0) && !test_bit(slot, sp->slot_bitmap))
> +			continue;
> +
> +		zapped |= kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

You should "goto restart" here like the original code does; also, the
"safe" version of list_for_each is then not needed.
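
Something like the untested sketch below is what I mean, reusing sp, slot,
kvm and invalid_list from the quoted hunk and leaving the zapped
bookkeeping aside:

restart:
	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		/* Skip shadow pages that do not map into the target slot. */
		if ((slot >= 0) && !test_bit(slot, sp->slot_bitmap))
			continue;

		/*
		 * Zapping may remove other pages from the list as well, so
		 * restart the walk from the head instead of relying on a
		 * saved next pointer; a plain iterator is then enough.
		 */
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;
	}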