On 09/20/2010 04:19 PM, Xiao Guangrong wrote:
It allows keeping an unsync sp alive while it is outside the protection of mmu_lock; later we can use
kvm_mmu_free_page() to free it once !sp->active_count
Don't understand. Of course unsync pages exist outside mmu_lock...?
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 839852d..4b7af3f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -996,7 +996,6 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
ASSERT(is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
- list_del(&sp->link);
__free_page(virt_to_page(sp->spt));
if (!sp->role.direct)
__free_page(virt_to_page(sp->gfns));
@@ -1681,9 +1680,8 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
kvm_mmu_unlink_parents(kvm, sp);
if (!sp->role.invalid && !sp->role.direct)
unaccount_shadowed(kvm, sp->gfn);
- if (sp->unsync)
- kvm_unlink_unsync_page(kvm, sp);
- if (!sp->active_count) {
+
+ if (!sp->active_count || sp->unsync) {
/* Count self */
ret++;
list_move(&sp->link, invalid_list);
How can you drop an active unsync page?
I'm missing something here.
@@ -1692,6 +1690,8 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
kvm_reload_remote_mmus(kvm);
}
+ if (sp->unsync)
+ kvm_unlink_unsync_page(kvm, sp);
sp->role.invalid = 1;
kvm_mmu_reset_last_pte_updated(kvm);
return ret;
@@ -1709,8 +1709,12 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
do {
sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
- WARN_ON(!sp->role.invalid || sp->active_count);
- kvm_mmu_free_page(kvm, sp);
+ WARN_ON(!sp->role.invalid);
+ list_del(&sp->link);
+ if (sp->active_count)
+ WARN_ON(!sp->unsync);
+ else
+ kvm_mmu_free_page(kvm, sp);
} while (!list_empty(invalid_list));
}
--
error compiling committee.c: too many arguments to function
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html