From: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>

Introduce pre_zap_some_sp(), a helper that makes up to a requested number
of shadow pages ready to be freed and puts them on an invalid list.  This
will be used for mmu_shrink() in the following patch.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.c |   36 ++++++++++++++++++++++++++----------
 1 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b1e8270..fcd0dd1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2003,6 +2003,28 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 }
 
+/**
+ * pre_zap_some_sp - make some shadow pages ready to be freed
+ * @kvm: the kvm instance
+ * @invalid_list: the list to which we add shadow pages ready to be freed
+ * @nr_to_zap: how many shadow pages we want to zap
+ *
+ * Try to make @nr_to_zap shadow pages ready to be freed, then put them
+ * on @invalid_list.
+ *
+ * Return the number of shadow pages actually added to @invalid_list.
+ */
+static int pre_zap_some_sp(struct kvm *kvm, struct list_head *invalid_list,
+			   int nr_to_zap)
+{
+	int nr_before = kvm->arch.n_used_mmu_pages;
+
+	while (nr_to_zap > 0 && !list_empty(&kvm->arch.active_mmu_pages))
+		nr_to_zap -= pre_zap_one_sp(kvm, invalid_list);
+
+	return nr_before - kvm->arch.n_used_mmu_pages;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
@@ -2010,17 +2032,11 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);
-	/*
-	 * If we set the number of mmu pages to be smaller be than the
-	 * number of actived pages , we must to free some mmu pages before we
-	 * change the value
-	 */
+	int nr_to_zap = kvm->arch.n_used_mmu_pages - goal_nr_mmu_pages;
 
-	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
-		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
-			!list_empty(&kvm->arch.active_mmu_pages)) {
-			pre_zap_one_sp(kvm, &invalid_list);
-		}
+	if (nr_to_zap > 0) {
+		/* free some shadow pages to make the number fit the goal */
+		pre_zap_some_sp(kvm, &invalid_list, nr_to_zap);
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
-- 
1.7.5.4
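
Note (not part of the patch): to show where the helper is headed, here is a
rough, hypothetical sketch of how mmu_shrink() could use pre_zap_some_sp().
This is not the follow-up patch; the shrink_control interface, the
kvm_lock/vm_list walk, the srcu/mmu_lock usage and the
kvm_total_used_mmu_pages return value are assumed from the mmu_shrink()
already present in mmu.c, and only the pre_zap_some_sp() call and the
budget accounting around its return value are what this series adds.

/* Hypothetical sketch only -- not the actual follow-up patch. */
static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_zap = sc->nr_to_scan;

	if (nr_to_zap == 0)
		goto out;

	raw_spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		/*
		 * pre_zap_some_sp() reports how many shadow pages it
		 * actually queued, so the remaining budget can be
		 * carried over to the next VM.
		 */
		nr_to_zap -= pre_zap_some_sp(kvm, &invalid_list, nr_to_zap);
		kvm_mmu_commit_zap_page(kvm, &invalid_list);

		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		if (nr_to_zap <= 0)
			break;
	}

	raw_spin_unlock(&kvm_lock);

out:
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

The point of having pre_zap_some_sp() return the number of pages actually
queued, rather than trusting @nr_to_zap alone, is exactly this kind of
accounting: the caller can subtract what was really reclaimed from its
budget and decide whether to keep walking the VM list.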