This basically takes the loop contents and sticks it in its own function for readability. Don't pay too much attention to the use of nr_scanned in here. It's a bit wonky but it'll change in a minute anyway. Signed-off-by: Dave Hansen <dave@xxxxxxxxxxxxxxxxxx> --- linux-2.6.git-dave/arch/x86/kvm/mmu.c | 35 ++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff -puN arch/x86/kvm/mmu.c~optimize_shrinker arch/x86/kvm/mmu.c --- linux-2.6.git/arch/x86/kvm/mmu.c~optimize_shrinker 2010-06-09 15:14:30.000000000 -0700 +++ linux-2.6.git-dave/arch/x86/kvm/mmu.c 2010-06-09 15:14:30.000000000 -0700 @@ -2923,6 +2923,26 @@ static int kvm_mmu_remove_some_alloc_mmu return kvm_mmu_zap_page(kvm, page) + 1; } +static int shrink_kvm_mmu(struct kvm *kvm, int nr_to_scan) +{ + int idx, freed_pages = 0; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + if (kvm->arch.n_used_mmu_pages > 0) + freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); + + /* + * This should optimally return the number of objects (mmu pages) + * that we have scanned. But, for now, just return the number + * that we were able to free. 
+ */ + return freed_pages; +} + static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask) { struct kvm *kvm; @@ -2934,20 +2954,15 @@ static int mmu_shrink(int nr_to_scan, gf spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { - int idx, freed_pages; + if (nr_to_scan <= 0) + break; - idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); - if (!kvm_freed && nr_to_scan > 0 && - kvm->arch.n_used_mmu_pages > 0) { - freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm); + shrink_kvm_mmu(kvm, nr_to_scan); + if (!kvm_freed) kvm_freed = kvm; - } nr_to_scan--; - - spin_unlock(&kvm->mmu_lock); - srcu_read_unlock(&kvm->srcu, idx); } + if (kvm_freed) list_move_tail(&kvm_freed->vm_list, &vm_list); _ -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html