From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>

mmu_unsync_walk() and __mmu_unsync_walk() require the caller to clear
the unsync status of the shadow pages in the resulting pvec, either by
syncing them or by zapping them.  All callers do so.  Otherwise
mmu_unsync_walk() and __mmu_unsync_walk() cannot work, because they
always walk from the beginning and would keep returning the same pages.

Now that mmu_unsync_walk() and __mmu_unsync_walk() clear the unsync
bits directly, rename them to mmu_unsync_walk_and_clear() and
__mmu_unsync_walk_and_clear().

Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2446ede0b7b9..a56d328365e4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1773,7 +1773,7 @@ static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
 	__clear_bit(idx, sp->unsync_child_bitmap);
 }
 
-static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
+static int __mmu_unsync_walk_and_clear(struct kvm_mmu_page *sp,
 			   struct kvm_mmu_pages *pvec)
 {
 	int i, ret, nr_unsync_leaf = 0;
@@ -1793,7 +1793,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 			if (mmu_pages_add(pvec, child, i))
 				return -ENOSPC;
 
-			ret = __mmu_unsync_walk(child, pvec);
+			ret = __mmu_unsync_walk_and_clear(child, pvec);
 			if (ret < 0)
 				return ret;
 			nr_unsync_leaf += ret;
@@ -1818,7 +1818,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 
 #define INVALID_INDEX (-1)
 
-static int mmu_unsync_walk(struct kvm_mmu_page *sp,
+static int mmu_unsync_walk_and_clear(struct kvm_mmu_page *sp,
 			   struct kvm_mmu_pages *pvec)
 {
 	pvec->nr = 0;
@@ -1826,7 +1826,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 		return 0;
 
 	mmu_pages_add(pvec, sp, INVALID_INDEX);
-	return __mmu_unsync_walk(sp, pvec);
+	return __mmu_unsync_walk_and_clear(sp, pvec);
 }
 
 static void kvm_mmu_page_clear_unsync(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -1962,7 +1962,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	while (mmu_unsync_walk(parent, &pages)) {
+	while (mmu_unsync_walk_and_clear(parent, &pages)) {
 		bool protected = false;
 
 		for_each_sp(pages, sp, parents, i)
@@ -2279,7 +2279,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 	if (parent->role.level == PG_LEVEL_4K)
 		return 0;
 
-	while (mmu_unsync_walk(parent, &pages)) {
+	while (mmu_unsync_walk_and_clear(parent, &pages)) {
 		struct kvm_mmu_page *sp;
 
 		for_each_sp(pages, sp, parents, i) {
-- 
2.19.1.6.gb485710b
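
To make the caller contract described in the changelog concrete, below
is a minimal, self-contained C sketch of the invariant.  The struct
page, unsync_walk(), and the tree shape are hypothetical stand-ins for
illustration only, not the real KVM structures or functions:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in type, not the real KVM shadow page: a tiny
 * binary tree whose nodes carry an "unsync" flag.
 */
struct page {
	bool unsync;
	struct page *child[2];
};

/*
 * Collect unsync pages into @pvec, always restarting from @root,
 * mirroring how the real walk always walks from the beginning.
 * @nr is the number of entries already in @pvec.
 */
static int unsync_walk(struct page *root, struct page **pvec, int cap,
		       int nr)
{
	int i;

	if (!root || nr >= cap)
		return nr;
	if (root->unsync)
		pvec[nr++] = root;
	for (i = 0; i < 2; i++)
		nr = unsync_walk(root->child[i], pvec, cap, nr);
	return nr;
}

int main(void)
{
	struct page leaf1 = { .unsync = true };
	struct page leaf2 = { .unsync = true };
	struct page root = { .child = { &leaf1, &leaf2 } };
	struct page *pvec[16];
	int n, i, rounds = 0;

	/*
	 * Caller contract: clear unsync on every page returned.
	 * Dropping the assignment below would make unsync_walk()
	 * return the same pages on every iteration, and this loop
	 * would never terminate.
	 */
	while ((n = unsync_walk(&root, pvec, 16, 0)) > 0) {
		for (i = 0; i < n; i++)
			pvec[i]->unsync = false;	/* "sync" the page */
		rounds++;
	}
	printf("converged after %d round(s)\n", rounds);
	return 0;
}

Running this prints "converged after 1 round(s)"; removing the clearing
line turns the loop infinite, which is exactly the failure mode the
changelog describes for a caller that neither syncs nor zaps the
returned pages.  As the changelog notes, after this patch the walkers
clear the unsync bits themselves, which is what the _and_clear suffix
records.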