Certainly better to read.

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9ed26cc..3769613 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3596,8 +3596,8 @@ static bool
 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
-	int root, leaf;
+	u64 sptes[PT64_ROOT_LEVEL] = { 0 }, spte = 0ull;
+	int level;
 	bool reserved = false;
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
@@ -3605,14 +3605,8 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 
 	walk_shadow_page_lockless_begin(vcpu);
 
-	for (shadow_walk_init(&iterator, vcpu, addr),
-		 leaf = root = iterator.level;
-	     shadow_walk_okay(&iterator);
-	     __shadow_walk_next(&iterator, spte)) {
-		spte = mmu_spte_get_lockless(iterator.sptep);
-
-		sptes[leaf - 1] = spte;
-		leaf--;
+	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+		sptes[iterator.level - 1] = spte;
 
 		if (!is_shadow_present_pte(spte))
 			break;
@@ -3626,10 +3620,11 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	if (reserved) {
 		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
 		       __func__, addr);
-		while (root > leaf) {
+		for (level = PT64_ROOT_LEVEL; level > 0; level--) {
+			if (!sptes[level - 1])
+				continue;
 			pr_err("------ spte 0x%llx level %d.\n",
-			       sptes[level - 1], level);
+			       sptes[level - 1], level);
 		}
 	}
 exit:
-- 
2.9.4
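
For anyone reading along: for_each_shadow_entry_lockless() already does
the init/okay/next dance and reads the spte before entering the loop
body, which is why the open-coded loop can go. From memory (so
double-check against the tree this applies to), the macro in mmu.c
looks roughly like:

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))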
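
The rewritten dump loop also leans on the { 0 } initialization: slots
for levels the walk never reached stay zero and are skipped. A toy
user-space sketch of just that loop (spte values invented, only the
skip-zero-slots behaviour matters), for a walk that stopped at level 2:

/* Stand-alone illustration, not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define PT64_ROOT_LEVEL 4

int main(void)
{
	/* walk stopped at level 2, so the level-1 slot stays zero */
	uint64_t sptes[PT64_ROOT_LEVEL] = { 0, 0xbeef, 0xcafe, 0xf00d };
	int level;

	for (level = PT64_ROOT_LEVEL; level > 0; level--) {
		if (!sptes[level - 1])
			continue;	/* level was never visited */
		printf("------ spte 0x%llx level %d.\n",
		       (unsigned long long)sptes[level - 1], level);
	}
	return 0;
}

This prints levels 4, 3 and 2 in order, matching what the old
root-to-leaf countdown produced.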