Hello Jim Mattson, The patch 3a2936dedd20: "kvm: mmu: Don't expose private memslots to L2" from May 9, 2018, leads to the following static checker warning: arch/x86/kvm/mmu/mmu.c:3686 nonpaging_map() error: uninitialized symbol 'map_writable'. arch/x86/kvm/mmu/mmu.c 3665 3666 if (fast_page_fault(vcpu, v, level, error_code)) 3667 return RET_PF_RETRY; 3668 3669 mmu_seq = vcpu->kvm->mmu_notifier_seq; 3670 smp_rmb(); 3671 3672 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) ^^^^^^^^^^^^^ The patch introduces a new path where try_async_pf() returns false without initializing 'map_writable'. 3673 return RET_PF_RETRY; 3674 3675 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) 3676 return r; 3677 3678 r = RET_PF_RETRY; 3679 spin_lock(&vcpu->kvm->mmu_lock); 3680 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) 3681 goto out_unlock; 3682 if (make_mmu_pages_available(vcpu) < 0) 3683 goto out_unlock; 3684 if (likely(!force_pt_level)) 3685 transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); 3686 r = __direct_map(vcpu, v, write, map_writable, level, pfn, ^^^^^^^^^^^^ 3687 prefault, false); 3688 out_unlock: 3689 spin_unlock(&vcpu->kvm->mmu_lock); 3690 kvm_release_pfn_clean(pfn); 3691 return r; regards, dan carpenter