On 8/18/21 11:55 AM, Wei Huang wrote:
> When the 5-level page table CPU flag is exposed, KVM code needs to handle
> this case by pointing mmu->root_hpa to a properly-constructed 5-level page
> table.
>
> Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
> ---

...

>  	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
> -	if (!pml4_root) {
> -		free_page((unsigned long)pae_root);
> -		return -ENOMEM;
> +	if (!pml4_root)
> +		goto err_pml4;
> +
> +	if (mmu->shadow_root_level > PT64_ROOT_4LEVEL) {
> +		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
> +		if (!pml5_root)
> +			goto err_pml5;
>  	}
>
>  	mmu->pae_root = pae_root;
>  	mmu->pml4_root = pml4_root;
> +	mmu->pml5_root = pml5_root;

It looks like pml5_root could be used uninitialized here. You should
initialize it to NULL, or set it to NULL in an else path of the new
check above.

Thanks,
Tom

>
>  	return 0;
> +err_pml5:
> +	free_page((unsigned long)pml4_root);
> +err_pml4:
> +	free_page((unsigned long)pae_root);
> +	return -ENOMEM;
>  }
>
>  void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
> @@ -5364,6 +5377,7 @@ static void free_mmu_pages(struct kvm_mmu *mmu)
>  		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
>  	free_page((unsigned long)mmu->pae_root);
>  	free_page((unsigned long)mmu->pml4_root);
> +	free_page((unsigned long)mmu->pml5_root);
>  }
>
>  static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
>