On Mon, Aug 26, 2019 at 02:16:14PM +0200, Vitaly Kuznetsov wrote: > Jiří Paleček <jpalecek@xxxxxx> writes: > > @@ -5646,7 +5647,19 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) > > vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; > > > > vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; > > - return alloc_mmu_pages(vcpu); > > + > > + ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu); > > + if (ret) > > + return ret; > > + > > + ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); > > + if (ret) > > + goto fail_allocate_root; > > (personal preference) here you're just jumping over 'return' so I'd > prefer this to be written as: > > ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); > if (!ret) > return 0; > > free_mmu_pages(&vcpu->arch.guest_mmu); > return ret; > > and no label/goto required. Or alternatively: ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); if (ret) free_mmu_pages(&vcpu->arch.guest_mmu); return ret; since error handling is usually *not* the fall-through path. > > + > > + return ret; > > + fail_allocate_root: > > + free_mmu_pages(&vcpu->arch.guest_mmu); > > + return ret; > > } > > > > static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, > > @@ -6102,7 +6115,8 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) > > void kvm_mmu_destroy(struct kvm_vcpu *vcpu) > > { > > kvm_mmu_unload(vcpu); > > - free_mmu_pages(vcpu); > > + free_mmu_pages(&vcpu->arch.root_mmu); > > + free_mmu_pages(&vcpu->arch.guest_mmu); > > mmu_free_memory_caches(vcpu); > > } > > > > -- > > 2.23.0.rc1 > > > > -- > Vitaly