Just like we removed kvm_arch->apic_access_page, nested_vmx->apic_access_page becomes useless for the same reason. This patch removes nested_vmx->apic_access_page, and instead uses nested_get_page() to pin the page in memory when we need it, and unpins it afterwards. Signed-off-by: Tang Chen <tangchen@xxxxxxxxxxxxxx> --- arch/x86/kvm/vmx.c | 31 +++++++++---------------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 058c373..4aa73cb 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -374,11 +374,6 @@ struct nested_vmx { u64 vmcs01_tsc_offset; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; - /* - * Guest pages referred to in vmcs02 with host-physical pointers, so - * we must keep them pinned while L2 runs. - */ - struct page *apic_access_page; u64 msr_ia32_feature_control; struct hrtimer preemption_timer; @@ -6154,11 +6149,6 @@ static void free_nested(struct vcpu_vmx *vmx) nested_release_vmcs12(vmx); if (enable_shadow_vmcs) free_vmcs(vmx->nested.current_shadow_vmcs); - /* Unpin physical memory we referred to in current vmcs02 */ - if (vmx->nested.apic_access_page) { - nested_release_page(vmx->nested.apic_access_page); - vmx->nested.apic_access_page = 0; - } nested_free_all_saved_vmcss(vmx); } @@ -7983,28 +7973,31 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) exec_control |= vmcs12->secondary_vm_exec_control; if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) { + struct page *page; /* * Translate L1 physical address to host physical * address for vmcs02. Keep the page pinned, so this * physical address remains valid. We keep a reference * to it so we can release it later. 
*/ - if (vmx->nested.apic_access_page) /* shouldn't happen */ - nested_release_page(vmx->nested.apic_access_page); - vmx->nested.apic_access_page = - nested_get_page(vcpu, vmcs12->apic_access_addr); + page = nested_get_page(vcpu, vmcs12->apic_access_addr); /* * If translation failed, no matter: This feature asks * to exit when accessing the given address, and if it * can never be accessed, this feature won't do * anything anyway. */ - if (!vmx->nested.apic_access_page) + if (!page) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; else vmcs_write64(APIC_ACCESS_ADDR, - page_to_phys(vmx->nested.apic_access_page)); + page_to_phys(page)); + /* + * Do not pin nested vm's apic access page in memory so + * that memory hotplug process is able to migrate it. + */ + put_page(page); } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) { struct page *page = gfn_to_page(vmx->vcpu.kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); @@ -8807,12 +8800,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; - /* Unpin physical memory we referred to in vmcs02 */ - if (vmx->nested.apic_access_page) { - nested_release_page(vmx->nested.apic_access_page); - vmx->nested.apic_access_page = 0; - } - /* * Do not call kvm_reload_apic_access_page() because we are now * running, mmu_notifier will force to reload the page's hpa for L2 -- 1.8.3.1 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html