From: Chuanxiao Dong <chuanxiao.dong@xxxxxxxxx>

The shadow EPT pool is the memory pool from which the shadow EPT paging
structure pages are allocated. When a shadow EPT is destroyed, its pages
are put back into this pool.

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@xxxxxxxxx>
---
 arch/x86/kvm/vmx/pkvm/hyp/ept.c           |  9 +++++++++
 arch/x86/kvm/vmx/pkvm/hyp/ept.h           |  1 +
 arch/x86/kvm/vmx/pkvm/hyp/init_finalise.c | 13 +++++++++++++
 3 files changed, 23 insertions(+)

diff --git a/arch/x86/kvm/vmx/pkvm/hyp/ept.c b/arch/x86/kvm/vmx/pkvm/hyp/ept.c
index b0a542b47e83..14bc8f4429db 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/ept.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/ept.c
@@ -24,6 +24,8 @@ static struct hyp_pool host_ept_pool;
 static struct pkvm_pgtable host_ept;
 static pkvm_spinlock_t host_ept_lock = __PKVM_SPINLOCK_UNLOCKED;
 
+static struct hyp_pool shadow_ept_pool;
+
 static void flush_tlb_noop(void) { };
 static void *host_ept_zalloc_page(void)
 {
@@ -221,3 +223,10 @@ int handle_host_ept_violation(unsigned long gpa)
 	pkvm_spin_unlock(&host_ept_lock);
 	return ret;
 }
+
+int pkvm_shadow_ept_pool_init(void *ept_pool_base, unsigned long ept_pool_pages)
+{
+	unsigned long pfn = __pkvm_pa(ept_pool_base) >> PAGE_SHIFT;
+
+	return hyp_pool_init(&shadow_ept_pool, pfn, ept_pool_pages, 0);
+}
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/ept.h b/arch/x86/kvm/vmx/pkvm/hyp/ept.h
index d517bf8ec169..c4ad5c269d5c 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/ept.h
+++ b/arch/x86/kvm/vmx/pkvm/hyp/ept.h
@@ -17,5 +17,6 @@ int pkvm_host_ept_unmap(unsigned long vaddr_start, unsigned long phys_start,
 int pkvm_host_ept_init(struct pkvm_pgtable_cap *cap, void *ept_pool_base,
 		       unsigned long ept_pool_pages);
 int handle_host_ept_violation(unsigned long gpa);
+int pkvm_shadow_ept_pool_init(void *ept_pool_base, unsigned long ept_pool_pages);
 
 #endif
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/init_finalise.c b/arch/x86/kvm/vmx/pkvm/hyp/init_finalise.c
index c16b53b7bcd0..8d52a20f6497 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/init_finalise.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/init_finalise.c
@@ -23,6 +23,7 @@ void *pkvm_mmu_pgt_base;
 void *pkvm_vmemmap_base;
 void *host_ept_pgt_base;
+static void *shadow_ept_base;
 
 static int divide_memory_pool(phys_addr_t phys, unsigned long size)
 {
@@ -49,6 +50,12 @@ static int divide_memory_pool(phys_addr_t phys, unsigned long size)
 	if (!host_ept_pgt_base)
 		return -ENOMEM;
 
+	nr_pages = pkvm_shadow_ept_pgtable_pages(PKVM_MAX_NORMAL_VM_NUM +
+						 PKVM_MAX_PROTECTED_VM_NUM);
+	shadow_ept_base = pkvm_early_alloc_contig(nr_pages);
+	if (!shadow_ept_base)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -291,6 +298,12 @@ int __pkvm_init_finalise(struct kvm_vcpu *vcpu, struct pkvm_section sections[],
 
 	pkvm_init_nest();
 
+	ret = pkvm_shadow_ept_pool_init(shadow_ept_base,
+					pkvm_shadow_ept_pgtable_pages(PKVM_MAX_NORMAL_VM_NUM +
+								      PKVM_MAX_PROTECTED_VM_NUM));
+	if (ret)
+		goto out;
+
 	pkvm_init = true;
 
 switch_pgt:
-- 
2.25.1
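
As context for how this pool is meant to be consumed (not part of this
patch): a minimal sketch of shadow EPT page-table allocation/free
callbacks backed by shadow_ept_pool, mirroring the existing
host_ept_zalloc_page() in ept.c. It assumes the hyp_pool API already
used here, i.e. hyp_alloc_pages(pool, order) and hyp_put_page(pool,
addr); the callback names themselves are illustrative.

static void *shadow_ept_zalloc_page(void)
{
	/* One order-0 page per shadow EPT paging structure. */
	return hyp_alloc_pages(&shadow_ept_pool, 0);
}

static void shadow_ept_put_page(void *vaddr)
{
	/* On shadow EPT destroy, paging-structure pages go back to the pool. */
	hyp_put_page(&shadow_ept_pool, vaddr);
}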
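
The pool is sized up front for the worst case across all possible VMs.
pkvm_shadow_ept_pgtable_pages() is defined outside this patch; a
hypothetical shape for it, assuming a fixed per-VM budget of EPT
paging-structure pages (SHADOW_EPT_PAGES_PER_VM is an assumed
constant, not from this series), would be:

static unsigned long pkvm_shadow_ept_pgtable_pages(int nr_vms)
{
	/* Assumed worst-case paging-structure pages reserved per VM. */
	return (unsigned long)nr_vms * SHADOW_EPT_PAGES_PER_VM;
}

Sizing by PKVM_MAX_NORMAL_VM_NUM + PKVM_MAX_PROTECTED_VM_NUM at init
time lets divide_memory_pool() carve the whole reservation out of the
early allocator once, so later shadow EPT allocations need not depend
on the host.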