Add the following memory operation APIs for the host VM:
 - gva2gpa
 - read_gva/write_gva
 - read_gpa/write_gpa

These ops will be used later for VMX instruction emulation. For
example, the VMXON instruction places the pointer to its VMXON region
in guest memory, so pKVM needs to read that pointer from a GVA.

Signed-off-by: Jason Chen CJ <jason.cj.chen@xxxxxxxxx>
---
 arch/x86/kvm/vmx/pkvm/hyp/memory.c | 106 +++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/pkvm/hyp/memory.h |  11 +++
 arch/x86/kvm/vmx/pkvm/hyp/vmexit.c |   1 +
 3 files changed, 118 insertions(+)

diff --git a/arch/x86/kvm/vmx/pkvm/hyp/memory.c b/arch/x86/kvm/vmx/pkvm/hyp/memory.c
index d3e479860189..e99fa72cedac 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/memory.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/memory.c
@@ -6,7 +6,10 @@
 #include <linux/types.h>
 #include <asm/kvm_pkvm.h>
 
+#include <pkvm.h>
 #include "memory.h"
+#include "pgtable.h"
+#include "pkvm_hyp.h"
 
 unsigned long __page_base_offset;
 unsigned long __symbol_base_offset;
@@ -63,3 +66,106 @@ bool mem_range_included(struct mem_range *child, struct mem_range *parent)
 {
 	return parent->start <= child->start && child->end <= parent->end;
 }
+
+void *host_gpa2hva(unsigned long gpa)
+{
+	/* host gpa = hpa */
+	return pkvm_phys_to_virt(gpa);
+}
+
+extern struct pkvm_pgtable_ops mmu_ops;
+static struct pkvm_mm_ops mm_ops = {
+	.phys_to_virt = host_gpa2hva,
+};
+
+static int check_translation(struct kvm_vcpu *vcpu, gpa_t gpa,
+		u64 prot, u32 access, struct x86_exception *exception)
+{
+	/* TODO: exception for #PF */
+	return 0;
+}
+
+int gva2gpa(struct kvm_vcpu *vcpu, gva_t gva, gpa_t *gpa,
+		u32 access, struct x86_exception *exception)
+{
+	struct pkvm_pgtable guest_mmu;
+	gpa_t _gpa;
+	u64 prot;
+	int pg_level;
+
+	/* caller should ensure exception is not NULL */
+	WARN_ON(exception == NULL);
+
+	memset(exception, 0, sizeof(*exception));
+
+	/*TODO: support other paging mode beside long mode */
+	guest_mmu.root_pa = vcpu->arch.cr3 & PAGE_MASK;
+	pkvm_pgtable_init(&guest_mmu, &mm_ops, &mmu_ops, &pkvm_hyp->mmu_cap, false);
+	pkvm_pgtable_lookup(&guest_mmu, (unsigned long)gva,
+			(unsigned long *)&_gpa, &prot, &pg_level);
+	*gpa = _gpa;
+	if (_gpa == INVALID_ADDR)
+		return -EFAULT;
+
+	return check_translation(vcpu, _gpa, prot, access, exception);
+}
+
+/* only support host VM now */
+static int copy_gva(struct kvm_vcpu *vcpu, gva_t gva, void *addr,
+		unsigned int bytes, struct x86_exception *exception, bool from_guest)
+{
+	u32 access = VMX_AR_DPL(vmcs_read32(GUEST_SS_AR_BYTES)) == 3 ?
+			PFERR_USER_MASK : 0;
+	gpa_t gpa;
+	void *hva;
+	int ret;
+
+	/*FIXME: need check the gva per page granularity */
+	ret = gva2gpa(vcpu, gva, &gpa, access, exception);
+	if (ret)
+		return ret;
+
+	hva = host_gpa2hva(gpa);
+	if (from_guest)
+		memcpy(addr, hva, bytes);
+	else
+		memcpy(hva, addr, bytes);
+
+	return bytes;
+}
+
+int read_gva(struct kvm_vcpu *vcpu, gva_t gva, void *addr,
+		unsigned int bytes, struct x86_exception *exception)
+{
+	return copy_gva(vcpu, gva, addr, bytes, exception, true);
+}
+
+int write_gva(struct kvm_vcpu *vcpu, gva_t gva, void *addr,
+		unsigned int bytes, struct x86_exception *exception)
+{
+	return copy_gva(vcpu, gva, addr, bytes, exception, false);
+}
+
+/* only support host VM now */
+static int copy_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, void *addr,
+		unsigned int bytes, bool from_guest)
+{
+	void *hva;
+
+	hva = host_gpa2hva(gpa);
+	if (from_guest)
+		memcpy(addr, hva, bytes);
+	else
+		memcpy(hva, addr, bytes);
+
+	return bytes;
+}
+
+int read_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, void *addr, unsigned int bytes)
+{
+	return copy_gpa(vcpu, gpa, addr, bytes, true);
+}
+
+int write_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, void *addr, unsigned int bytes)
+{
+	return copy_gpa(vcpu, gpa, addr, bytes, false);
+}
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/memory.h b/arch/x86/kvm/vmx/pkvm/hyp/memory.h
index c9175272096b..4a75d8dff1b3 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/memory.h
+++ b/arch/x86/kvm/vmx/pkvm/hyp/memory.h
@@ -20,4 +20,15 @@ struct mem_range {
 bool find_mem_range(unsigned long addr, struct mem_range *range);
 bool mem_range_included(struct mem_range *child, struct mem_range *parent);
 
+#include <linux/kvm_host.h>
+void *host_gpa2hva(unsigned long gpa);
+int gva2gpa(struct kvm_vcpu *vcpu, gva_t gva, gpa_t *gpa,
+		u32 access, struct x86_exception *exception);
+int read_gva(struct kvm_vcpu *vcpu, gva_t gva, void *addr,
+		unsigned int bytes, struct x86_exception *exception);
+int write_gva(struct kvm_vcpu *vcpu, gva_t gva, void *addr,
+		unsigned int bytes, struct x86_exception *exception);
+int read_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, void *addr, unsigned int bytes);
+int write_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, void *addr, unsigned int bytes);
+
 #endif
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/vmexit.c b/arch/x86/kvm/vmx/pkvm/hyp/vmexit.c
index e8015a6830b0..02224d93384a 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/vmexit.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/vmexit.c
@@ -154,6 +154,7 @@ int pkvm_main(struct kvm_vcpu *vcpu)
 		}
 
 		vcpu->arch.cr2 = native_read_cr2();
+		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
 
 		vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
 		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-- 
2.25.1
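
For context, a rough sketch of how a later VMXON emulation path could use
these APIs (not part of this patch): read_gva() fetches the 64-bit
VMXON-region pointer from the instruction's guest-memory operand, and
read_gpa() can then read the region's revision id. handle_vmxon() and the
already-decoded operand_gva are illustrative assumptions only.

/*
 * Illustrative sketch only: handle_vmxon() and operand_gva are assumed
 * helpers/arguments, not part of this patch; only read_gva()/read_gpa()
 * come from it.
 */
static int handle_vmxon(struct kvm_vcpu *vcpu, gva_t operand_gva)
{
	struct x86_exception e;
	u64 vmxon_region_pa;
	u32 revision_id;
	int ret;

	/* VMXON's memory operand holds the 64-bit VMXON-region address */
	ret = read_gva(vcpu, operand_gva, &vmxon_region_pa,
		       sizeof(vmxon_region_pa), &e);
	if (ret < 0)
		return ret;

	/* host VM: gpa == hpa, so the region can be read back directly */
	ret = read_gpa(vcpu, vmxon_region_pa, &revision_id, sizeof(revision_id));
	if (ret < 0)
		return ret;

	/* ... check alignment and revision_id against the VMCS revision ... */
	return 0;
}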