From: "Yingshiuan Pan" <yingshiuan.pan@xxxxxxxxxxxx>

This page fault handler helps the GenieZone hypervisor do demand paging.
On a lower-level translation fault, the GenieZone hypervisor first checks
that the faulting GPA (guest physical address, or IPA on ARM) is valid,
e.g. within a registered memory region, and then sets up
vcpu_run->exit_reason with the information needed to return to the gzvm
driver. With that fault information, the gzvm driver looks up the
corresponding physical address and issues MT_HVC_GZVM_MAP_GUEST to
request that the hypervisor map the found PA to the faulting GPA (IPA).

There is one exception: for a protected VM, we populate the VM's entire
memory region in advance in order to improve performance.

Signed-off-by: Yingshiuan Pan <yingshiuan.pan@xxxxxxxxxxxx>
Signed-off-by: Jerry Wang <ze-yu.wang@xxxxxxxxxxxx>
Signed-off-by: kevenny hsieh <kevenny.hsieh@xxxxxxxxxxxx>
Signed-off-by: Liju Chen <liju-clr.chen@xxxxxxxxxxxx>
Signed-off-by: Yi-De Wu <yi-de.wu@xxxxxxxxxxxx>
---
 arch/arm64/geniezone/gzvm_arch_common.h |   2 +
 arch/arm64/geniezone/vm.c               | 130 ++++++++++++++++++++++++
 drivers/virt/geniezone/Makefile         |   3 +-
 drivers/virt/geniezone/gzvm_exception.c |  39 +++++++
 drivers/virt/geniezone/gzvm_main.c      |   2 +
 drivers/virt/geniezone/gzvm_mmu.c       |  32 ++++++
 drivers/virt/geniezone/gzvm_vcpu.c      |   6 +-
 drivers/virt/geniezone/gzvm_vm.c        |  95 +++++------------
 include/linux/gzvm_drv.h                |   7 ++
 include/uapi/linux/gzvm.h               |  11 ++
 10 files changed, 253 insertions(+), 74 deletions(-)
 create mode 100644 drivers/virt/geniezone/gzvm_exception.c
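[Editor's illustration, not part of the patch] The flow above works in
guest frame numbers: the faulting GPA is converted to a GFN and back to a
page-aligned address with the kernel's PHYS_PFN()/PFN_PHYS() helpers,
which are shifts by PAGE_SHIFT. A minimal userspace sketch of that
arithmetic, assuming 4 KiB pages and redefining the two macros locally:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(addr)	((uint64_t)(addr) >> PAGE_SHIFT)

int main(void)
{
	uint64_t fault_gpa = 0x40001234;	/* example faulting guest address */
	uint64_t gfn = PHYS_PFN(fault_gpa);	/* guest frame number: 0x40001 */

	/* mapping is done per page, so the handler works on the aligned base */
	assert(PFN_PHYS(gfn) == 0x40001000);
	printf("gfn=0x%llx page base=0x%llx\n",
	       (unsigned long long)gfn, (unsigned long long)PFN_PHYS(gfn));
	return 0;
}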
diff --git a/arch/arm64/geniezone/gzvm_arch_common.h b/arch/arm64/geniezone/gzvm_arch_common.h
index 82d2c44e819b..c21eb8ca8d4b 100644
--- a/arch/arm64/geniezone/gzvm_arch_common.h
+++ b/arch/arm64/geniezone/gzvm_arch_common.h
@@ -24,6 +24,7 @@ enum {
 	GZVM_FUNC_INFORM_EXIT = 14,
 	GZVM_FUNC_MEMREGION_PURPOSE = 15,
 	GZVM_FUNC_SET_DTB_CONFIG = 16,
+	GZVM_FUNC_MAP_GUEST = 17,
 	NR_GZVM_FUNC,
 };
@@ -48,6 +49,7 @@ enum {
 #define MT_HVC_GZVM_INFORM_EXIT		GZVM_HCALL_ID(GZVM_FUNC_INFORM_EXIT)
 #define MT_HVC_GZVM_MEMREGION_PURPOSE	GZVM_HCALL_ID(GZVM_FUNC_MEMREGION_PURPOSE)
 #define MT_HVC_GZVM_SET_DTB_CONFIG	GZVM_HCALL_ID(GZVM_FUNC_SET_DTB_CONFIG)
+#define MT_HVC_GZVM_MAP_GUEST		GZVM_HCALL_ID(GZVM_FUNC_MAP_GUEST)
 
 #define GIC_V3_NR_LRS			16
 
diff --git a/arch/arm64/geniezone/vm.c b/arch/arm64/geniezone/vm.c
index cf18b607bc81..d236b6cf84b3 100644
--- a/arch/arm64/geniezone/vm.c
+++ b/arch/arm64/geniezone/vm.c
@@ -161,6 +161,122 @@ static int gzvm_vm_ioctl_get_pvmfw_size(struct gzvm *gzvm,
 	return 0;
 }
 
+/**
+ * fill_constituents() - Populate pa to buffer until full
+ * @consti: Pointer to struct mem_region_addr_range.
+ * @consti_cnt: Constituent count.
+ * @max_nr_consti: Maximum number of constituent count.
+ * @gfn: Guest frame number.
+ * @total_pages: Total page numbers.
+ * @slot: Pointer to struct gzvm_memslot.
+ *
+ * Return: how many pages we've filled in, negative if error
+ */
+static int fill_constituents(struct mem_region_addr_range *consti,
+			     int *consti_cnt, int max_nr_consti, u64 gfn,
+			     u32 total_pages, struct gzvm_memslot *slot)
+{
+	u64 pfn, prev_pfn, gfn_end;
+	int nr_pages = 1;
+	int i = 0;
+
+	if (unlikely(total_pages == 0))
+		return -EINVAL;
+	gfn_end = gfn + total_pages;
+
+	/* entry 0 */
+	if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
+		return -EFAULT;
+	consti[0].address = PFN_PHYS(pfn);
+	consti[0].pg_cnt = 1;
+	gfn++;
+	prev_pfn = pfn;
+
+	while (i < max_nr_consti && gfn < gfn_end) {
+		if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
+			return -EFAULT;
+		if (pfn == (prev_pfn + 1)) {
+			consti[i].pg_cnt++;
+		} else {
+			i++;
+			if (i >= max_nr_consti)
+				break;
+			consti[i].address = PFN_PHYS(pfn);
+			consti[i].pg_cnt = 1;
+		}
+		prev_pfn = pfn;
+		gfn++;
+		nr_pages++;
+	}
+	if (i != max_nr_consti)
+		i++;
+	*consti_cnt = i;
+
+	return nr_pages;
+}
+
+/**
+ * populate_mem_region() - Iterate all mem slot and populate pa to buffer until it's full
+ * @gzvm: Pointer to struct gzvm.
+ *
+ * Return: 0 if it is successful, negative if error
+ */
+static int populate_mem_region(struct gzvm *gzvm)
+{
+	int slot_cnt = 0;
+
+	while (slot_cnt < GZVM_MAX_MEM_REGION && gzvm->memslot[slot_cnt].npages != 0) {
+		struct gzvm_memslot *memslot = &gzvm->memslot[slot_cnt];
+		struct gzvm_memory_region_ranges *region;
+		int max_nr_consti, remain_pages;
+		u64 gfn, gfn_end;
+		u32 buf_size;
+
+		buf_size = PAGE_SIZE * 2;
+		region = alloc_pages_exact(buf_size, GFP_KERNEL);
+		if (!region)
+			return -ENOMEM;
+
+		max_nr_consti = (buf_size - sizeof(*region)) /
+				sizeof(struct mem_region_addr_range);
+
+		region->slot = memslot->slot_id;
+		remain_pages = memslot->npages;
+		gfn = memslot->base_gfn;
+		gfn_end = gfn + remain_pages;
+
+		while (gfn < gfn_end) {
+			int nr_pages;
+
+			nr_pages = fill_constituents(region->constituents,
+						     &region->constituent_cnt,
+						     max_nr_consti, gfn,
+						     remain_pages, memslot);
+
+			if (nr_pages < 0) {
+				pr_err("Failed to fill constituents\n");
+				free_pages_exact(region, buf_size);
+				return -EFAULT;
+			}
+
+			region->gpa = PFN_PHYS(gfn);
+			region->total_pages = nr_pages;
+			remain_pages -= nr_pages;
+			gfn += nr_pages;
+
+			if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
+						    virt_to_phys(region))) {
+				pr_err("Failed to register memregion to hypervisor\n");
+				free_pages_exact(region, buf_size);
+				return -EFAULT;
+			}
+		}
+		free_pages_exact(region, buf_size);
+		++slot_cnt;
+	}
+	return 0;
+}
+
 /**
  * gzvm_vm_ioctl_cap_pvm() - Proceed GZVM_CAP_PROTECTED_VM's subcommands
  * @gzvm: Pointer to struct gzvm.
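[Editor's illustration, not part of the patch] fill_constituents() above
coalesces physically contiguous pages into (address, pg_cnt) ranges so
that one hypercall can describe many pages at once. A standalone toy
version of that run-length coalescing, with hypothetical names and 4 KiB
pages assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

struct constituent {
	uint64_t address;	/* physical address of the first page of the run */
	uint64_t pg_cnt;	/* number of contiguous pages in the run */
};

static int coalesce(const uint64_t *pfns, int nr, struct constituent *out, int max)
{
	int used = 0;

	for (int i = 0; i < nr; i++) {
		if (used && pfns[i] == pfns[i - 1] + 1) {
			out[used - 1].pg_cnt++;	/* extend the current run */
			continue;
		}
		if (used == max)
			return -1;	/* buffer full; caller would flush and retry */
		out[used].address = pfns[i] << PAGE_SHIFT;
		out[used].pg_cnt = 1;
		used++;
	}
	return used;
}

int main(void)
{
	/* three runs: {0x100,0x101,0x102}, {0x200}, {0x300,0x301} */
	uint64_t pfns[] = { 0x100, 0x101, 0x102, 0x200, 0x300, 0x301 };
	struct constituent c[4];
	int n = coalesce(pfns, 6, c, 4);

	for (int i = 0; i < n; i++)
		printf("constituent %d: addr=0x%llx pages=%llu\n", i,
		       (unsigned long long)c[i].address,
		       (unsigned long long)c[i].pg_cnt);
	return 0;
}

When the two-page buffer fills up, populate_mem_region() registers what it
has via gzvm_arch_set_memregion() and continues from the next GFN, which
is why fill_constituents() reports how many pages it consumed.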
@@ -182,6 +298,11 @@ static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
 	case GZVM_CAP_PVM_SET_PVMFW_GPA:
 		fallthrough;
 	case GZVM_CAP_PVM_SET_PROTECTED_VM:
+		/*
+		 * To improve performance for protected VM, we have to populate VM's memory
+		 * before VM booting
+		 */
+		populate_mem_region(gzvm);
 		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
 		return ret;
 	case GZVM_CAP_PVM_GET_PVMFW_SIZE:
@@ -235,3 +356,12 @@ u64 gzvm_hva_to_pa_arch(u64 hva)
 		return GZVM_PA_ERR_BAD;
 	return par;
 }
+
+int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
+			u64 nr_pages)
+{
+	struct arm_smccc_res res;
+
+	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
+				    pfn, gfn, nr_pages, 0, 0, &res);
+}
diff --git a/drivers/virt/geniezone/Makefile b/drivers/virt/geniezone/Makefile
index b56ff8fa4039..aaff04bbe854 100644
--- a/drivers/virt/geniezone/Makefile
+++ b/drivers/virt/geniezone/Makefile
@@ -8,4 +8,5 @@ GZVM_DIR ?= ../../../drivers/virt/geniezone
 
 gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_mmu.o \
 	  $(GZVM_DIR)/gzvm_vm.o $(GZVM_DIR)/gzvm_vcpu.o \
-	  $(GZVM_DIR)/gzvm_irqfd.o $(GZVM_DIR)/gzvm_ioeventfd.o
+	  $(GZVM_DIR)/gzvm_irqfd.o $(GZVM_DIR)/gzvm_ioeventfd.o \
+	  $(GZVM_DIR)/gzvm_exception.o
diff --git a/drivers/virt/geniezone/gzvm_exception.c b/drivers/virt/geniezone/gzvm_exception.c
new file mode 100644
index 000000000000..31fdb4ae8db4
--- /dev/null
+++ b/drivers/virt/geniezone/gzvm_exception.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/gzvm_drv.h>
+
+/**
+ * gzvm_handle_guest_exception() - Handle guest exception
+ * @vcpu: Pointer to struct gzvm_vcpu
+ * Return:
+ * * true - This exception has been processed, no need to return to VMM.
+ * * false - This exception has not been processed, userspace handling is required.
+ */
+bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu)
+{
+	int ret;
+
+	for (int i = 0; i < ARRAY_SIZE(vcpu->run->exception.reserved); i++) {
+		if (vcpu->run->exception.reserved[i])
+			return false;
+	}
+
+	switch (vcpu->run->exception.exception) {
+	case GZVM_EXCEPTION_PAGE_FAULT:
+		ret = gzvm_handle_page_fault(vcpu);
+		break;
+	case GZVM_EXCEPTION_UNKNOWN:
+		fallthrough;
+	default:
+		ret = -EFAULT;
+	}
+
+	if (!ret)
+		return true;
+	else
+		return false;
+}
diff --git a/drivers/virt/geniezone/gzvm_main.c b/drivers/virt/geniezone/gzvm_main.c
index 4e5d1b83df4a..af5ab8949206 100644
--- a/drivers/virt/geniezone/gzvm_main.c
+++ b/drivers/virt/geniezone/gzvm_main.c
@@ -28,6 +28,8 @@ int gzvm_err_to_errno(unsigned long err)
 		return 0;
 	case ERR_NO_MEMORY:
 		return -ENOMEM;
+	case ERR_INVALID_ARGS:
+		return -EINVAL;
 	case ERR_NOT_SUPPORTED:
 		return -EOPNOTSUPP;
 	case ERR_NOT_IMPLEMENTED:
diff --git a/drivers/virt/geniezone/gzvm_mmu.c b/drivers/virt/geniezone/gzvm_mmu.c
index 17d696992d2c..4fdbdd8e809d 100644
--- a/drivers/virt/geniezone/gzvm_mmu.c
+++ b/drivers/virt/geniezone/gzvm_mmu.c
@@ -106,3 +106,35 @@ int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn,
 
 	return 0;
 }
+
+/**
+ * gzvm_handle_page_fault() - Handle guest page fault, find corresponding page
+ *                            for the faulting gpa
+ * @vcpu: Pointer to struct gzvm_vcpu of the faulting vcpu
+ *
+ * Return:
+ * * 0		- Success to handle guest page fault
+ * * -EFAULT	- Failed to map phys addr to guest's GPA
+ */
+int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu)
+{
+	struct gzvm *vm = vcpu->gzvm;
+	u64 pfn, gfn;
+	int memslot_id;
+	int ret;
+
+	gfn = PHYS_PFN(vcpu->run->exception.fault_gpa);
+	memslot_id = gzvm_find_memslot(vm, gfn);
+	if (unlikely(memslot_id < 0))
+		return -EFAULT;
+
+	ret = gzvm_gfn_to_pfn_memslot(&vm->memslot[memslot_id], gfn, &pfn);
+	if (unlikely(ret))
+		return -EFAULT;
+
+	ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
+	if (unlikely(ret))
+		return -EFAULT;
+
+	return 0;
+}
diff --git a/drivers/virt/geniezone/gzvm_vcpu.c b/drivers/virt/geniezone/gzvm_vcpu.c
index 3082dbdecb5d..0c62fe7f2c37 100644
--- a/drivers/virt/geniezone/gzvm_vcpu.c
+++ b/drivers/virt/geniezone/gzvm_vcpu.c
@@ -114,9 +114,11 @@ static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void * __user argp)
 		 * it's geniezone's responsibility to fill corresponding data
 		 * structure
 		 */
-		case GZVM_EXIT_HYPERCALL:
-			fallthrough;
 		case GZVM_EXIT_EXCEPTION:
+			if (!gzvm_handle_guest_exception(vcpu))
+				need_userspace = true;
+			break;
+		case GZVM_EXIT_HYPERCALL:
 			fallthrough;
 		case GZVM_EXIT_DEBUG:
 			fallthrough;
diff --git a/drivers/virt/geniezone/gzvm_vm.c b/drivers/virt/geniezone/gzvm_vm.c
index 79fe6eb4196f..9f7e44521de5 100644
--- a/drivers/virt/geniezone/gzvm_vm.c
+++ b/drivers/virt/geniezone/gzvm_vm.c
@@ -24,57 +24,28 @@ u64 gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn)
 }
 
 /**
- * fill_constituents() - Populate pa to buffer until full
- * @consti: Pointer to struct mem_region_addr_range.
- * @consti_cnt: Constituent count.
- * @max_nr_consti: Maximum number of constituent count.
- * @gfn: Guest frame number.
- * @total_pages: Total page numbers.
- * @slot: Pointer to struct gzvm_memslot.
+ * gzvm_find_memslot() - Find memslot containing this @gfn
+ * @vm: Pointer to struct gzvm
+ * @gfn: Guest frame number
  *
- * Return: how many pages we've fill in, negative if error
+ * Return:
+ * * >=0		- Index of memslot
+ * * -EFAULT		- Not found
  */
-static int fill_constituents(struct mem_region_addr_range *consti,
-			     int *consti_cnt, int max_nr_consti, u64 gfn,
-			     u32 total_pages, struct gzvm_memslot *slot)
+int gzvm_find_memslot(struct gzvm *vm, u64 gfn)
 {
-	u64 pfn, prev_pfn, gfn_end;
-	int nr_pages = 1;
-	int i = 0;
+	int i;
 
-	if (unlikely(total_pages == 0))
-		return -EINVAL;
-	gfn_end = gfn + total_pages;
+	for (i = 0; i < GZVM_MAX_MEM_REGION; i++) {
+		if (vm->memslot[i].npages == 0)
+			continue;
 
-	/* entry 0 */
-	if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
-		return -EFAULT;
-	consti[0].address = PFN_PHYS(pfn);
-	consti[0].pg_cnt = 1;
-	gfn++;
-	prev_pfn = pfn;
-
-	while (i < max_nr_consti && gfn < gfn_end) {
-		if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
-			return -EFAULT;
-		if (pfn == (prev_pfn + 1)) {
-			consti[i].pg_cnt++;
-		} else {
-			i++;
-			if (i >= max_nr_consti)
-				break;
-			consti[i].address = PFN_PHYS(pfn);
-			consti[i].pg_cnt = 1;
-		}
-		prev_pfn = pfn;
-		gfn++;
-		nr_pages++;
+		if (gfn >= vm->memslot[i].base_gfn &&
+		    gfn < vm->memslot[i].base_gfn + vm->memslot[i].npages)
+			return i;
 	}
-	if (i != max_nr_consti)
-		i++;
-	*consti_cnt = i;
 
-	return nr_pages;
+	return -EFAULT;
 }
 
 /**
@@ -89,42 +60,24 @@ register_memslot_addr_range(struct gzvm *gzvm, struct gzvm_memslot *memslot)
 {
 	struct gzvm_memory_region_ranges *region;
 	u32 buf_size = PAGE_SIZE * 2;
-	int max_nr_consti, remain_pages;
-	u64 gfn, gfn_end;
+	u64 gfn;
 
 	region = alloc_pages_exact(buf_size, GFP_KERNEL);
 	if (!region)
 		return -ENOMEM;
-	max_nr_consti = (buf_size - sizeof(*region)) /
-			sizeof(struct mem_region_addr_range);
 
 	region->slot = memslot->slot_id;
-	remain_pages = memslot->npages;
+	region->total_pages = memslot->npages;
 	gfn = memslot->base_gfn;
-	gfn_end = gfn + remain_pages;
-	while (gfn < gfn_end) {
-		int nr_pages = fill_constituents(region->constituents,
-						 &region->constituent_cnt,
-						 max_nr_consti, gfn,
-						 remain_pages, memslot);
-		if (nr_pages < 0) {
-			pr_err("Failed to fill constituents\n");
-			free_pages_exact(region, buf_size);
-			return nr_pages;
-		}
-		region->gpa = PFN_PHYS(gfn);
-		region->total_pages = nr_pages;
+	region->gpa = PFN_PHYS(gfn);
 
-		remain_pages -= nr_pages;
-		gfn += nr_pages;
-
-		if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
-					    virt_to_phys(region))) {
-			pr_err("Failed to register memregion to hypervisor\n");
-			free_pages_exact(region, buf_size);
-			return -EFAULT;
-		}
+	if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
+				    virt_to_phys(region))) {
+		pr_err("Failed to register memregion to hypervisor\n");
+		free_pages_exact(region, buf_size);
+		return -EFAULT;
 	}
+
 	free_pages_exact(region, buf_size);
 	return 0;
 }
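[Editor's illustration, not part of the patch] gzvm_find_memslot() above
treats each slot as the half-open GFN range [base_gfn, base_gfn + npages).
A small self-contained sketch of that containment check, using
hypothetical toy_* names:

#include <assert.h>
#include <stdint.h>

struct toy_memslot {
	uint64_t base_gfn;
	uint64_t npages;	/* npages == 0 means the slot is unused */
};

static int toy_find_memslot(const struct toy_memslot *slots, int nr, uint64_t gfn)
{
	for (int i = 0; i < nr; i++) {
		if (slots[i].npages == 0)
			continue;
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return i;
	}
	return -1;	/* not found; the driver returns -EFAULT here */
}

int main(void)
{
	struct toy_memslot slots[] = {
		{ .base_gfn = 0x40000, .npages = 0x1000 },	/* slot 0 */
		{ .base_gfn = 0x90000, .npages = 0x200 },	/* slot 1 */
	};

	assert(toy_find_memslot(slots, 2, 0x40000) == 0);	/* first page of slot 0 */
	assert(toy_find_memslot(slots, 2, 0x40fff) == 0);	/* last page of slot 0 */
	assert(toy_find_memslot(slots, 2, 0x41000) == -1);	/* one past the end */
	assert(toy_find_memslot(slots, 2, 0x90100) == 1);
	return 0;
}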
diff --git a/include/linux/gzvm_drv.h b/include/linux/gzvm_drv.h
index 43d85dc5d7c0..b9e60fe5dcde 100644
--- a/include/linux/gzvm_drv.h
+++ b/include/linux/gzvm_drv.h
@@ -29,6 +29,7 @@
  */
 #define NO_ERROR                (0)
 #define ERR_NO_MEMORY           (-5)
+#define ERR_INVALID_ARGS        (-8)
 #define ERR_NOT_SUPPORTED       (-24)
 #define ERR_NOT_IMPLEMENTED     (-27)
 #define ERR_FAULT               (-40)
@@ -123,6 +124,8 @@ int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
 int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp);
 int gzvm_arch_create_vm(unsigned long vm_type);
 int gzvm_arch_destroy_vm(u16 vm_id);
+int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
+			u64 nr_pages);
 int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
 				  struct gzvm_enable_cap *cap,
 				  void __user *argp);
@@ -141,6 +144,10 @@ int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
 int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid);
 int gzvm_arch_inform_exit(u16 vm_id);
 
+int gzvm_find_memslot(struct gzvm *vm, u64 gpa);
+int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
+bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);
+
 int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);
 int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
 			 u32 irq, bool level);
diff --git a/include/uapi/linux/gzvm.h b/include/uapi/linux/gzvm.h
index f4f04403d5b3..1f134c55ac2a 100644
--- a/include/uapi/linux/gzvm.h
+++ b/include/uapi/linux/gzvm.h
@@ -185,6 +185,12 @@ enum {
 	GZVM_EXIT_GZ = 0x9292000a,
 };
 
+/* exception definitions of GZVM_EXIT_EXCEPTION */
+enum {
+	GZVM_EXCEPTION_UNKNOWN = 0x0,
+	GZVM_EXCEPTION_PAGE_FAULT = 0x1,
+};
+
 /**
  * struct gzvm_vcpu_run: Same purpose as kvm_run, this struct is
  *			 shared between userspace, kernel and
@@ -209,6 +215,9 @@
  *		 Handle exception occurred in VM
  * @exception: Which exception vector
  * @error_code: Exception error codes
+ * @fault_gpa: Fault GPA (guest physical address or IPA in ARM)
+ * @reserved: Future-proof reservation and reset to zero in hypervisor.
+ *	      Fill up to the union size, 256 bytes.
  * @hypercall: The nested struct in anonymous union.
  *	       Some hypercalls issued from VM must be handled
  * @args: The hypercall's arguments
@@ -255,6 +264,8 @@ struct gzvm_vcpu_run {
 		struct {
 			__u32 exception;
 			__u32 error_code;
+			__u64 fault_gpa;
+			__u64 reserved[30];
 		} exception;
 		/* GZVM_EXIT_HYPERCALL */
 		struct {
-- 
2.18.0
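[Editor's illustration, not part of the patch] From userspace, the new
exit surfaces through the shared gzvm_vcpu_run structure. A minimal sketch
of a VMM consuming it, assuming the vcpu fd, its shared run structure and
the GZVM_RUN vcpu ioctl are set up as in the rest of this series; only the
fields added or used by this patch are touched here:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/gzvm.h>

static int run_vcpu_once(int vcpu_fd, struct gzvm_vcpu_run *run)
{
	/* GZVM_RUN is the vcpu run ioctl defined earlier in this series */
	if (ioctl(vcpu_fd, GZVM_RUN, 0) < 0)
		return -1;

	switch (run->exit_reason) {
	case GZVM_EXIT_EXCEPTION:
		/*
		 * Page faults are normally resolved inside the gzvm driver via
		 * MT_HVC_GZVM_MAP_GUEST; reaching userspace means the driver
		 * could not handle this one (e.g. a GFN outside every memslot).
		 */
		if (run->exception.exception == GZVM_EXCEPTION_PAGE_FAULT)
			fprintf(stderr, "unhandled guest page fault at GPA 0x%llx\n",
				(unsigned long long)run->exception.fault_gpa);
		else
			fprintf(stderr, "unknown guest exception %u\n",
				run->exception.exception);
		return -1;
	default:
		return 0;	/* other exit reasons handled elsewhere */
	}
}

In the common case the exit never reaches userspace at all, since
gzvm_handle_page_fault() resolves the fault in the driver and the vcpu
run loop simply continues.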