From: Jerry Wang <ze-yu.wang@xxxxxxxxxxxx>

Unpin the pages when the VM relinquishes the pages or is destroyed.

Signed-off-by: Jerry Wang <ze-yu.wang@xxxxxxxxxxxx>
Co-developed-by: Yingshiuan Pan <yingshiuan.pan@xxxxxxxxxxxx>
Signed-off-by: Yingshiuan Pan <yingshiuan.pan@xxxxxxxxxxxx>
Signed-off-by: Yi-De Wu <yi-de.wu@xxxxxxxxxxxx>
Signed-off-by: Liju Chen <liju-clr.chen@xxxxxxxxxxxx>
---
 drivers/virt/geniezone/gzvm_exception.c | 23 ++++++
 drivers/virt/geniezone/gzvm_mmu.c       | 49 +++++++++++++++++++++++++
 drivers/virt/geniezone/gzvm_vcpu.c      |  6 ++-
 include/linux/soc/mediatek/gzvm_drv.h   |  2 +
 include/uapi/linux/gzvm.h               |  5 +++
 5 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/drivers/virt/geniezone/gzvm_exception.c b/drivers/virt/geniezone/gzvm_exception.c
index 2f5c05045e61..cdfc99d24ded 100644
--- a/drivers/virt/geniezone/gzvm_exception.c
+++ b/drivers/virt/geniezone/gzvm_exception.c
@@ -37,3 +37,26 @@ bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu)
 	else
 		return false;
 }
+
+/**
+ * gzvm_handle_guest_hvc() - Handle guest hvc
+ * @vcpu: Pointer to struct gzvm_vcpu
+ * Return:
+ * * true - This hvc has been handled, no need to go back to the VMM.
+ * * false - This hvc has not been handled, userspace handling is required.
+ */
+bool gzvm_handle_guest_hvc(struct gzvm_vcpu *vcpu)
+{
+	unsigned long ipa;
+	int ret;
+
+	switch (vcpu->run->hypercall.args[0]) {
+	case GZVM_HVC_MEM_RELINQUISH:
+		ipa = vcpu->run->hypercall.args[1];
+		ret = gzvm_handle_relinquish(vcpu, ipa);
+		return (ret == 0) ? true : false;
+	default:
+		break;
+	}
+	return false;
+}
diff --git a/drivers/virt/geniezone/gzvm_mmu.c b/drivers/virt/geniezone/gzvm_mmu.c
index a5aa296ca790..25b13665617d 100644
--- a/drivers/virt/geniezone/gzvm_mmu.c
+++ b/drivers/virt/geniezone/gzvm_mmu.c
@@ -29,6 +29,36 @@ static int gzvm_insert_ppage(struct gzvm *vm, struct gzvm_pinned_page *ppage)
 	return 0;
 }
 
+static int rb_ppage_cmp(const void *key, const struct rb_node *node)
+{
+	struct gzvm_pinned_page *p = container_of(node,
+						  struct gzvm_pinned_page,
+						  node);
+	phys_addr_t ipa = (phys_addr_t)key;
+
+	return (ipa < p->ipa) ? -1 : (ipa > p->ipa);
+}
+
+/* Invoker of this function is responsible for locking */
+static int gzvm_remove_ppage(struct gzvm *vm, phys_addr_t ipa)
+{
+	struct gzvm_pinned_page *ppage;
+	struct rb_node *node;
+
+	node = rb_find((void *)ipa, &vm->pinned_pages, rb_ppage_cmp);
+
+	if (node)
+		rb_erase(node, &vm->pinned_pages);
+	else
+		return 0;
+
+	ppage = container_of(node, struct gzvm_pinned_page, node);
+	unpin_user_pages_dirty_lock(&ppage->page, 1, true);
+	kfree(ppage);
+
+	return 0;
+}
+
 static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa,
 			struct page **out_page)
 {
@@ -77,6 +107,25 @@ static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa,
 	return ret;
 }
 
+/**
+ * gzvm_handle_relinquish() - Handle memory relinquish request from hypervisor
+ *
+ * @vcpu: Pointer to struct gzvm_vcpu
+ * @ipa: Start address (gpa) of the reclaimed page
+ *
+ * Return: Always returns 0 because there is no failure case
+ */
+int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa)
+{
+	struct gzvm *vm = vcpu->gzvm;
+
+	mutex_lock(&vm->mem_lock);
+	gzvm_remove_ppage(vm, ipa);
+	mutex_unlock(&vm->mem_lock);
+
+	return 0;
+}
+
 int gzvm_vm_allocate_guest_page(struct gzvm *vm, struct gzvm_memslot *slot,
 				u64 gfn, u64 *pfn)
 {
diff --git a/drivers/virt/geniezone/gzvm_vcpu.c b/drivers/virt/geniezone/gzvm_vcpu.c
index 3f6bffbafc7a..b0dc3e4f47d7 100644
--- a/drivers/virt/geniezone/gzvm_vcpu.c
+++ b/drivers/virt/geniezone/gzvm_vcpu.c
@@ -112,12 +112,14 @@ static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
 		 * it's geniezone's responsibility to fill corresponding data
 		 * structure
 		 */
+		case GZVM_EXIT_HYPERCALL:
+			if (!gzvm_handle_guest_hvc(vcpu))
+				need_userspace = true;
+			break;
 		case GZVM_EXIT_EXCEPTION:
 			if (!gzvm_handle_guest_exception(vcpu))
 				need_userspace = true;
 			break;
-		case GZVM_EXIT_HYPERCALL:
-			fallthrough;
 		case GZVM_EXIT_DEBUG:
 			fallthrough;
 		case GZVM_EXIT_FAIL_ENTRY:
diff --git a/include/linux/soc/mediatek/gzvm_drv.h b/include/linux/soc/mediatek/gzvm_drv.h
index 14c1adfbb204..7065ec36f190 100644
--- a/include/linux/soc/mediatek/gzvm_drv.h
+++ b/include/linux/soc/mediatek/gzvm_drv.h
@@ -234,6 +234,8 @@ int gzvm_arch_inform_exit(u16 vm_id);
 int gzvm_find_memslot(struct gzvm *vm, u64 gpa);
 int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
 bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);
+int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa);
+bool gzvm_handle_guest_hvc(struct gzvm_vcpu *vcpu);
 
 int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);
 int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
diff --git a/include/uapi/linux/gzvm.h b/include/uapi/linux/gzvm.h
index 5e21d64a2f4f..f23266055140 100644
--- a/include/uapi/linux/gzvm.h
+++ b/include/uapi/linux/gzvm.h
@@ -196,6 +196,11 @@ enum {
 	GZVM_EXCEPTION_PAGE_FAULT = 0x1,
 };
 
+/* hypercall definitions of GZVM_EXIT_HYPERCALL */
+enum {
+	GZVM_HVC_MEM_RELINQUISH = 0xc6000009,
+};
+
 /**
  * struct gzvm_vcpu_run: Same purpose as kvm_run, this struct is
  * shared between userspace, kernel and
--
2.18.0
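
A note on how the new bookkeeping behaves: every pinned page is recorded
under its guest physical address (ipa), and a GZVM_HVC_MEM_RELINQUISH
hypercall triggers a keyed lookup, an unpin, and removal of the record.
Below is a minimal userspace sketch of that flow, assuming a sorted array
with bsearch() in place of the kernel's rb_find(); the demo_* names are
hypothetical and are not part of this series, only the three-way
comparison mirrors rb_ppage_cmp() above.

/*
 * Illustrative userspace sketch, not part of the patch: it mimics the
 * keyed lookup that gzvm_remove_ppage() performs with rb_find(), using
 * a sorted array plus bsearch().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* The real gzvm_pinned_page also carries the pinned struct page. */
struct demo_page {
	uint64_t ipa;	/* guest physical address the page is mapped at */
};

/* Same shape as rb_ppage_cmp(): order records by ipa */
static int demo_cmp(const void *key, const void *elem)
{
	uint64_t ipa = *(const uint64_t *)key;
	const struct demo_page *p = elem;

	return (ipa < p->ipa) ? -1 : (ipa > p->ipa);
}

/* Stand-in for gzvm_handle_relinquish(): drop the record for @ipa */
static void demo_relinquish(struct demo_page *pages, size_t *nr, uint64_t ipa)
{
	struct demo_page *hit = bsearch(&ipa, pages, *nr,
					sizeof(*pages), demo_cmp);

	if (!hit)
		return;		/* nothing pinned at this ipa */

	/* The kernel unpins here via unpin_user_pages_dirty_lock(). */
	memmove(hit, hit + 1,
		(--*nr - (size_t)(hit - pages)) * sizeof(*pages));
}

int main(void)
{
	struct demo_page pages[] = {
		{ .ipa = 0x1000 }, { .ipa = 0x2000 }, { .ipa = 0x3000 },
	};
	size_t nr = 3;

	demo_relinquish(pages, &nr, 0x2000);	/* guest gave back 0x2000 */

	for (size_t i = 0; i < nr; i++)
		printf("still pinned: 0x%llx\n",
		       (unsigned long long)pages[i].ipa);
	return 0;
}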