From: Fuad Tabba <tabba@xxxxxxxxxx>

__pkvm_host_map_guest() always applies to the loaded vcpu in hyp, and
should not trust the host to provide the vcpu.

Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 15 ++++-----------
 arch/arm64/kvm/mmu.c               |  6 +++---
 2 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index e82c0faf6c81..0f1c9d27f6eb 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -445,20 +445,15 @@ static void handle___pkvm_host_map_guest(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(u64, pfn, host_ctxt, 1);
 	DECLARE_REG(u64, gfn, host_ctxt, 2);
-	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 3);
-	struct kvm_shadow_vcpu_state *shadow_state;
+	struct kvm_vcpu *host_vcpu;
 	struct kvm_vcpu *shadow_vcpu;
-	struct kvm *host_kvm;
-	unsigned int handle;
+	struct kvm_shadow_vcpu_state *shadow_state;
 	int ret = -EINVAL;
 
 	if (!is_protected_kvm_enabled())
 		goto out;
 
-	host_vcpu = kern_hyp_va(host_vcpu);
-	host_kvm = kern_hyp_va(host_vcpu->kvm);
-	handle = host_kvm->arch.pkvm.shadow_handle;
-	shadow_state = pkvm_load_shadow_vcpu_state(handle, host_vcpu->vcpu_idx);
+	shadow_state = pkvm_loaded_shadow_vcpu_state();
 	if (!shadow_state)
 		goto out;
 
@@ -468,11 +463,9 @@ static void handle___pkvm_host_map_guest(struct kvm_cpu_context *host_ctxt)
 	/* Topup shadow memcache with the host's */
 	ret = pkvm_refill_memcache(shadow_vcpu, host_vcpu);
 	if (ret)
-		goto out_put_state;
+		goto out;
 
 	ret = __pkvm_host_share_guest(pfn, gfn, shadow_vcpu);
-out_put_state:
-	pkvm_put_shadow_vcpu_state(shadow_state);
 out:
 	cpu_reg(host_ctxt, 1) = ret;
 }
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c74c431588a3..137d4382ed1c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1143,9 +1143,9 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 	return 0;
 }
 
-static int pkvm_host_map_guest(u64 pfn, u64 gfn, struct kvm_vcpu *vcpu)
+static int pkvm_host_map_guest(u64 pfn, u64 gfn)
 {
-	int ret = kvm_call_hyp_nvhe(__pkvm_host_map_guest, pfn, gfn, vcpu);
+	int ret = kvm_call_hyp_nvhe(__pkvm_host_map_guest, pfn, gfn);
 
 	/*
 	 * Getting -EPERM at this point implies that the pfn has already been
@@ -1211,7 +1211,7 @@ static int pkvm_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	write_lock(&kvm->mmu_lock);
 	pfn = page_to_pfn(page);
-	ret = pkvm_host_map_guest(pfn, fault_ipa >> PAGE_SHIFT, vcpu);
+	ret = pkvm_host_map_guest(pfn, fault_ipa >> PAGE_SHIFT);
 	if (ret) {
 		if (ret == -EAGAIN)
 			ret = 0;
-- 
2.36.1.124.g0e6072fb45-goog
_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
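
A note for reviewers: the hunks above do not show where host_vcpu is now
populated once the register argument is dropped. A rough sketch of how the
handler is assumed to resolve both vcpus from the loaded shadow state follows;
the shadow_state->host_vcpu and shadow_state->shadow_vcpu fields are taken
from the kvm_shadow_vcpu_state layout introduced earlier in this series and
are an assumption here, not something this patch adds.

	/* Sketch only -- assumed shape of the handler after this patch. */
	shadow_state = pkvm_loaded_shadow_vcpu_state();
	if (!shadow_state)
		goto out;

	/* Assumed: the loaded shadow state carries the host's vcpu pointer. */
	host_vcpu = shadow_state->host_vcpu;
	shadow_vcpu = &shadow_state->shadow_vcpu;

	/* Topup shadow memcache with the host's */
	ret = pkvm_refill_memcache(shadow_vcpu, host_vcpu);

If those assignments already sit unchanged between the two hunks, nothing
further is needed; otherwise host_vcpu would be used uninitialised in the
memcache refill.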