Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>
---
 arch/x86/kvm/vmx/tdx.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index e4167f08b58b..7b81811eb404 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1454,7 +1454,8 @@ static void tdx_measure_page(struct kvm_tdx *kvm_tdx, hpa_t gpa, int size)
 	}
 }
 
-static void tdx_unpin(struct kvm *kvm, kvm_pfn_t pfn, int level)
+static void tdx_unpin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+		      enum pg_level level)
 {
 	int i;
 
@@ -1476,7 +1477,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
 
 	err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, tdx_level, hpa, &out);
 	if (unlikely(err == TDX_ERROR_SEPT_BUSY)) {
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EAGAIN;
 	}
 	if (unlikely(err == (TDX_EPT_ENTRY_STATE_INCORRECT | TDX_OPERAND_ID_RCX))) {
@@ -1493,7 +1494,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
 	}
 	if (KVM_BUG_ON(err, kvm)) {
 		pr_tdx_error(TDH_MEM_PAGE_AUG, err, &out);
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EIO;
 	}
 
@@ -1529,7 +1530,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
 	 * always uses vcpu 0's page table and protected by vcpu->mutex).
 	 */
 	if (KVM_BUG_ON(kvm_tdx->source_pa == INVALID_PAGE, kvm)) {
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EINVAL;
 	}
 
@@ -1547,7 +1548,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
 	} while (unlikely(err == TDX_ERROR_SEPT_BUSY));
 	if (KVM_BUG_ON(err, kvm)) {
 		pr_tdx_error(TDH_MEM_PAGE_ADD, err, &out);
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return -EIO;
 	} else if (measure)
 		tdx_measure_page(kvm_tdx, gpa, KVM_HPAGE_SIZE(level));
@@ -1600,7 +1601,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
 		err = tdx_reclaim_page(hpa, level);
 		if (KVM_BUG_ON(err, kvm))
 			return -EIO;
-		tdx_unpin(kvm, pfn, level);
+		tdx_unpin(kvm, gfn, pfn, level);
 		return 0;
 	}
 
@@ -1633,7 +1634,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
 			r = -EIO;
 		} else {
 			tdx_clear_page(hpa, PAGE_SIZE);
-			tdx_unpin(kvm, pfn + i, PG_LEVEL_4K);
+			tdx_unpin(kvm, gfn + i, pfn + i, PG_LEVEL_4K);
 		}
 		hpa += PAGE_SIZE;
 	}
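
Note for readers skimming the hunks: only tdx_unpin()'s new signature is visible above, not its body. A minimal sketch of the helper after this change, assuming the body still just drops the per-4K-page references taken when the page was pinned (how, or whether, the new gfn argument is used inside the helper is not shown in these hunks):

/* Sketch only -- body reconstructed from context, not taken from this patch. */
static void tdx_unpin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
		      enum pg_level level)
{
	int i;

	/* Drop the reference held on each 4K page backing this mapping. */
	for (i = 0; i < KVM_PAGES_PER_HPAGE(level); i++)
		put_page(pfn_to_page(pfn + i));
}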