[RFC PATCH part-7 10/12] pkvm: x86: Implement do_unshare() helper for unsharing memory

From: Shaoqin Huang <shaoqin.huang@xxxxxxxxx>

do_unshare() is paired with do_share() and is its reverse: the page
sharer takes back access to a page that was previously shared with
another component. The page state transitions as follows:

sharer:   PAGE_SHARED_OWNED    => OWNED
borrower: PAGE_SHARED_BORROWED => NOPAGE

Introduce a do_unshare() helper for safely unsharing a memory region
that one component previously shared with another. It checks the page
state stored in the PTEs to ensure that each do_unshare() is paired
with an earlier do_share(), so the page is correctly taken back and can
only be accessed by the page owner after unsharing.
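
For illustration, a hypothetical caller that takes back a page it had
earlier shared via __pkvm_host_share_guest() would look roughly like
the sketch below (example_undo_share() and the single-page size are
illustrative only and not part of this patch):

	static int example_undo_share(struct pkvm_pgtable *guest_pgt,
				      u64 hpa, u64 gpa)
	{
		/*
		 * Both ends must still be in the shared state (host:
		 * SHARED_OWNED, guest: SHARED_BORROWED); otherwise
		 * check_unshare() fails and the page tables are left
		 * untouched.
		 */
		return __pkvm_host_unshare_guest(hpa, guest_pgt, gpa,
						 PAGE_SIZE);
	}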

Signed-off-by: Shaoqin Huang <shaoqin.huang@xxxxxxxxx>
---
 arch/x86/kvm/vmx/pkvm/hyp/mem_protect.c | 138 ++++++++++++++++++++++++
 arch/x86/kvm/vmx/pkvm/hyp/mem_protect.h |  15 +++
 2 files changed, 153 insertions(+)

diff --git a/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.c b/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.c
index 987fe172f6a6..092c8b6ea5fe 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.c
+++ b/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.c
@@ -453,3 +453,141 @@ int __pkvm_host_share_guest(u64 hpa, struct pkvm_pgtable *guest_pgt,
 
 	return ret;
 }
+
+static int host_request_unshare(const struct pkvm_mem_transition *tx)
+{
+	u64 addr = tx->initiator.host.addr;
+	u64 size = tx->size;
+
+	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
+}
+
+static int guest_ack_unshare(const struct pkvm_mem_transition *tx)
+{
+	u64 addr = tx->completer.guest.addr;
+	u64 size = tx->size;
+
+	return __guest_check_page_state_range(tx->completer.guest.pgt, addr,
+					      size, PKVM_PAGE_SHARED_BORROWED);
+}
+
+int check_unshare(const struct pkvm_mem_transition *tx)
+{
+	int ret;
+
+	switch (tx->initiator.id) {
+	case PKVM_ID_HOST:
+		ret = host_request_unshare(tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	switch (tx->completer.id) {
+	case PKVM_ID_GUEST:
+		ret = guest_ack_unshare(tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int host_initiate_unshare(const struct pkvm_mem_transition *tx)
+{
+	u64 addr = tx->initiator.host.addr;
+	u64 size = tx->size;
+	u64 prot = pkvm_mkstate(tx->initiator.prot, PKVM_PAGE_OWNED);
+
+	return host_ept_create_idmap_locked(addr, size, 0, prot);
+}
+
+static int guest_complete_unshare(const struct pkvm_mem_transition *tx)
+{
+	struct pkvm_pgtable *pgt = tx->completer.guest.pgt;
+	u64 addr = tx->completer.guest.addr;
+	u64 phys = tx->completer.guest.phys;
+	u64 size = tx->size;
+
+	return pkvm_pgtable_unmap_safe(pgt, addr, phys, size);
+}
+
+static int __do_unshare(struct pkvm_mem_transition *tx)
+{
+	int ret;
+
+	switch (tx->initiator.id) {
+	case PKVM_ID_HOST:
+		ret = host_initiate_unshare(tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	switch (tx->completer.id) {
+	case PKVM_ID_GUEST:
+		ret = guest_complete_unshare(tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * do_unshare() - The page owner takes back page access from another
+ * component.
+ *
+ * Initiator: SHARED_OWNED	=> OWNED
+ * Completer: SHARED_BORROWED	=> NOPAGE
+ */
+int do_unshare(struct pkvm_mem_transition *share)
+{
+	int ret;
+
+	ret = check_unshare(share);
+	if (ret)
+		return ret;
+
+	return WARN_ON(__do_unshare(share));
+}
+
+int __pkvm_host_unshare_guest(u64 hpa, struct pkvm_pgtable *guest_pgt,
+			      u64 gpa, u64 size)
+{
+	int ret;
+	struct pkvm_mem_transition share = {
+		.size = size,
+		.initiator	= {
+			.id	= PKVM_ID_HOST,
+			.host	= {
+				.addr	= hpa,
+			},
+			.prot	= HOST_EPT_DEF_MEM_PROT,
+		},
+		.completer	= {
+			.id	= PKVM_ID_GUEST,
+			.guest	= {
+				.pgt	= guest_pgt,
+				.addr	= gpa,
+				.phys	= hpa,
+			},
+		},
+	};
+
+	host_ept_lock();
+
+	ret = do_unshare(&share);
+
+	host_ept_unlock();
+
+	return ret;
+}
diff --git a/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.h b/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.h
index 549cc5246620..b004a792b21a 100644
--- a/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.h
+++ b/arch/x86/kvm/vmx/pkvm/hyp/mem_protect.h
@@ -100,4 +100,19 @@ int __pkvm_hyp_donate_host(u64 hpa, u64 size);
 int __pkvm_host_share_guest(u64 hpa, struct pkvm_pgtable *guest_pgt,
 			    u64 gpa, u64 size, u64 prot);
 
+/*
+ * __pkvm_host_unshare_guest() - Host unshares pages previously shared with a
+ * guest. The guest will no longer be able to access these pages.
+ *
+ * @hpa:	Start hpa of the shared pages; must be contiguous.
+ * @guest_pgt:	The guest ept pagetable.
+ * @gpa:	Start gpa at which the shared pages are mapped in the guest ept.
+ * @size:	The size of the pages to be unshared.
+ *
+ * Unmap the range [gpa, gpa + size) in the guest ept pagetable, and change
+ * the page state from PAGE_SHARED_OWNED to PAGE_OWNED in the host ept.
+ */
+int __pkvm_host_unshare_guest(u64 hpa, struct pkvm_pgtable *guest_pgt,
+			      u64 gpa, u64 size);
+
 #endif
-- 
2.25.1



