---
arch/powerpc/include/asm/kvm_host.h | 12 ++++++++++++
arch/powerpc/include/asm/ultravisor-api.h | 1 +
arch/powerpc/include/asm/ultravisor.h | 7 +++++++
arch/powerpc/kvm/book3s_64_mmu_radix.c | 22 ++++++++++++++++++++++
arch/powerpc/kvm/book3s_hv_hmm.c | 20 ++++++++++++++++++++
5 files changed, 62 insertions(+)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0c49c3401c63..dcbf7480cb10 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -865,6 +865,8 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
#ifdef CONFIG_PPC_UV
extern int kvmppc_hmm_init(void);
extern void kvmppc_hmm_free(void);
+extern bool kvmppc_is_guest_secure(struct kvm *kvm);
+extern int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa);
#else
static inline int kvmppc_hmm_init(void)
{
@@ -872,6 +874,16 @@ static inline int kvmppc_hmm_init(void)
}
static inline void kvmppc_hmm_free(void) {}
+
+static inline bool kvmppc_is_guest_secure(struct kvm *kvm)
+{
+ return false;
+}
+
+static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa)
+{
+ return -EFAULT;
+}
#endif /* CONFIG_PPC_UV */
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index d6d6eb2e6e6b..9f5510b55892 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -24,5 +24,6 @@
#define UV_UNREGISTER_MEM_SLOT 0xF124
#define UV_PAGE_IN 0xF128
#define UV_PAGE_OUT 0xF12C
+#define UV_PAGE_INVAL 0xF138
#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
index fe45be9ee63b..f4f674794b35 100644
--- a/arch/powerpc/include/asm/ultravisor.h
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -77,6 +77,13 @@ static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
return ucall(UV_UNREGISTER_MEM_SLOT, retbuf, lpid, slotid);
}
+
+static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
+{
+ unsigned long retbuf[UCALL_BUFSIZE];
+
+ return ucall(UV_PAGE_INVAL, retbuf, lpid, gpa, page_shift);
+}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_ULTRAVISOR_H */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index f55ef071883f..c454600c454f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -21,6 +21,8 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
+#include <asm/ultravisor.h>
+#include <asm/kvm_host.h>
/*
* Supported radix tree geometry.
@@ -923,6 +925,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!(dsisr & DSISR_PRTABLE_FAULT))
gpa |= ea & 0xfff;
+ if (kvmppc_is_guest_secure(kvm))
+ return kvmppc_send_page_to_uv(kvm, gpa & PAGE_MASK);
+
/* Get the corresponding memslot */
memslot = gfn_to_memslot(kvm, gfn);
@@ -980,6 +985,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
+ if (kvmppc_is_guest_secure(kvm)) {
+ uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
+ return 0;
+ }
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
@@ -997,6 +1007,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
int ref = 0;
unsigned long old, *rmapp;
+ if (kvmppc_is_guest_secure(kvm))
+ return ref;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
@@ -1021,6 +1034,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;
+ if (kvmppc_is_guest_secure(kvm))
+ return ref;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1;
@@ -1038,6 +1054,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
int ret = 0;
unsigned long old, *rmapp;
+ if (kvmppc_is_guest_secure(kvm))
+ return ret;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
@@ -1090,6 +1109,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
unsigned long gpa;
unsigned int shift;
+ if (kvmppc_is_guest_secure(kvm))
+ return;
+
gpa = memslot->base_gfn << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
for (n = memslot->npages; n; --n) {
diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
index 55bab9c4e60a..9e6c88de456f 100644
--- a/arch/powerpc/kvm/book3s_hv_hmm.c
+++ b/arch/powerpc/kvm/book3s_hv_hmm.c
@@ -62,6 +62,11 @@ struct kvmppc_hmm_migrate_args {
unsigned long page_shift;
};
+bool kvmppc_is_guest_secure(struct kvm *kvm)
+{
+ return !!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE);
+}
+
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
struct kvm_memslots *slots;
@@ -494,6 +499,21 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
return ret;
}
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa)
+{
+ unsigned long pfn;
+ int ret;
+
+ pfn = gfn_to_pfn(kvm, gpa >> PAGE_SHIFT);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+
+ ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gpa, 0, PAGE_SHIFT);
+ kvm_release_pfn_clean(pfn);
+
+ return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
+}
+
static u64 kvmppc_get_secmem_size(void)
{
struct device_node *np;