On Wed, Aug 28, 2019 at 08:05:52PM -0700, Sukadev Bhattiprolu wrote: > > - After the guest becomes secure, when we handle a page fault of a page > > belonging to SVM in HV, send that page to UV via UV_PAGE_IN. > > - Whenever a page is unmapped on the HV side, inform UV via UV_PAGE_INVAL. > > - Ensure all those routines that walk the secondary page tables of > > the guest don't do so in case of secure VM. For secure guest, the > > active secondary page tables are in secure memory and the secondary > > page tables in HV are freed when guest becomes secure. > > > > Signed-off-by: Bharata B Rao <bharata@xxxxxxxxxxxxx> > > --- > > arch/powerpc/include/asm/kvm_host.h | 12 ++++++++++++ > > arch/powerpc/include/asm/ultravisor-api.h | 1 + > > arch/powerpc/include/asm/ultravisor.h | 5 +++++ > > arch/powerpc/kvm/book3s_64_mmu_radix.c | 22 ++++++++++++++++++++++ > > arch/powerpc/kvm/book3s_hv_devm.c | 20 ++++++++++++++++++++ > > 5 files changed, 60 insertions(+) > > > > diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h > > index 66e5cc8c9759..29333e8de1c4 100644 > > --- a/arch/powerpc/include/asm/kvm_host.h > > +++ b/arch/powerpc/include/asm/kvm_host.h > > @@ -867,6 +867,8 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} > > #ifdef CONFIG_PPC_UV > > extern int kvmppc_devm_init(void); > > extern void kvmppc_devm_free(void); > > +extern bool kvmppc_is_guest_secure(struct kvm *kvm); > > +extern int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa); > > #else > > static inline int kvmppc_devm_init(void) > > { > > @@ -874,6 +876,16 @@ static inline int kvmppc_devm_init(void) > > } > > > > static inline void kvmppc_devm_free(void) {} > > + > > +static inline bool kvmppc_is_guest_secure(struct kvm *kvm) > > +{ > > + return false; > > +} > > + > > +static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa) > > +{ > > + return -EFAULT; > > +} > > #endif /* CONFIG_PPC_UV */ > > > > #endif /* 
__POWERPC_KVM_HOST_H__ */ > > diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h > > index 46b1ee381695..cf200d4ce703 100644 > > --- a/arch/powerpc/include/asm/ultravisor-api.h > > +++ b/arch/powerpc/include/asm/ultravisor-api.h > > @@ -29,5 +29,6 @@ > > #define UV_UNREGISTER_MEM_SLOT 0xF124 > > #define UV_PAGE_IN 0xF128 > > #define UV_PAGE_OUT 0xF12C > > +#define UV_PAGE_INVAL 0xF138 > > > > #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */ > > diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h > > index 719c0c3930b9..b333241bbe4c 100644 > > --- a/arch/powerpc/include/asm/ultravisor.h > > +++ b/arch/powerpc/include/asm/ultravisor.h > > @@ -57,4 +57,9 @@ static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid) > > return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid); > > } > > > > +static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift) > > +{ > > + return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift); > > +} > > + > > #endif /* _ASM_POWERPC_ULTRAVISOR_H */ > > diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c > > index 2d415c36a61d..93ad34e63045 100644 > > --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c > > +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c > > @@ -19,6 +19,8 @@ > > #include <asm/pgtable.h> > > #include <asm/pgalloc.h> > > #include <asm/pte-walk.h> > > +#include <asm/ultravisor.h> > > +#include <asm/kvm_host.h> > > > > /* > > * Supported radix tree geometry. 
> > @@ -915,6 +917,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, > > if (!(dsisr & DSISR_PRTABLE_FAULT)) > > gpa |= ea & 0xfff; > > > > + if (kvmppc_is_guest_secure(kvm)) > > + return kvmppc_send_page_to_uv(kvm, gpa & PAGE_MASK); > > + > > /* Get the corresponding memslot */ > > memslot = gfn_to_memslot(kvm, gfn); > > > > @@ -972,6 +977,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, > > unsigned long gpa = gfn << PAGE_SHIFT; > > unsigned int shift; > > > > + if (kvmppc_is_guest_secure(kvm)) { > > + uv_page_inval(kvm->arch.lpid, gpa, PAGE_SIZE); > > + return 0; > > + } > > If it is a page we share with UV, won't we need to drop the HV mapping > for the page? I believe we come here via MMU notifiers only after the HV mapping has been dropped. Regards, Bharata.