On 19/03/2019 15:04, Suraj Jitindar Singh wrote: > Implement a virtual mode handler for the H_CALL H_PAGE_INIT which can be > used to zero or copy a guest page. The page is defined to be 4k and must > be 4k aligned. > > The in-kernel handler halves the time to handle this H_CALL compared to > handling it in userspace for a radix guest. > > Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@xxxxxxxxx> > --- > arch/powerpc/kvm/book3s_hv.c | 39 +++++++++++++++++++++++++++++++++++++++ > 1 file changed, 39 insertions(+) > > diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c > index 7179ea783f4f..fa29cc4900c2 100644 > --- a/arch/powerpc/kvm/book3s_hv.c > +++ b/arch/powerpc/kvm/book3s_hv.c > @@ -883,6 +883,39 @@ static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from, > return 0; > } > > +static int kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, > + unsigned long dest, unsigned long src) > +{ > + int ret = H_FUNCTION; > + u64 pg_sz = 1UL << 12; /* 4K page size */ > + u64 pg_mask = pg_sz - 1; Ditch pg_sz and just use SZ_4K? The page size is not going to change here ever. > + > + /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ > + if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | > + H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) > + return H_PARAMETER; > + > + /* dest (and src if copy_page flag set) must be page aligned */ > + if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) > + return H_PARAMETER; > + > + /* zero and/or copy the page as determined by the flags */ > + if (flags & H_ZERO_PAGE) { > + ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); > + if (ret < 0) > + return H_PARAMETER; > + } > + if (flags & H_COPY_PAGE) { If H_COPY_PAGE is set, is there any good reason to do kvm_clear_guest first even if H_ZERO_PAGE is set? Passing both flags together should not happen in practice, and zeroing the page before copying over it does no harm — but it also makes no sense; skipping the clear when H_COPY_PAGE is set would be cleaner. 
> + ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); > + if (ret < 0) > + return H_PARAMETER; > + } else if (src) return H_PARAMETER; > + > + /* We can ignore the remaining flags */ > + > + return H_SUCCESS; > +} > + > static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) > { > struct kvmppc_vcore *vcore = target->arch.vcore; > @@ -1083,6 +1116,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) > if (nesting_enabled(vcpu->kvm)) > ret = kvmhv_copy_tofrom_guest_nested(vcpu); > break; > + case H_PAGE_INIT: > + ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4), > + kvmppc_get_gpr(vcpu, 5), > + kvmppc_get_gpr(vcpu, 6)); > + break; > default: > return RESUME_HOST; > } > @@ -1127,6 +1165,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) > case H_IPOLL: > case H_XIRR_X: > #endif > + case H_PAGE_INIT: > return 1; > } > > -- Alexey