Sukadev Bhattiprolu <sukadev@xxxxxxxxxxxxxxxxxx> writes: > Michael Ellerman [mpe@xxxxxxxxxxxxxx] wrote: >> Claudio Carvalho <cclaudio@xxxxxxxxxxxxx> writes: >> > From: Sukadev Bhattiprolu <sukadev@xxxxxxxxxxxxxxxxxx> >> > >> > To enter a secure guest, we have to go through the ultravisor, therefore >> > we do a ucall when we are entering a secure guest. >> > >> > This change is needed for any sort of entry to the secure guest from the >> > hypervisor, whether it is a return from an hcall, a return from a >> > hypervisor interrupt, or the first time that a secure guest vCPU is run. >> > >> > If we are returning from an hcall, the results are already in the >> > appropriate registers R3:12, except for R3, R6 and R7. R3 has the status >> > of the reflected hcall, therefore we move it to R0 for the ultravisor and >> > set R3 to the UV_RETURN ucall number. R6,7 were used as temporary >> > registers, hence we restore them. >> >> This is another case where some documentation would help people to >> review the code. >> >> > Have fast_guest_return check the kvm_arch.secure_guest field so that a >> > new CPU enters UV when started (in response to a RTAS start-cpu call). >> > >> > Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson. >> > >> > Signed-off-by: Sukadev Bhattiprolu <sukadev@xxxxxxxxxxxxxxxxxx> >> > [ Pass SRR1 in r11 for UV_RETURN, fix kvmppc_msr_interrupt to preserve >> > the MSR_S bit ] >> > Signed-off-by: Paul Mackerras <paulus@xxxxxxxxxx> >> > [ Fix UV_RETURN ucall number and arch.secure_guest check ] >> > Signed-off-by: Ram Pai <linuxram@xxxxxxxxxx> >> > [ Save the actual R3 in R0 for the ultravisor and use R3 for the >> > UV_RETURN ucall number. 
Update commit message and ret_to_ultra comment ] >> > Signed-off-by: Claudio Carvalho <cclaudio@xxxxxxxxxxxxx> >> > --- >> > arch/powerpc/include/asm/kvm_host.h | 1 + >> > arch/powerpc/include/asm/ultravisor-api.h | 1 + >> > arch/powerpc/kernel/asm-offsets.c | 1 + >> > arch/powerpc/kvm/book3s_hv_rmhandlers.S | 40 +++++++++++++++++++---- >> > 4 files changed, 37 insertions(+), 6 deletions(-) >> > >> > diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S >> > index cffb365d9d02..89813ca987c2 100644 >> > --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S >> > +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S >> > @@ -36,6 +36,7 @@ >> > #include <asm/asm-compat.h> >> > #include <asm/feature-fixups.h> >> > #include <asm/cpuidle.h> >> > +#include <asm/ultravisor-api.h> >> > >> > /* Sign-extend HDEC if not on POWER9 */ >> > #define EXTEND_HDEC(reg) \ >> > @@ -1092,16 +1093,12 @@ BEGIN_FTR_SECTION >> > END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) >> > >> > ld r5, VCPU_LR(r4) >> > - ld r6, VCPU_CR(r4) >> > mtlr r5 >> > - mtcr r6 >> > >> > ld r1, VCPU_GPR(R1)(r4) >> > ld r2, VCPU_GPR(R2)(r4) >> > ld r3, VCPU_GPR(R3)(r4) >> > ld r5, VCPU_GPR(R5)(r4) >> > - ld r6, VCPU_GPR(R6)(r4) >> > - ld r7, VCPU_GPR(R7)(r4) >> > ld r8, VCPU_GPR(R8)(r4) >> > ld r9, VCPU_GPR(R9)(r4) >> > ld r10, VCPU_GPR(R10)(r4) >> > @@ -1119,10 +1116,38 @@ BEGIN_FTR_SECTION >> > mtspr SPRN_HDSISR, r0 >> > END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) >> > >> > + ld r6, VCPU_KVM(r4) >> > + lbz r7, KVM_SECURE_GUEST(r6) >> > + cmpdi r7, 0 >> >> You could hoist the load of r6 and r7 to here? > > we could move 'ld r7' here. r6 is used to restore CR below so > it (r6) has to stay there? It's used to restore CR in both paths, so both paths load VCPU_CR(r4) into r6. So we could instead do that load once, before the branch? 
>> > + bne ret_to_ultra >> > + >> > + lwz r6, VCPU_CR(r4) >> > + mtcr r6 >> > + >> > + ld r7, VCPU_GPR(R7)(r4) >> > + ld r6, VCPU_GPR(R6)(r4) >> > ld r0, VCPU_GPR(R0)(r4) >> > ld r4, VCPU_GPR(R4)(r4) >> > HRFI_TO_GUEST >> > b . >> > +/* >> > + * We are entering a secure guest, so we have to invoke the ultravisor to do >> > + * that. If we are returning from a hcall, the results are already in the >> > + * appropriate registers R3:12, except for R3, R6 and R7. R3 has the status of >> > + * the reflected hcall, therefore we move it to R0 for the ultravisor and set >> > + * R3 to the UV_RETURN ucall number. R6,7 were used as temporary registers >> > + * above, hence we restore them. >> > + */ >> > +ret_to_ultra: >> > + lwz r6, VCPU_CR(r4) >> > + mtcr r6 >> > + mfspr r11, SPRN_SRR1 >> > + mr r0, r3 >> > + LOAD_REG_IMMEDIATE(r3, UV_RETURN) >> >> Worth open coding to save three instructions? > > Yes, good point: > > - LOAD_REG_IMMEDIATE(r3, UV_RETURN) > + > + li r3, 0 > + oris r3, r3, (UV_RETURN)@__AS_ATHIGH > + ori r3, r3, (UV_RETURN)@l This should do it, no? li r3, 0 oris r3, r3, UV_RETURN cheers