On 1/10/20 8:14 AM, Janosch Frank wrote: > On 1/10/20 8:03 AM, Thomas Huth wrote: >> On 09/01/2020 18.51, Janosch Frank wrote: >>> On 1/9/20 6:08 PM, Cornelia Huck wrote: >>>> On Thu, 9 Jan 2020 10:56:01 -0500 >>>> Janosch Frank <frankja@xxxxxxxxxxxxx> wrote: >>>> >>>>> The architecture states that we need to reset local IRQs for all CPU >>>>> resets. Because the old reset interface did not support the normal CPU >>>>> reset we never did that on a normal reset. >>>>> >>>>> Let's implement an interface for the missing normal and clear resets >>>>> and reset all local IRQs, registers and control structures as stated >>>>> in the architecture. >>>>> >>>>> Userspace might already reset the registers via the vcpu run struct, >>>>> but as we need the interface for the interrupt clearing part anyway, >>>>> we implement the resets fully and don't rely on userspace to reset the >>>>> rest. >>>>> >>>>> Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxx> >>>>> --- >>>>> >>>>> I dropped the reviews, as I changed quite a lot. >>>>> >>>>> Keep in mind, that now we'll need a new parameter in normal and >>>>> initial reset for protected virtualization to indicate that we need to >>>>> do the reset via the UV call. The Ultravisor does only accept the >>>>> needed reset, not any subset resets. >>>> >>>> In the interface, or externally? >>> >>> ? >>> >>>> >>>> [Apologies, but the details of the protected virt stuff are no longer >>>> in my cache. >>> Reworded explanation: >>> I can't use a fallthrough, because the UV will reject the normal reset >>> if we do an initial reset (same goes for the clear reset). To address >>> this issue, I added a boolean to the normal and initial reset functions >>> which tells the function if it was called directly or was called because >>> of the fallthrough. >>> >>> Only if called directly a UV call for the reset is done, that way we can >>> keep the fallthrough. >> >> Sounds complicated. And do we need the fallthrough stuff here at all? 
>> What about doing something like: > > That would work and I thought about it, it just comes down to taste :-) > I don't have any strong feelings for a specific implementation. To be more specific: Commit c72db49c098bceb8b73c2e9d305caf37a41fb3bf Author: Janosch Frank <frankja@xxxxxxxxxxxxx> Date: Thu Jan 9 04:37:50 2020 -0500 KVM: s390: protvirt: Add UV cpu reset calls For protected VMs, the VCPU resets are done by the Ultravisor, as KVM has no access to the VCPU registers. As the Ultravisor will only accept a call for the reset that is needed, we need to fence the UV calls when chaining resets. Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxx> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 63dc2bd97582..d5876527e464 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3476,8 +3476,11 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, return r; } -static int kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu) +static int kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu, bool chain) { + int rc = 0; + u32 ret; + vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); @@ -3487,11 +3490,21 @@ static int kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu) kvm_s390_vcpu_stop(vcpu); kvm_s390_clear_local_irqs(vcpu); - return 0; + if (kvm_s390_pv_handle_cpu(vcpu) && !chain) { + rc = uv_cmd_nodata(kvm_s390_pv_handle_cpu(vcpu), + UVC_CMD_CPU_RESET, &ret); + VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: cpu %d rc %x rrc %x", + vcpu->vcpu_id, ret >> 16, ret & 0x0000ffff); + } + + return rc; } -static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) +static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu, bool chain) { + int rc = 0; + u32 ret; + /* this equals initial cpu reset in pop, but we don't switch to ESA */ vcpu->arch.sie_block->gpsw.mask = 0UL; 
vcpu->arch.sie_block->gpsw.addr = 0UL; @@ -3509,16 +3522,26 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) /* make sure the new fpc will be lazily loaded */ save_fpu_regs(); current->thread.fpu.fpc = 0; - if (!kvm_s390_pv_is_protected(vcpu->kvm)) + if (!kvm_s390_pv_handle_cpu(vcpu)) vcpu->arch.sie_block->gbea = 1; vcpu->arch.sie_block->pp = 0; vcpu->arch.sie_block->fpf &= ~FPF_BPBC; - return 0; + if (kvm_s390_pv_handle_cpu(vcpu) && !chain) { + rc = uv_cmd_nodata(kvm_s390_pv_handle_cpu(vcpu), + UVC_CMD_CPU_RESET_INITIAL, + &ret); + VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: cpu %d rc %x rrc %x", + vcpu->vcpu_id, ret >> 16, ret & 0x0000ffff); + } + + return rc; } static int kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu) { + int rc = 0; + u32 ret; struct kvm_sync_regs *regs = &vcpu->run->s.regs; memset(&regs->gprs, 0, sizeof(regs->gprs)); @@ -3547,7 +3570,13 @@ static int kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu) } preempt_enable(); } - return 0; + if (kvm_s390_pv_handle_cpu(vcpu)) { + rc = uv_cmd_nodata(kvm_s390_pv_handle_cpu(vcpu), + UVC_CMD_CPU_RESET_CLEAR, &ret); + VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: cpu %d rc %x rrc %x", + vcpu->vcpu_id, ret >> 16, ret & 0x0000ffff); + } + return rc; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) @@ -4738,12 +4767,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_S390_CLEAR_RESET: r = kvm_arch_vcpu_ioctl_clear_reset(vcpu); + if (r) + break; /* fallthrough */ case KVM_S390_INITIAL_RESET: - r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); + r = kvm_arch_vcpu_ioctl_initial_reset(vcpu, ioctl != KVM_S390_INITIAL_RESET); + if (r) + break; /* fallthrough */ case KVM_S390_NORMAL_RESET: - r = kvm_arch_vcpu_ioctl_normal_reset(vcpu); + r = kvm_arch_vcpu_ioctl_normal_reset(vcpu, ioctl != KVM_S390_NORMAL_RESET); break; case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: {
Attachment:
signature.asc
Description: OpenPGP digital signature