In order for userspace to find out whether the MMIO guard is exposed to
a guest, expose a capability that says so. We take this opportunity to
make it incompatible with the NISV option, as that would be rather
counter-productive!

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/kvm/arm.c        | 29 ++++++++++++++++++-----------
 arch/arm64/kvm/hypercalls.c | 13 +++++++++++--
 include/uapi/linux/kvm.h    |  1 +
 3 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index ed9c89ec0b4f..1c9a7abe2728 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -81,32 +81,33 @@ int kvm_arch_check_processor_compat(void *opaque)
 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                             struct kvm_enable_cap *cap)
 {
-        int r;
+        int r = -EINVAL;
 
         if (cap->flags)
                 return -EINVAL;
 
+        mutex_lock(&kvm->lock);
+
         switch (cap->cap) {
         case KVM_CAP_ARM_NISV_TO_USER:
-                r = 0;
-                set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
-                        &kvm->arch.flags);
+                /* This is incompatible with MMIO guard */
+                if (!test_bit(KVM_ARCH_FLAG_MMIO_GUARD, &kvm->arch.flags)) {
+                        r = 0;
+                        set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
+                                &kvm->arch.flags);
+                }
                 break;
         case KVM_CAP_ARM_MTE:
-                mutex_lock(&kvm->lock);
-                if (!system_supports_mte() || kvm->created_vcpus) {
-                        r = -EINVAL;
-                } else {
+                if (system_supports_mte() && !kvm->created_vcpus) {
                         r = 0;
                         set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
                 }
-                mutex_unlock(&kvm->lock);
                 break;
         default:
-                r = -EINVAL;
                 break;
         }
 
+        mutex_unlock(&kvm->lock);
         return r;
 }
 
@@ -211,13 +212,19 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         case KVM_CAP_IMMEDIATE_EXIT:
         case KVM_CAP_VCPU_EVENTS:
         case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
-        case KVM_CAP_ARM_NISV_TO_USER:
         case KVM_CAP_ARM_INJECT_EXT_DABT:
         case KVM_CAP_SET_GUEST_DEBUG:
         case KVM_CAP_VCPU_ATTRIBUTES:
         case KVM_CAP_PTP_KVM:
                 r = 1;
                 break;
+        case KVM_CAP_ARM_NISV_TO_USER:
+                r = !test_bit(KVM_ARCH_FLAG_MMIO_GUARD, &kvm->arch.flags);
+                break;
+        case KVM_CAP_ARM_MMIO_GUARD:
+                r = !test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
+                              &kvm->arch.flags);
+                break;
         case KVM_CAP_SET_GUEST_DEBUG2:
                 return KVM_GUESTDBG_VALID_MASK;
         case KVM_CAP_ARM_SET_DEVICE_ADDR:
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index c39aab55ecae..e4fade6a96f6 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -59,6 +59,13 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
         val[3] = lower_32_bits(cycles);
 }
 
+static bool mmio_guard_allowed(struct kvm_vcpu *vcpu)
+{
+        return (!test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
+                          &vcpu->kvm->arch.flags) &&
+                !vcpu_mode_is_32bit(vcpu));
+}
+
 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 {
         u32 func_id = smccc_get_function(vcpu);
@@ -131,7 +138,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
                 val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
                 val[0] |= BIT(ARM_SMCCC_KVM_FUNC_PTP);
                 /* Only advertise MMIO guard to 64bit guests */
-                if (!vcpu_mode_is_32bit(vcpu)) {
+                if (mmio_guard_allowed(vcpu)) {
                         val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO);
                         val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL);
                         val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP);
@@ -146,10 +153,12 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
                 val[0] = PAGE_SIZE;
                 break;
         case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID:
-                if (!vcpu_mode_is_32bit(vcpu)) {
+                mutex_lock(&vcpu->kvm->lock);
+                if (mmio_guard_allowed(vcpu)) {
                         set_bit(KVM_ARCH_FLAG_MMIO_GUARD, &vcpu->kvm->arch.flags);
                         val[0] = SMCCC_RET_SUCCESS;
                 }
+                mutex_unlock(&vcpu->kvm->lock);
                 break;
         case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID:
                 if (!vcpu_mode_is_32bit(vcpu) &&
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a067410ebea5..ef171186e7be 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1112,6 +1112,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_BINARY_STATS_FD 203
 #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
 #define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_ARM_MMIO_GUARD 206
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
-- 
2.30.2
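
As an illustration of the new UAPI, here is a minimal sketch of how a VMM
might probe the capability. The vm_fd plumbing around it is an assumption
for the example, not part of this patch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns non-zero while the MMIO guard can still be offered to the guest. */
static int mmio_guard_available(int vm_fd)
{
        /*
         * The check is done on the VM fd because the answer is per-VM:
         * kvm_vm_ioctl_check_extension() above reports 0 for
         * KVM_CAP_ARM_MMIO_GUARD once KVM_CAP_ARM_NISV_TO_USER has been
         * enabled, and 0 for KVM_CAP_ARM_NISV_TO_USER once the guest
         * has enrolled in the MMIO guard.
         */
        return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_MMIO_GUARD);
}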
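And, for completeness, a guest-side sketch of the discovery and enrolment
flow that the hypercall handler above services. The MMIO guard function IDs
are assumed from earlier patches in this series; the feature-bitmap call and
the invocation helper are the standard SMCCC ones:

#include <linux/arm-smccc.h>
#include <linux/bits.h>

static bool kvm_mmio_guard_enroll(void)
{
        struct arm_smccc_res res;

        /* Ask the hypervisor which vendor-specific services it implements. */
        arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res);
        if (!(res.a0 & BIT(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL)))
                return false;

        /*
         * Opt in. The host takes kvm->lock and re-checks
         * mmio_guard_allowed() before setting KVM_ARCH_FLAG_MMIO_GUARD,
         * so a racing KVM_CAP_ARM_NISV_TO_USER enablement cannot win.
         */
        arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID,
                             &res);
        return res.a0 == SMCCC_RET_SUCCESS;
}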