From: Fuad Tabba <tabba@xxxxxxxxxx>

Restrict protected VM capabilities based on the fixed-configuration
for protected VMs. No functional change intended in current
KVM-supported modes (nVHE, VHE).

Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_pkvm.h | 27 ++++++++++++
 arch/arm64/kvm/arm.c              | 69 ++++++++++++++++++++++++++++++-
 2 files changed, 95 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index b92440cfb5b4..6f13f62558dd 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -208,6 +208,33 @@ void kvm_shadow_destroy(struct kvm *kvm);
 	ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) \
 	)
 
+/*
+ * Returns the maximum number of breakpoints supported for protected VMs.
+ */
+static inline int pkvm_get_max_brps(void)
+{
+	int num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS),
+			    PVM_ID_AA64DFR0_ALLOW);
+
+	/*
+	 * If breakpoints are supported, the maximum number is 1 + the field.
+	 * Otherwise, return 0, which is not compliant with the architecture,
+	 * but is reserved and is used here to indicate no debug support.
+	 */
+	return num ? num + 1 : 0;
+}
+
+/*
+ * Returns the maximum number of watchpoints supported for protected VMs.
+ */
+static inline int pkvm_get_max_wrps(void)
+{
+	int num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS),
+			    PVM_ID_AA64DFR0_ALLOW);
+
+	return num ? num + 1 : 0;
+}
+
 extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
 extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7c57c14e173a..10e036bf06e3 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -194,9 +194,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_unshare_hyp(kvm, kvm + 1);
 }
 
-int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+static int kvm_check_extension(struct kvm *kvm, long ext)
 {
 	int r;
+
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 		r = vgic_present;
@@ -294,6 +295,72 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	return r;
 }
 
+/*
+ * Checks whether the extension specified in ext is supported in protected
+ * mode for the specified vm.
+ * The capabilities supported by kvm in general are passed in kvm_cap.
+ */
+static int pkvm_check_extension(struct kvm *kvm, long ext, int kvm_cap)
+{
+	int r;
+
+	switch (ext) {
+	case KVM_CAP_IRQCHIP:
+	case KVM_CAP_ARM_PSCI:
+	case KVM_CAP_ARM_PSCI_0_2:
+	case KVM_CAP_NR_VCPUS:
+	case KVM_CAP_MAX_VCPUS:
+	case KVM_CAP_MAX_VCPU_ID:
+	case KVM_CAP_MSI_DEVID:
+	case KVM_CAP_ARM_VM_IPA_SIZE:
+		r = kvm_cap;
+		break;
+	case KVM_CAP_GUEST_DEBUG_HW_BPS:
+		r = min(kvm_cap, pkvm_get_max_brps());
+		break;
+	case KVM_CAP_GUEST_DEBUG_HW_WPS:
+		r = min(kvm_cap, pkvm_get_max_wrps());
+		break;
+	case KVM_CAP_ARM_PMU_V3:
+		r = kvm_cap && FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER),
+					 PVM_ID_AA64DFR0_ALLOW);
+		break;
+	case KVM_CAP_ARM_SVE:
+		r = kvm_cap && FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE),
+					 PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+		break;
+	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
+		r = kvm_cap &&
+		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_API),
+			      PVM_ID_AA64ISAR1_ALLOW) &&
+		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA),
+			      PVM_ID_AA64ISAR1_ALLOW);
+		break;
+	case KVM_CAP_ARM_PTRAUTH_GENERIC:
+		r = kvm_cap &&
+		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI),
+			      PVM_ID_AA64ISAR1_ALLOW) &&
+		    FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA),
+			      PVM_ID_AA64ISAR1_ALLOW);
+		break;
+	default:
+		r = 0;
+		break;
+	}
+
+	return r;
+}
+
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+{
+	int r = kvm_check_extension(kvm, ext);
+
+	if (kvm && kvm_vm_is_protected(kvm))
+		r = pkvm_check_extension(kvm, ext, r);
+
+	return r;
+}
+
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg)
 {
-- 
2.36.1.124.g0e6072fb45-goog
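A minimal userspace sketch (not part of the patch) of how the clamped
capabilities would surface through KVM_CHECK_EXTENSION on the VM fd,
which is the path that reaches kvm_vm_ioctl_check_extension(). The
KVM_VM_TYPE_ARM_PROTECTED machine-type flag is an assumption carried
over from the wider pKVM series, and error handling is omitted:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	/* Create a protected VM; the machine-type flag name is assumed. */
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_PROTECTED);

	/*
	 * For a protected VM these now report the values clamped by
	 * pkvm_check_extension() rather than the host-wide maximums,
	 * e.g. breakpoints capped at pkvm_get_max_brps().
	 */
	printf("HW breakpoints: %d\n",
	       ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_BPS));
	printf("SVE supported:  %d\n",
	       ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE));

	return 0;
}

For a non-protected VM the same ioctls fall through kvm_check_extension()
unchanged, which is why no functional change is expected in the existing
nVHE and VHE modes.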