This enables guests to lock their CR0 and CR4 registers with a subset of the
X86_CR0_WP, X86_CR4_SMEP, X86_CR4_SMAP, X86_CR4_UMIP, X86_CR4_FSGSBASE and
X86_CR4_CET flags.

The new KVM_HC_LOCK_CR_UPDATE hypercall takes three arguments: the first
identifies the control register, the second is a bit mask of flags to pin
(i.e. mark as read-only), and the third carries optional flags.

Linux guests should already pin these register flags themselves but, once the
kernel is compromised, that self-protection mechanism can be disabled, which
is not possible with pinning enforced by the hypervisor through this dedicated
hypercall.

Once the guest has pinned its CRs, any attempt to change them results in a
general protection fault being injected into the guest.

This hypercall may evolve to support new kinds of registers or pinning. The
optional KVM_LOCK_CR_UPDATE_VERSION flag lets guests query the hypercall
version and map it to the set of supported features.

Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Madhavan T. Venkataraman <madvenka@xxxxxxxxxxxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Cc: Wanpeng Li <wanpengli@xxxxxxxxxxx>
Signed-off-by: Mickaël Salaün <mic@xxxxxxxxxxx>
Link: https://lore.kernel.org/r/20240503131910.307630-3-mic@xxxxxxxxxxx
---

Changes since v1:
* Guard KVM_HC_LOCK_CR_UPDATE hypercall with CONFIG_HEKI.
* Move extern cr4_pinned_mask to x86.h (suggested by Kees Cook).
* Move VMX CR checks from vmx_set_cr*() to handle_cr() to make it possible to
  return to user space (see next commit).
* Change the heki_check_cr()'s first argument to vcpu.
* Don't use -KVM_EPERM in heki_check_cr().
* Generate a fault when the guest requests a denied CR update.
* Add a flags argument to get the version of this hypercall. Being able to do
  a proper version check was suggested by Wei Liu.
---
 Documentation/virt/kvm/x86/hypercalls.rst | 17 +++++
 arch/x86/include/uapi/asm/kvm_para.h      |  2 +
 arch/x86/kernel/cpu/common.c              |  7 +-
 arch/x86/kvm/vmx/vmx.c                    |  5 ++
 arch/x86/kvm/x86.c                        | 84 +++++++++++++++++++++++
 arch/x86/kvm/x86.h                        | 22 ++++++
 include/linux/kvm_host.h                  |  5 ++
 include/uapi/linux/kvm_para.h             |  1 +
 8 files changed, 141 insertions(+), 2 deletions(-)

diff --git a/Documentation/virt/kvm/x86/hypercalls.rst b/Documentation/virt/kvm/x86/hypercalls.rst
index 10db7924720f..3178576f4c47 100644
--- a/Documentation/virt/kvm/x86/hypercalls.rst
+++ b/Documentation/virt/kvm/x86/hypercalls.rst
@@ -190,3 +190,20 @@ the KVM_CAP_EXIT_HYPERCALL capability. Userspace must enable that capability
 before advertising KVM_FEATURE_HC_MAP_GPA_RANGE in the guest CPUID.  In
 addition, if the guest supports KVM_FEATURE_MIGRATION_CONTROL, userspace
 must also set up an MSR filter to process writes to MSR_KVM_MIGRATION_CONTROL.
+
+9. KVM_HC_LOCK_CR_UPDATE
+------------------------
+
+:Architecture: x86
+:Status: active
+:Purpose: Request some control registers to be restricted.
+
+- a0: identify a control register
+- a1: bit mask to make some flags read-only
+- a2: optional KVM_LOCK_CR_UPDATE_VERSION flag that will return the version of
+  this hypercall. Version 1 supports CR0 and CR4 pinning.
+
+The hypercall lets a guest request control register flags to be pinned for
+itself.
+
+Returns 0 on success or a KVM error code otherwise.
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index a1efa7907a0b..cfc17f3d1877 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -149,4 +149,6 @@ struct kvm_vcpu_pv_apf_data {
 #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
 #define KVM_PV_EOI_DISABLED 0x0
 
+#define KVM_LOCK_CR_UPDATE_VERSION (1 << 0)
+
 #endif /* _UAPI_ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 605c26c009c8..69695d9d6e2a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -398,8 +398,11 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 }
 
 /* These bits should not change their value after CPU init is finished. */
-static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
-        X86_CR4_FSGSBASE | X86_CR4_CET | X86_CR4_FRED;
+const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP |
+                                      X86_CR4_UMIP | X86_CR4_FSGSBASE |
+                                      X86_CR4_CET | X86_CR4_FRED;
+EXPORT_SYMBOL_GPL(cr4_pinned_mask);
+
 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 static unsigned long cr4_pinned_bits __ro_after_init;
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 22411f4aff53..7ba970b525f7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5453,6 +5453,11 @@ static int handle_cr(struct kvm_vcpu *vcpu)
         case 0: /* mov to cr */
                 val = kvm_register_read(vcpu, reg);
                 trace_kvm_cr_write(cr, val);
+
+                ret = heki_check_cr(vcpu, cr, val);
+                if (ret)
+                        return ret;
+
                 switch (cr) {
                 case 0:
                         err = handle_set_cr0(vcpu, val);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 91478b769af0..a5f47be59abc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8281,11 +8281,86 @@ static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
         return value;
 }
 
+#ifdef CONFIG_HEKI
+
+#define HEKI_ABI_VERSION 1
+
+static int heki_lock_cr(struct kvm_vcpu *const vcpu, const unsigned long cr,
+                        unsigned long pin, unsigned long flags)
+{
+        if (flags) {
+                if ((flags == KVM_LOCK_CR_UPDATE_VERSION) && !cr && !pin)
+                        return HEKI_ABI_VERSION;
+                return -KVM_EINVAL;
+        }
+
+        if (!pin)
+                return -KVM_EINVAL;
+
+        switch (cr) {
+        case 0:
+                /* Cf. arch/x86/kernel/cpu/common.c */
+                if (!(pin & X86_CR0_WP))
+                        return -KVM_EINVAL;
+
+                if ((pin & read_cr0()) != pin)
+                        return -KVM_EINVAL;
+
+                atomic_long_or(pin, &vcpu->kvm->heki_pinned_cr0);
+                return 0;
+        case 4:
+                /* Checks for irrelevant bits. */
+                if ((pin & cr4_pinned_mask) != pin)
+                        return -KVM_EINVAL;
+
+                /* Ignores bits not present in host. */
+                pin &= __read_cr4();
+                atomic_long_or(pin, &vcpu->kvm->heki_pinned_cr4);
+                return 0;
+        }
+        return -KVM_EINVAL;
+}
+
+int heki_check_cr(struct kvm_vcpu *const vcpu, const unsigned long cr,
+                  const unsigned long val)
+{
+        unsigned long pinned;
+
+        switch (cr) {
+        case 0:
+                pinned = atomic_long_read(&vcpu->kvm->heki_pinned_cr0);
+                if ((val & pinned) != pinned) {
+                        pr_warn_ratelimited(
+                                "heki: Blocked CR0 update: 0x%lx\n", val);
+                        kvm_inject_gp(vcpu, 0);
+                        return 1;
+                }
+                return 0;
+        case 4:
+                pinned = atomic_long_read(&vcpu->kvm->heki_pinned_cr4);
+                if ((val & pinned) != pinned) {
+                        pr_warn_ratelimited(
+                                "heki: Blocked CR4 update: 0x%lx\n", val);
+                        kvm_inject_gp(vcpu, 0);
+                        return 1;
+                }
+                return 0;
+        }
+        return 0;
+}
+EXPORT_SYMBOL_GPL(heki_check_cr);
+
+#endif /* CONFIG_HEKI */
+
 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
 {
         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
         int res = 0;
 
+        res = heki_check_cr(vcpu, cr, val);
+        if (res)
+                return res;
+
         switch (cr) {
         case 0:
                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
@@ -10142,6 +10217,15 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                 vcpu->arch.complete_userspace_io = complete_hypercall_exit;
                 return 0;
         }
+#ifdef CONFIG_HEKI
+        case KVM_HC_LOCK_CR_UPDATE:
+                if (a0 > U32_MAX) {
+                        ret = -KVM_EINVAL;
+                } else {
+                        ret = heki_lock_cr(vcpu, a0, a1, a2);
+                }
+                break;
+#endif /* CONFIG_HEKI */
         default:
                 ret = -KVM_ENOSYS;
                 break;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index a8b71803777b..ade7d68ddaff 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -290,6 +290,26 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
         return !(kvm->arch.disabled_quirks & quirk);
 }
 
+#ifdef CONFIG_HEKI
+
+int heki_check_cr(struct kvm_vcpu *vcpu, unsigned long cr, unsigned long val);
+
+#else /* CONFIG_HEKI */
+
+static inline int heki_check_cr(struct kvm_vcpu *vcpu, unsigned long cr,
+                                unsigned long val)
+{
+        return 0;
+}
+
+static inline int heki_lock_cr(struct kvm_vcpu *const vcpu, unsigned long cr,
+                               unsigned long pin)
+{
+        return 0;
+}
+
+#endif /* CONFIG_HEKI */
+
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
 u64 get_kvmclock_ns(struct kvm *kvm);
@@ -327,6 +347,8 @@ extern u64 host_xcr0;
 extern u64 host_xss;
 extern u64 host_arch_capabilities;
 
+extern const unsigned long cr4_pinned_mask;
+
 extern struct kvm_caps kvm_caps;
 
 extern bool enable_pmu;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 48f31dcd318a..6ff13937929a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -836,6 +836,11 @@ struct kvm {
         bool vm_bugged;
         bool vm_dead;
 
+#ifdef CONFIG_HEKI
+        atomic_long_t heki_pinned_cr0;
+        atomic_long_t heki_pinned_cr4;
+#endif /* CONFIG_HEKI */
+
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
         struct notifier_block pm_notifier;
 #endif
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index 960c7e93d1a9..2ed418704603 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -30,6 +30,7 @@
 #define KVM_HC_SEND_IPI        10
 #define KVM_HC_SCHED_YIELD     11
 #define KVM_HC_MAP_GPA_RANGE   12
+#define KVM_HC_LOCK_CR_UPDATE  13
 
 /*
  * hypercalls use architecture specific
-- 
2.45.0
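
P.S. For illustration only, not part of the patch: below is a minimal
guest-side sketch of how the new hypercall could be exercised from a Linux
guest built against these headers. The init function name, the returned error
codes and the chosen CR4 subset are assumptions made up for the example; only
the hypercall number, its arguments and the KVM_LOCK_CR_UPDATE_VERSION flag
come from the patch above.

/*
 * Illustrative guest-side sketch (hypothetical function name): query the
 * hypercall version, then ask the host to pin CR0.WP and a few CR4
 * hardening bits for this VM.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kvm_para.h>      /* kvm_hypercall3(), KVM_HC_LOCK_CR_UPDATE */
#include <asm/processor-flags.h> /* X86_CR0_WP, X86_CR4_* */

static int __init heki_guest_pin_example(void)
{
        long ret;

        if (!kvm_para_available())
                return -ENODEV;

        /* a0 and a1 must be zero when only the version is requested. */
        ret = kvm_hypercall3(KVM_HC_LOCK_CR_UPDATE, 0, 0,
                             KVM_LOCK_CR_UPDATE_VERSION);
        if (ret < 1)
                return -EOPNOTSUPP;     /* e.g. host without CONFIG_HEKI */

        /* Pin CR0.WP; the host rejects a mask without X86_CR0_WP. */
        ret = kvm_hypercall3(KVM_HC_LOCK_CR_UPDATE, 0, X86_CR0_WP, 0);
        if (ret)
                return -EINVAL;

        /*
         * Pin some CR4 hardening bits: bits outside cr4_pinned_mask are
         * rejected, and bits not enabled on the host are ignored.
         */
        ret = kvm_hypercall3(KVM_HC_LOCK_CR_UPDATE, 4,
                             X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP, 0);
        if (ret)
                return -EINVAL;

        return 0;
}
late_initcall(heki_guest_pin_example);

After such a setup, any later attempt by the guest to clear one of the pinned
bits triggers a #GP injected by the hypervisor, mirroring the existing native
CR pinning in arch/x86/kernel/cpu/common.c.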