Enable the KVM_{G,S}ET_ONE_REG uAPIs so that userspace can access
hardware MSRs or KVM synthetic MSRs through them.

In the CET KVM series [*], KVM "steals" an MSR from the PV MSR space
and accesses it via the KVM_{G,S}ET_MSRS uAPIs, but that approach
pollutes the PV MSR space and hides the distinction between synthetic
MSRs and normal hardware-defined MSRs.

Carve out a separate range in the KVM-customized MSR address space for
synthetic MSRs instead. The synthetic MSRs are not exposed to userspace
via KVM_GET_MSR_INDEX_LIST; userspace is expected to follow KVM's
definitions when composing the uAPI parameters. KVM synthetic MSR
indices start from 0 and increase linearly. The userspace caller must
tag the MSR type correctly in order to access the intended hardware or
synthetic MSR.

[*]: https://lore.kernel.org/all/20240219074733.122080-18-weijiang.yang@xxxxxxxxx/

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Yang Weijiang <weijiang.yang@xxxxxxxxx>
---
 arch/x86/include/uapi/asm/kvm.h | 10 ++++++
 arch/x86/kvm/x86.c              | 62 +++++++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+)

diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index ef11aa4cab42..ca2a47a85fa1 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -410,6 +410,16 @@ struct kvm_xcrs {
 	__u64 padding[16];
 };
 
+#define KVM_X86_REG_MSR			(1 << 2)
+#define KVM_X86_REG_SYNTHETIC_MSR	(1 << 3)
+
+struct kvm_x86_reg_id {
+	__u32 index;
+	__u8  type;
+	__u8  rsvd;
+	__u16 rsvd16;
+};
+
 #define KVM_SYNC_X86_REGS      (1UL << 0)
 #define KVM_SYNC_X86_SREGS     (1UL << 1)
 #define KVM_SYNC_X86_EVENTS    (1UL << 2)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 91478b769af0..d0054c52f24b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2244,6 +2244,31 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	return kvm_set_msr_ignored_check(vcpu, index, *data, true);
 }
 
+static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *value)
+{
+	u64 val;
+	int r;
+
+	r = do_get_msr(vcpu, msr, &val);
+	if (r)
+		return r;
+
+	if (put_user(val, value))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *value)
+{
+	u64 val;
+
+	if (get_user(val, value))
+		return -EFAULT;
+
+	return do_set_msr(vcpu, msr, &val);
+}
+
 #ifdef CONFIG_X86_64
 struct pvclock_clock {
 	int vclock_mode;
@@ -5859,6 +5884,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 	}
 }
 
+static int kvm_translate_synthetic_msr(u32 *index)
+{
+	return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
@@ -5976,6 +6006,38 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
+	case KVM_GET_ONE_REG:
+	case KVM_SET_ONE_REG: {
+		struct kvm_x86_reg_id *id;
+		struct kvm_one_reg reg;
+		u64 __user *value;
+
+		r = -EFAULT;
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			break;
+
+		r = -EINVAL;
+		id = (struct kvm_x86_reg_id *)&reg.id;
+		if (id->rsvd || id->rsvd16)
+			break;
+
+		if (id->type != KVM_X86_REG_MSR &&
+		    id->type != KVM_X86_REG_SYNTHETIC_MSR)
+			break;
+
+		if (id->type == KVM_X86_REG_SYNTHETIC_MSR) {
+			r = kvm_translate_synthetic_msr(&id->index);
+			if (r)
+				break;
+		}
+
+		value = u64_to_user_ptr(reg.addr);
+		if (ioctl == KVM_GET_ONE_REG)
+			r = kvm_get_one_msr(vcpu, id->index, value);
+		else
+			r = kvm_set_one_msr(vcpu, id->index, value);
+		break;
+	}
 	case KVM_TPR_ACCESS_REPORTING: {
 		struct kvm_tpr_access_ctl tac;
 
--
2.43.0
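
Note for reviewers (not part of the patch): since synthetic MSRs are
deliberately absent from KVM_GET_MSR_INDEX_LIST, userspace has to
compose the 64-bit register ID by itself. Below is a minimal userspace
sketch of that composition. It assumes the little-endian layout implied
by struct kvm_x86_reg_id above (index in bits 0:31, type in bits 32:39
of kvm_one_reg.id); the helper names are made up for illustration.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in asm/kvm.h with the new type defines */

/* Compose a register ID: index in bits 0:31, type in bits 32:39. */
static uint64_t x86_msr_reg_id(uint32_t index, uint8_t type)
{
	return (uint64_t)index | ((uint64_t)type << 32);
}

/* Read one synthetic MSR; returns the ioctl result (0 on success). */
static int read_synthetic_msr(int vcpu_fd, uint32_t index, uint64_t *value)
{
	struct kvm_one_reg reg = {
		.id   = x86_msr_reg_id(index, KVM_X86_REG_SYNTHETIC_MSR),
		.addr = (uintptr_t)value,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Writing would go through KVM_SET_ONE_REG with the same ID, and passing
KVM_X86_REG_MSR as the type instead reaches the normal hardware MSR at
that index.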