On Mon, Oct 31, 2022 at 07:39:46PM +0800, Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx> wrote:
>
> On 2022/10/30 14:22, isaku.yamahata@xxxxxxxxx wrote:
> > From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
> >
> > Stub in kvm_tdx, vcpu_tdx, and their various accessors. TDX defines
> > SEAMCALL APIs to access TDX control structures corresponding to the VMX
> > VMCS. Introduce helper accessors to hide its SEAMCALL ABI details.
> >
> > Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
> > Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> > ---
> >  arch/x86/kvm/vmx/tdx.h | 118 ++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 116 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> > index 473013265bd8..98999bf3f188 100644
> > --- a/arch/x86/kvm/vmx/tdx.h
> > +++ b/arch/x86/kvm/vmx/tdx.h
> > @@ -3,14 +3,27 @@
> >  #define __KVM_X86_TDX_H
> >  #ifdef CONFIG_INTEL_TDX_HOST
> > +
> > +#include "tdx_ops.h"
> > +
> > +struct tdx_td_page {
> > +        unsigned long va;
> > +        hpa_t pa;
> > +        bool added;
> > +};
> > +
> >  struct kvm_tdx {
> >          struct kvm kvm;
> > -        /* TDX specific members follow. */
> > +
> > +        struct tdx_td_page tdr;
> > +        struct tdx_td_page *tdcs;
> >  };
> >  struct vcpu_tdx {
> >          struct kvm_vcpu vcpu;
> > -        /* TDX specific members follow. */
> > +
> > +        struct tdx_td_page tdvpr;
> > +        struct tdx_td_page *tdvpx;
> >  };
> >  static inline bool is_td(struct kvm *kvm)
> > @@ -32,6 +45,107 @@ static inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
> >  {
> >          return container_of(vcpu, struct vcpu_tdx, vcpu);
> >  }
> > +
> > +static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
> > +{
> > +#define VMCS_ENC_ACCESS_TYPE_MASK       0x1UL
> > +#define VMCS_ENC_ACCESS_TYPE_FULL       0x0UL
> > +#define VMCS_ENC_ACCESS_TYPE_HIGH       0x1UL
> > +#define VMCS_ENC_ACCESS_TYPE(field)     ((field) & VMCS_ENC_ACCESS_TYPE_MASK)
> > +
> > +        /* TDX is 64bit only. HIGH field isn't supported. */
> > +        BUILD_BUG_ON_MSG(__builtin_constant_p(field) &&
> > +                         VMCS_ENC_ACCESS_TYPE(field) == VMCS_ENC_ACCESS_TYPE_HIGH,
> > +                         "Read/Write to TD VMCS *_HIGH fields not supported");
> > +
> > +        BUILD_BUG_ON(bits != 16 && bits != 32 && bits != 64);
> > +
> > +#define VMCS_ENC_WIDTH_MASK     GENMASK(14, 13)
> > +#define VMCS_ENC_WIDTH_16BIT    (0UL << 13)
> > +#define VMCS_ENC_WIDTH_64BIT    (1UL << 13)
> > +#define VMCS_ENC_WIDTH_32BIT    (2UL << 13)
> > +#define VMCS_ENC_WIDTH_NATURAL  (3UL << 13)
> > +#define VMCS_ENC_WIDTH(field)   ((field) & VMCS_ENC_WIDTH_MASK)
> > +
> > +        /* TDX is 64bit only. i.e. natural width = 64bit. */
> > +        BUILD_BUG_ON_MSG(bits != 64 && __builtin_constant_p(field) &&
> > +                         (VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_64BIT ||
> > +                          VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_NATURAL),
> > +                         "Invalid TD VMCS access for 64-bit field");
> > +        BUILD_BUG_ON_MSG(bits != 32 && __builtin_constant_p(field) &&
> > +                         VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_32BIT,
> > +                         "Invalid TD VMCS access for 32-bit field");
> > +        BUILD_BUG_ON_MSG(bits != 16 && __builtin_constant_p(field) &&
> > +                         VMCS_ENC_WIDTH(field) == VMCS_ENC_WIDTH_16BIT,
> > +                         "Invalid TD VMCS access for 16-bit field");
> > +}
> > +
> > +static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
> > +static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
> > +
> > +#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
> > +static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
> > +                                                        u32 field) \
> > +{ \
> > +        struct tdx_module_output out; \
> > +        u64 err; \
> > + \
> > +        tdvps_##lclass##_check(field, bits); \
> > +        err = tdh_vp_rd(tdx->tdvpr.pa, TDVPS_##uclass(field), &out); \
> > +        if (unlikely(err)) { \
> > +                pr_err("TDH_VP_RD["#uclass".0x%x] failed: 0x%llx\n", \
> > +                       field, err); \
> > +                return 0; \
> > +        } \
> > +        return (u##bits)out.r8; \
> > +} \
> > +static __always_inline void td_##lclass##_write##bits(struct vcpu_tdx *tdx, \
> > +                                                      u32 field, u##bits val) \
> > +{ \
> > +        struct tdx_module_output out; \
> > +        u64 err; \
> > + \
> > +        tdvps_##lclass##_check(field, bits); \
> > +        err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), val, \
> > +                        GENMASK_ULL(bits - 1, 0), &out); \
> > +        if (unlikely(err)) \
> > +                pr_err("TDH_VP_WR["#uclass".0x%x] = 0x%llx failed: 0x%llx\n", \
> > +                       field, (u64)val, err); \
> > +} \
> > +static __always_inline void td_##lclass##_setbit##bits(struct vcpu_tdx *tdx, \
> > +                                                       u32 field, u64 bit) \
> > +{ \
> > +        struct tdx_module_output out; \
> > +        u64 err; \
> > + \
> > +        tdvps_##lclass##_check(field, bits); \
> > +        err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), bit, bit, \
> > +                        &out); \
> > +        if (unlikely(err)) \
> > +                pr_err("TDH_VP_WR["#uclass".0x%x] |= 0x%llx failed: 0x%llx\n", \
> > +                       field, bit, err); \
> > +} \
> > +static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
> > +                                                         u32 field, u64 bit) \
> > +{ \
> > +        struct tdx_module_output out; \
> > +        u64 err; \
> > + \
> > +        tdvps_##lclass##_check(field, bits); \
> > +        err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), 0, bit, \
> > +                        &out); \
> > +        if (unlikely(err)) \
> > +                pr_err("TDH_VP_WR["#uclass".0x%x] &= ~0x%llx failed: 0x%llx\n", \
> > +                       field, bit, err); \
> > +}
>
> For the set of accessors, although there will be a kernel error message when
> tdh_vp_{rd,wr} fails, the caller doesn't know whether these function calls
> succeeded or not. Won't this cause unexpected behavior?

Oh right. Basically tdh_vp_{rd, wr} shouldn't fail as long as the parameters
are correct, so I'll add KVM_BUG_ON(). The TDX module takes a shared lock on
TDVPR, TDR and TDCS, so it shouldn't fail with TDX_OPERAND_BUSY at run time.
(At least with TDX module 1.0)

Thanks,
--
Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
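
[For illustration only, not taken from the thread above: a minimal sketch of
what "add KVM_BUG_ON()" could look like in the read path of
TDX_BUILD_TDVPS_ACCESSORS(). It assumes KVM's generic KVM_BUG_ON(cond, kvm)
helper from <linux/kvm_host.h> and the vcpu back-pointer embedded in struct
vcpu_tdx (tdx->vcpu.kvm); the actual follow-up patch may differ. Macro line
continuations are omitted for readability.]

        err = tdh_vp_rd(tdx->tdvpr.pa, TDVPS_##uclass(field), &out);
        /*
         * KVM_BUG_ON() evaluates to the condition, and on an unexpected
         * SEAMCALL error marks the VM as bugged via kvm_vm_bugged()
         * instead of only logging the failure.
         */
        if (KVM_BUG_ON(err, tdx->vcpu.kvm)) {
                pr_err("TDH_VP_RD["#uclass".0x%x] failed: 0x%llx\n",
                       field, err);
                return 0;
        }
        return (u##bits)out.r8;

The idea would be that returning 0 keeps the accessor's signature unchanged,
while marking the VM bugged makes KVM fail subsequent ioctls, so the
placeholder value should not be consumed in practice.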