On Thu, 2023-05-11 at 16:33 -0700, Sean Christopherson wrote:
> Drop handling of MSR_IA32_CR_PAT from mtrr.c now that SVM and VMX handle
> writes without bouncing through kvm_set_msr_common().  PAT isn't truly an
> MTRR even though it affects memory types, and more importantly KVM enables
> hardware virtualization of guest PAT (by NOT setting "ignore guest PAT")
> when a guest has non-coherent DMA, i.e. KVM doesn't need to zap SPTEs when
> the guest PAT changes.
> 
> The read path is and always has been trivial, i.e. burying it in the MTRR
> code does more harm than good.
> 
> WARN and continue for the PAT case in kvm_set_msr_common(), as that code
> is _currently_ reached if and only if KVM is buggy.  Defer cleaning up the
> lack of symmetry between the read and write paths to a future patch.
> 
> Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>

Reviewed-by: Kai Huang <kai.huang@xxxxxxxxx>

> ---
>  arch/x86/kvm/mtrr.c | 19 ++++++-------------
>  arch/x86/kvm/x86.c  | 13 +++++++++++++
>  2 files changed, 19 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
> index dc213b940141..cdbbb511f940 100644
> --- a/arch/x86/kvm/mtrr.c
> +++ b/arch/x86/kvm/mtrr.c
> @@ -55,7 +55,6 @@ static bool msr_mtrr_valid(unsigned msr)
>  	case MSR_MTRRfix4K_F0000:
>  	case MSR_MTRRfix4K_F8000:
>  	case MSR_MTRRdefType:
> -	case MSR_IA32_CR_PAT:
>  		return true;
>  	}
>  	return false;
> @@ -74,9 +73,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>  	if (!msr_mtrr_valid(msr))
>  		return false;
>  
> -	if (msr == MSR_IA32_CR_PAT) {
> -		return kvm_pat_valid(data);
> -	} else if (msr == MSR_MTRRdefType) {
> +	if (msr == MSR_MTRRdefType) {
>  		if (data & ~0xcff)
>  			return false;
>  		return valid_mtrr_type(data & 0xff);
> @@ -324,8 +321,7 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
>  	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
>  	gfn_t start, end;
>  
> -	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
> -	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))
> +	if (!tdp_enabled || !kvm_arch_has_noncoherent_dma(vcpu->kvm))
>  		return;
>  
>  	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
> @@ -392,8 +388,6 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>  		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
>  	else if (msr == MSR_MTRRdefType)
>  		vcpu->arch.mtrr_state.deftype = data;
> -	else if (msr == MSR_IA32_CR_PAT)
> -		vcpu->arch.pat = data;
>  	else
>  		set_var_mtrr_msr(vcpu, msr, data);
>  
> @@ -421,13 +415,12 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>  		return 1;
>  
>  	index = fixed_msr_to_range_index(msr);
> -	if (index >= 0)
> +	if (index >= 0) {
>  		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
> -	else if (msr == MSR_MTRRdefType)
> +	} else if (msr == MSR_MTRRdefType) {
>  		*pdata = vcpu->arch.mtrr_state.deftype;
> -	else if (msr == MSR_IA32_CR_PAT)
> -		*pdata = vcpu->arch.pat;
> -	else {	/* Variable MTRRs */
> +	} else {
> +		/* Variable MTRRs */
>  		if (is_mtrr_base_msr(msr))
>  			*pdata = var_mtrr_msr_to_range(vcpu, msr)->base;
>  		else
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 8b356c9d8a81..d71cf924cd8f 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3701,6 +3701,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		}
>  		break;
>  	case MSR_IA32_CR_PAT:
> +		/*
> +		 * Writes to PAT should be handled by vendor code as both SVM
> +		 * and VMX track the guest's PAT in the VMCB/VMCS.
> +		 */
> +		WARN_ON_ONCE(1);
> +
> +		if (!kvm_pat_valid(data))
> +			return 1;
> +
> +		vcpu->arch.pat = data;
> +		break;
>  	case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
>  	case MSR_MTRRdefType:
>  		return kvm_mtrr_set_msr(vcpu, msr, data);
> @@ -4110,6 +4121,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		break;
>  	}
>  	case MSR_IA32_CR_PAT:
> +		msr_info->data = vcpu->arch.pat;
> +		break;
>  	case MSR_MTRRcap:
>  	case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
>  	case MSR_MTRRdefType:
> -- 
> 2.40.1.606.ga4b1b128d6-goog
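
One note for anyone following along: with the mtrr.c path gone, the only
sanity check on guest PAT writes is kvm_pat_valid(), which rejects the two
reserved memory-type encodings (2 and 3) in each of the eight one-byte PAT
entries.  A minimal sketch of that helper, from my reading of
arch/x86/kvm/x86.h, so double-check against the actual tree:

	/*
	 * Each PAT entry is one byte; the valid memory types are 0 (UC),
	 * 1 (WC), 4 (WT), 5 (WP), 6 (WB) and 7 (UC-), i.e. bits 3-7 of
	 * every byte must be clear, and types 2 and 3 are reserved.
	 */
	static inline bool kvm_pat_valid(u64 data)
	{
		if (data & 0xF8F8F8F8F8F8F8F8ull)
			return false;
		/* A byte with bit 1 set but bit 2 clear is type 2 or 3. */
		return (data | ((data & 0x0202020202020202ull) << 1)) == data;
	}

IIUC both the vendor paths and the WARNed fallback above funnel through this
same check, so guest-visible behavior doesn't change even if the "impossible"
kvm_set_msr_common() path is ever reached.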