On 10/16/2019 8:40 AM, Krish Sadhukhan wrote:
On 10/15/2019 09:40 AM, Xiaoyao Li wrote:
Move the MSR bitmap setup code to vmx_vmcs_setup() and only set up the
bitmap when the hardware has the msr_bitmap capability.
Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>
---
arch/x86/kvm/vmx/vmx.c | 39 ++++++++++++++++++++-------------------
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 58b77a882426..7051511c27c2 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4164,12 +4164,30 @@ static void ept_set_mmio_spte_mask(void)
 static void vmx_vmcs_setup(struct vcpu_vmx *vmx)
 {
 	int i;
+	unsigned long *msr_bitmap;
 
 	if (nested)
 		nested_vmx_vmcs_setup();
 
-	if (cpu_has_vmx_msr_bitmap())
-		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
+	if (cpu_has_vmx_msr_bitmap()) {
+		msr_bitmap = vmx->vmcs01.msr_bitmap;
+		vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
vmx_disable_intercept_for_msr() also calls cpu_has_vmx_msr_bitmap(),
which means we are repeating the check. A cleaner approach is to remove
the call to cpu_has_vmx_msr_bitmap() from
vmx_disable_intercept_for_msr() and let its callers do the check just
like you are doing here.
Right.
I'll improve it. Thanks!
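Roughly, the suggested cleanup could look like the sketch below (illustrative only, not the actual follow-up patch; the body is abbreviated from the existing helper in vmx.c and the enable_evmcs handling is omitted):

static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
							  u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * The "if (!cpu_has_vmx_msr_bitmap()) return;" early exit is
	 * dropped; callers such as vmx_vmcs_setup() above must check
	 * cpu_has_vmx_msr_bitmap() before calling.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R)
			/* read-low */
			__clear_bit(msr, msr_bitmap + 0x000 / f);
		if (type & MSR_TYPE_W)
			/* write-low */
			__clear_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R)
			/* read-high */
			__clear_bit(msr, msr_bitmap + 0x400 / f);
		if (type & MSR_TYPE_W)
			/* write-high */
			__clear_bit(msr, msr_bitmap + 0xc00 / f);
	}
}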
+		vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
+		vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
+		vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+		vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+		vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+		vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+		if (kvm_cstate_in_guest(vmx->vcpu.kvm)) {
+			vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
+			vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
+			vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
+			vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
+		}
+
+		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
+	}
+	vmx->msr_bitmap_mode = 0;
 
 	vmcs_write64(VMCS_LINK_POINTER, -1ull);	/* 22.3.1.5 */
@@ -6697,7 +6715,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 {
 	int err;
 	struct vcpu_vmx *vmx;
-	unsigned long *msr_bitmap;
 	int cpu;
 
 	BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
@@ -6754,22 +6771,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (err < 0)
 		goto free_msrs;
 
-	msr_bitmap = vmx->vmcs01.msr_bitmap;
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
-	if (kvm_cstate_in_guest(kvm)) {
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
-	}
-	vmx->msr_bitmap_mode = 0;
-
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);