Do not use the last byte (bits 56 ~ 63) of shadow_mmio_mask; a later
patch will store the vcpu id in that byte.

Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.c |    1 +
 arch/x86/kvm/vmx.c |    2 +-
 arch/x86/kvm/x86.c |    2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ad40647..bb4d292 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -190,6 +190,7 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
 {
+	WARN_ON(mmio_mask & (0xffull << 56));
 	shadow_mmio_mask = mmio_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2c22fc7..5ef2b35 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3706,7 +3706,7 @@ static void ept_set_mmio_spte_mask(void)
 	 * Also, magic bits (0xffull << 49) is set to quickly identify mmio
 	 * spte.
 	 */
-	kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
+	kvm_mmu_set_mmio_spte_mask(0xffull << 46 | 0x6ull);
 }
 
 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9970ee6..19ef25e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4834,7 +4834,7 @@ static void kvm_set_mmio_spte_mask(void)
 	 * Set the reserved bits and the present bit of an paging-structure
 	 * entry to generate page fault with PFER.RSV = 1.
 	 */
-	mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
+	mask = ((1ull << (55 - maxphyaddr + 1)) - 1) << maxphyaddr;
 	mask |= 1ull;
 
 #ifdef CONFIG_X86_64
-- 
1.7.7.6
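
For reference, a minimal user-space sketch of the bit arithmetic behind
these three hunks; it is not part of the patch, and the maxphyaddr value
is an assumed example (the real width comes from the host CPU). The old
EPT magic bits, 0xffull << 49, span bits 49 ~ 56 and so already collide
with the byte being reserved, while 0xffull << 46 spans bits 46 ~ 53;
likewise the shadow-paging reserved-bit mask used to cover bits
maxphyaddr ~ 62 and now stops at bit 55. The sketch checks the same
property the new WARN_ON() enforces:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LAST_BYTE_MASK	(0xffull << 56)	/* bits 56 ~ 63, reserved for the vcpu id */

int main(void)
{
	unsigned int maxphyaddr = 40;	/* assumed example value */
	uint64_t old_ept, new_ept, old_shadow, new_shadow;

	/* EPT case: magic bits plus write/execute (110b). */
	old_ept = 0xffull << 49 | 0x6ull;	/* bits 49 ~ 56: bit 56 collides */
	new_ept = 0xffull << 46 | 0x6ull;	/* bits 46 ~ 53: top byte clear */

	/* Shadow-paging case: reserved bits above maxphyaddr plus the present bit. */
	old_shadow = (((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr) | 1ull;
	new_shadow = (((1ull << (55 - maxphyaddr + 1)) - 1) << maxphyaddr) | 1ull;

	assert(old_ept & LAST_BYTE_MASK);	/* old masks would trip the new WARN_ON() */
	assert(old_shadow & LAST_BYTE_MASK);
	assert(!(new_ept & LAST_BYTE_MASK));	/* new masks leave bits 56 ~ 63 untouched */
	assert(!(new_shadow & LAST_BYTE_MASK));

	printf("new ept mask    = 0x%016" PRIx64 "\n", new_ept);
	printf("new shadow mask = 0x%016" PRIx64 "\n", new_shadow);
	return 0;
}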