In instruction/mmio emulation cases, if the target write memory is SPP
protected, exit to user-space to handle it as if it's caused by SPP
induced EPT violation due to guest write.

Signed-off-by: Yang Weijiang <weijiang.yang@xxxxxxxxx>
---
 arch/x86/kvm/x86.c | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fa114b5db672..71f5a8ae76cf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5525,6 +5525,36 @@ static const struct read_write_emulator_ops write_emultor = {
 	.write = true,
 };
 
+/*
+ * Check whether any page touched by an emulated write of @bytes at @gpa
+ * is sub-page write protected.  If so, report an SPP exit to user space
+ * and skip the emulated instruction, mirroring the SPP-induced EPT
+ * violation path for direct guest writes.
+ */
+static bool is_emulator_spp_protected(struct kvm_vcpu *vcpu,
+				      gpa_t gpa,
+				      unsigned int bytes)
+{
+	gfn_t gfn, start_gfn, end_gfn;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *slot;
+	u32 *access;
+
+	if (!kvm->arch.spp_active)
+		return false;
+
+	start_gfn = gpa >> PAGE_SHIFT;
+	end_gfn = (gpa + bytes - 1) >> PAGE_SHIFT; /* last byte written */
+	for (gfn = start_gfn; gfn <= end_gfn; gfn++) {
+		slot = gfn_to_memslot(kvm, gfn);
+		if (!slot)
+			continue;
+		access = gfn_to_subpage_wp_info(slot, gfn);
+		if (access && *access != FULL_SPP_ACCESS) {
+			vcpu->run->exit_reason = KVM_EXIT_SPP;
+			vcpu->run->spp.addr = gfn;
+			kvm_skip_emulated_instruction(vcpu);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static int emulator_read_write_onepage(unsigned long addr, void *val,
 				       unsigned int bytes,
 				       struct x86_exception *exception,
@@ -5555,6 +5585,9 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
+	if (write && is_emulator_spp_protected(vcpu, gpa, bytes))
+		return X86EMUL_UNHANDLEABLE;
+
 	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
 		return X86EMUL_CONTINUE;
 
@@ -6616,6 +6649,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DONE;
 
 	if (r == EMULATION_FAILED) {
+		if (vcpu->run->exit_reason == KVM_EXIT_SPP)
+			return EMULATE_USER_EXIT;
+
 		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
 					  emulation_type))
 			return EMULATE_DONE;
-- 
2.17.2