mode-based protections, just as the hardware does.
As exceptions, the target memory addresses of emulated invlpg, branch,
and call instructions do not require a LASS violation check.
Signed-off-by: Zeng Guang <guang.zeng@xxxxxxxxx>
---
arch/x86/kvm/emulate.c | 36 +++++++++++++++++++++++++++++++-----
arch/x86/kvm/vmx/nested.c | 3 +++
arch/x86/kvm/vmx/sgx.c | 2 ++
3 files changed, 36 insertions(+), 5 deletions(-)
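Note for reviewers: below is a minimal sketch, for context only, of the
LASS rule the new ->check_lass() hook is expected to capture, as I read
the architecture. It is not the code added by this series; the helper
name and parameters are made up for illustration (the real hook takes
the emulator context, the PFERR_* access bits and a flags argument such
as KVM_X86_EMULFLAG_SKIP_LASS).

/*
 * Illustrative only: LASS partitions the 64-bit linear address space by
 * bit 63 -- clear means user half, set means supervisor half.
 * BIT_ULL() comes from <linux/bits.h>.
 */
static bool lass_violation_sketch(u64 la, bool user_mode, bool fetch,
				  bool implicit, bool smap, bool ac)
{
	bool supervisor_addr = !!(la & BIT_ULL(63));

	/* Any user-mode access to the supervisor half faults. */
	if (user_mode)
		return supervisor_addr;

	/* Supervisor-mode access to the supervisor half never violates LASS. */
	if (supervisor_addr)
		return false;

	/* Supervisor-mode instruction fetch from the user half always faults. */
	if (fetch)
		return true;

	/*
	 * Supervisor-mode data access to the user half faults only when SMAP
	 * is enabled and either RFLAGS.AC is clear or the access is an
	 * implicit supervisor access (e.g. a descriptor table read).
	 */
	return smap && (!ac || implicit);
}

In the emulator this rule maps onto the PFERR_FETCH_MASK /
PFERR_IMPLICIT_ACCESS bits passed to ->check_lass(), and the exempt
cases from the changelog are expressed by passing
KVM_X86_EMULFLAG_SKIP_LASS to __linearize() for the invlpg and
branch/call target addresses.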
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5cc3efa0e21c..a9a022fd712e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,7 +687,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
- enum x86emul_mode mode, ulong *linear)
+ enum x86emul_mode mode, ulong *linear,
+ u64 flags)
{
struct desc_struct desc;
bool usable;
@@ -695,6 +696,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
u32 lim;
u16 sel;
u8 va_bits;
+ u64 access = fetch ? PFERR_FETCH_MASK : 0;
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
@@ -740,6 +742,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
+
+ if (ctxt->ops->check_lass(ctxt, access, *linear, flags))
+ goto bad;
+
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
@@ -757,7 +763,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
{
unsigned max_size;
return __linearize(ctxt, addr, &max_size, size, write, false,
- ctxt->mode, linear);
+ ctxt->mode, linear, 0);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -770,7 +776,10 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
- rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+
+ /* LASS doesn't apply to the target address of branch and call instructions */
+ rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
+ &linear, KVM_X86_EMULFLAG_SKIP_LASS);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
@@ -845,6 +854,13 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
void *data, unsigned size)
{
+ if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
+ ctxt->exception.vector = GP_VECTOR;
+ ctxt->exception.error_code = 0;
+ ctxt->exception.error_code_valid = true;
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}
@@ -852,6 +868,13 @@ static int linear_write_system(struct x86_emulate_ctxt *ctxt,
ulong linear, void *data,
unsigned int size)
{
+ if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
+ ctxt->exception.vector = GP_VECTOR;
+ ctxt->exception.error_code = 0;
+ ctxt->exception.error_code_valid = true;
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}
@@ -907,7 +930,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
* against op_size.
*/
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
- &linear);
+ &linear, 0);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
@@ -3432,8 +3455,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
+ unsigned max_size;
- rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+ /* LASS doesn't apply to the memory address for invlpg */
+ rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, false, false,
+ ctxt->mode, &linear, KVM_X86_EMULFLAG_SKIP_LASS);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c8ae9d0e59b3..55c88c4593a6 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4974,6 +4974,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
* destination for long mode!
*/
exn = is_noncanonical_address(*ret, vcpu);
+
+ if (!exn)
+ exn = __vmx_check_lass(vcpu, 0, *ret, 0);
} else {
/*
* When not in long mode, the virtual/linear address is
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index b12da2a6dec9..30cb5d0980be 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -37,6 +37,8 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
fault = true;
} else if (likely(is_long_mode(vcpu))) {
fault = is_noncanonical_address(*gva, vcpu);
+ if (!fault)
+ fault = __vmx_check_lass(vcpu, 0, *gva, 0);
} else {
*gva &= 0xffffffff;
fault = (s.unusable) ||