From: Prasad Joshi <prasadjoshi.linux@xxxxxxxxx>

Signed-off-by: Prasad Joshi <prasadjoshi.linux@xxxxxxxxx>
---
 lib/x86/msr.h |   10 ++++++++++
 x86/svm.c     |   59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+)

diff --git a/lib/x86/msr.h b/lib/x86/msr.h
index 509a421..0e602d0 100644
--- a/lib/x86/msr.h
+++ b/lib/x86/msr.h
@@ -402,5 +402,15 @@
 #define MSR_VM_CR                      0xc0010114
 #define MSR_VM_IGNNE                   0xc0010115
 #define MSR_VM_HSAVE_PA                0xc0010117
+#define MSR_SVM_KEY                    0xc0010118
+
+/* VM CR bits: */
+#define _VM_CR_DPD                     0 /* disable internal debugging */
+#define _VM_CR_R_INIT                  1 /* non-intercepted INIT causes #SX exception */
+#define _VM_CR_DIS_A20M                2 /* disable A20 masking */
+#define _VM_CR_LOCK                    3 /* svm lock */
+#define _VM_CR_SVMDIS                  4 /* disable svm */
+
+#define VM_CR_LOCK                     (1 << _VM_CR_LOCK)
 
 #endif /* _ASM_X86_MSR_INDEX_H */
diff --git a/x86/svm.c b/x86/svm.c
index d51e7ec..6d39ff1 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -241,6 +241,63 @@ static bool null_check(struct test *test)
     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
 }
 
+static bool svm_lock_supported(void)
+{
+    /*
+     * Support for SVM-Lock is indicated by EDX bit 2 as returned by CPUID
+     * function 8000_000Ah.
+     */
+    return (cpuid(0x8000000A).d & 4);
+}
+
+static void svm_lock_test(struct test *test)
+{
+    const int no_tests = 6;
+    const u64 svm_key = 0x1234567890;
+
+    test->scratch = no_tests;
+
+    /* reading SVM_KEY must always return 0 */
+    if (!rdmsr(MSR_SVM_KEY))
+        test->scratch--;
+
+    /* set the key */
+    wrmsr(MSR_SVM_KEY, svm_key);
+
+    /* reading SVM_KEY must always return 0 */
+    if (!rdmsr(MSR_SVM_KEY))
+        test->scratch--;
+
+    /* set SVM lock */
+    wrmsr(MSR_VM_CR, rdmsr(MSR_VM_CR) | VM_CR_LOCK);
+    if (rdmsr(MSR_VM_CR) & VM_CR_LOCK) {
+        /* lock is set */
+        test->scratch--;
+    }
+
+    /* compare with 0 key */
+    wrmsr(MSR_SVM_KEY, 0);
+    if (rdmsr(MSR_VM_CR) & VM_CR_LOCK)
+        test->scratch--;
+
+    /* compare with incorrect key */
+    wrmsr(MSR_SVM_KEY, 0x12);
+    if (rdmsr(MSR_VM_CR) & VM_CR_LOCK)
+        test->scratch--;
+
+    /* compare with correct key */
+    wrmsr(MSR_SVM_KEY, svm_key);
+    if (!(rdmsr(MSR_VM_CR) & VM_CR_LOCK)) {
+        /* lock no longer set */
+        test->scratch--;
+    }
+}
+
+static bool svm_lock_check(struct test *test)
+{
+    return !test->scratch;
+}
+
 static void prepare_no_vmrun_int(struct test *test)
 {
     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
@@ -746,6 +803,8 @@ static bool lat_svm_insn_check(struct test *test)
 static struct test tests[] = {
     { "null", default_supported, default_prepare, null_test,
        default_finished, null_check },
+    { "SVM Lock", svm_lock_supported, default_prepare, svm_lock_test,
+       default_finished, svm_lock_check },
     { "vmrun", default_supported, default_prepare, test_vmrun,
        default_finished, check_vmrun },
     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
-- 
1.7.10.4