On 07.06.2017 15:41, Thomas Huth wrote: > Certain CPU instructions will cause an exit of the virtual > machine. Run some of these instructions to check whether > they are emulated right by KVM (or QEMU). > > Signed-off-by: Thomas Huth <thuth@xxxxxxxxxx> > --- > v3: > - Split the prefix tests into spx and stpx tests, so checking the > exception with check_pgm_int_code() is now possible with the right > report_prefix_push() > - Move low_prot enable/disable code into interrupt.h > - Added -i and -t parameters for running the tests more than once > > lib/s390x/asm/interrupt.h | 20 ++++ > s390x/Makefile | 1 + > s390x/intercept.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++ > s390x/unittests.cfg | 3 + > 4 files changed, 251 insertions(+) > create mode 100644 s390x/intercept.c > > diff --git a/lib/s390x/asm/interrupt.h b/lib/s390x/asm/interrupt.h > index 383d312..e4bde6c 100644 > --- a/lib/s390x/asm/interrupt.h > +++ b/lib/s390x/asm/interrupt.h > @@ -15,4 +15,24 @@ void handle_pgm_int(void); > void expect_pgm_int(void); > void check_pgm_int_code(uint16_t code); > > +/* Activate low-address protection */ > +static inline void low_prot_enable(void) > +{ > + uint64_t cr0; > + > + asm volatile (" stctg %%c0,%%c0,%0 " : : "Q"(cr0) : "memory"); > + cr0 |= 1ULL << (63-35); > + asm volatile (" lctlg 0,0,%0 " : : "Q"(cr0)); You then also might want to use %%c0 here. > +} > + > +/* Disable low-address protection */ > +static inline void low_prot_disable(void) > +{ > + uint64_t cr0; > + > + asm volatile (" stctg %%c0,%%c0,%0 " : : "Q"(cr0) : "memory"); > + cr0 &= ~(1ULL << (63-35)); > + asm volatile (" lctlg 0,0,%0 " : : "Q"(cr0)); Ditto — the same %%c0 remark applies here. 
> +} > + > #endif > diff --git a/s390x/Makefile b/s390x/Makefile > index b48f8ab..a61e163 100644 > --- a/s390x/Makefile > +++ b/s390x/Makefile > @@ -1,4 +1,5 @@ > tests = $(TEST_DIR)/selftest.elf > +tests += $(TEST_DIR)/intercept.elf > > all: directories test_cases > > diff --git a/s390x/intercept.c b/s390x/intercept.c > new file mode 100644 > index 0000000..639cb72 > --- /dev/null > +++ b/s390x/intercept.c > @@ -0,0 +1,227 @@ > +/* > + * Interception tests - for s390x CPU instruction that cause a VM exit > + * > + * Copyright (c) 2017 Red Hat Inc > + * > + * Authors: > + * Thomas Huth <thuth@xxxxxxxxxx> > + * > + * This code is free software; you can redistribute it and/or modify it > + * under the terms of the GNU Library General Public License version 2. > + */ > +#include <libcflat.h> > +#include <asm/asm-offsets.h> > +#include <asm/interrupt.h> > +#include <asm/page.h> > + > +static uint8_t pagebuf[PAGE_SIZE * 2] __attribute__((aligned(PAGE_SIZE * 2))); > + > +static unsigned long nr_iterations; > +static unsigned long time_to_run; > + > +/* Test the STORE PREFIX instruction */ > +static void test_stpx(void) > +{ > + uint32_t old_prefix = -1U, tst_prefix = -1U; > + uint32_t new_prefix = (uint32_t)(intptr_t)pagebuf; > + > + /* Can we successfully change the prefix? 
*/ > + asm volatile ( > + " stpx %0\n" > + " spx %2\n" > + " stpx %1\n" > + " spx %0\n" > + : "+Q"(old_prefix), "+Q"(tst_prefix) > + : "Q"(new_prefix)); > + report("store prefix", old_prefix == 0 && tst_prefix == new_prefix); > + > + expect_pgm_int(); > + low_prot_enable(); > + asm volatile(" stpx 0(%0) " : : "r"(8)); > + low_prot_disable(); > + check_pgm_int_code(PGM_INT_CODE_PROTECTION); > + > + expect_pgm_int(); > + asm volatile(" stpx 0(%0) " : : "r"(1)); > + check_pgm_int_code(PGM_INT_CODE_SPECIFICATION); > + > + expect_pgm_int(); > + asm volatile(" stpx 0(%0) " : : "r"(-8)); > + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); > +} > + > +/* Test the SET PREFIX instruction */ > +static void test_spx(void) > +{ > + uint32_t new_prefix = (uint32_t)(intptr_t)pagebuf; > + uint32_t old_prefix; > + > + memset(pagebuf, 0, PAGE_SIZE * 2); > + > + /* > + * Temporarily change the prefix page to our buffer, and store > + * some facility bits there ... at least some of them should be > + * set in our buffer afterwards. > + */ > + asm volatile ( > + " stpx %0\n" > + " spx %1\n" > + " stfl 0\n" > + " spx %0\n" > + : "+Q"(old_prefix) > + : "Q"(new_prefix) > + : "memory"); > + report("stfl to new prefix", pagebuf[GEN_LC_STFL] != 0); > + > + expect_pgm_int(); > + asm volatile(" spx 0(%0) " : : "r"(1)); > + check_pgm_int_code(PGM_INT_CODE_SPECIFICATION); > + > + expect_pgm_int(); > + asm volatile(" spx 0(%0) " : : "r"(-8)); > + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); > +} > + > +/* Test the STORE CPU ADDRESS instruction */ > +static void test_stap(void) > +{ > + uint16_t cpuid = 0xffff; > + > + asm volatile ("stap %0\n" : "+Q"(cpuid)); > + report("get cpu id", cpuid != 0xffff); CPUID is returned by STIDP (STORE CPU ID). You most likely want to name this "get CPU address" / cpu_addr here. 
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx> Just FYI, with my hacked up QEMU (tcg irq injection rework + some alignment checks) I get: PASS: intercept: stpx: store prefix FAIL: intercept: stpx: Program interrupt: expected(4) == received(0) PASS: intercept: stpx: Program interrupt: expected(6) == received(6) PASS: intercept: stpx: Program interrupt: expected(5) == received(5) PASS: intercept: spx: stfl to new prefix FAIL: intercept: spx: Program interrupt: expected(6) == received(0) PASS: intercept: spx: Program interrupt: expected(5) == received(5) PASS: intercept: stap: get cpu id FAIL: intercept: stap: Program interrupt: expected(4) == received(0) FAIL: intercept: stap: Program interrupt: expected(6) == received(0) PASS: intercept: stap: Program interrupt: expected(5) == received(5) PASS: intercept: testblock: page cleared PASS: intercept: testblock: Program interrupt: expected(4) == received(4) PASS: intercept: testblock: Program interrupt: expected(5) == received(5) SUMMARY: 14 tests, 4 unexpected failures EXIT: STATUS=3 Alignment checks for reads I haven't had a look at yet. Low address protection still is an issue. -- Thanks, David