Let's also check the PEI values to make sure our VSIE implementation is
correct.

Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
---
 s390x/Makefile                  |   3 +-
 s390x/mvpg-sie.c                | 139 ++++++++++++++++++++++++++++++++
 s390x/snippets/c/mvpg-snippet.c |  33 ++++++++
 s390x/unittests.cfg             |   3 +
 4 files changed, 177 insertions(+), 1 deletion(-)
 create mode 100644 s390x/mvpg-sie.c
 create mode 100644 s390x/snippets/c/mvpg-snippet.c

diff --git a/s390x/Makefile b/s390x/Makefile
index fe267011..6692cf73 100644
--- a/s390x/Makefile
+++ b/s390x/Makefile
@@ -22,6 +22,7 @@ tests += $(TEST_DIR)/uv-guest.elf
 tests += $(TEST_DIR)/sie.elf
 tests += $(TEST_DIR)/mvpg.elf
 tests += $(TEST_DIR)/uv-host.elf
+tests += $(TEST_DIR)/mvpg-sie.elf
 
 tests_binary = $(patsubst %.elf,%.bin,$(tests))
 ifneq ($(HOST_KEY_DOCUMENT),)
@@ -79,7 +80,7 @@ FLATLIBS = $(libcflat)
 SNIPPET_DIR = $(TEST_DIR)/snippets
 
 # C snippets that need to be linked
-snippets-c =
+snippets-c = $(SNIPPET_DIR)/c/mvpg-snippet.gbin
 
 # ASM snippets that are directly compiled and converted to a *.gbin
 snippets-a =
diff --git a/s390x/mvpg-sie.c b/s390x/mvpg-sie.c
new file mode 100644
index 00000000..a617704b
--- /dev/null
+++ b/s390x/mvpg-sie.c
@@ -0,0 +1,139 @@
+#include <libcflat.h>
+#include <asm/asm-offsets.h>
+#include <asm-generic/barrier.h>
+#include <asm/interrupt.h>
+#include <asm/pgtable.h>
+#include <mmu.h>
+#include <asm/page.h>
+#include <asm/facility.h>
+#include <asm/mem.h>
+#include <asm/sigp.h>
+#include <smp.h>
+#include <alloc_page.h>
+#include <bitops.h>
+#include <vm.h>
+#include <sclp.h>
+#include <sie.h>
+
+static u8 *guest;
+static u8 *guest_instr;
+static struct vm vm;
+
+static uint8_t *src;
+static uint8_t *dst;
+
+extern const char _binary_s390x_snippets_c_mvpg_snippet_gbin_start[];
+extern const char _binary_s390x_snippets_c_mvpg_snippet_gbin_end[];
+int binary_size;
+
+static void handle_validity(struct vm *vm)
+{
+	report(0, "VALIDITY: %x", vm->sblk->ipb >> 16);
+}
+
+static void sie(struct vm *vm)
+{
+	/* Reset icptcode so we don't trip below */
+	vm->sblk->icptcode = 0;
+
+	while (vm->sblk->icptcode == 0) {
+		sie64a(vm->sblk, &vm->save_area);
+		if (vm->sblk->icptcode == ICPT_VALIDITY)
+			handle_validity(vm);
+	}
+	vm->save_area.guest.grs[14] = vm->sblk->gg14;
+	vm->save_area.guest.grs[15] = vm->sblk->gg15;
+}
+
+static void test_mvpg_pei(void)
+{
+	uint64_t **pei_dst = (uint64_t **)((uintptr_t) vm.sblk + 0xc0);
+	uint64_t **pei_src = (uint64_t **)((uintptr_t) vm.sblk + 0xc8);
+
+	report_prefix_push("pei");
+	protect_page(guest + 0x6000, PAGE_ENTRY_I);
+	sie(&vm);
+	report(vm.sblk->icptcode == ICPT_PARTEXEC, "Partial execution");
+	report((uintptr_t)**pei_src == ((uintptr_t)vm.sblk->mso) + 0x6000 + PAGE_ENTRY_I, "PEI_SRC correct");
+	report((uintptr_t)**pei_dst == vm.sblk->mso + 0x5000, "PEI_DST correct");
+	/* Jump over the diag44 */
+	sie(&vm);
+
+	/* Clear PEI data for next check */
+	memset((uint64_t *)((uintptr_t) vm.sblk + 0xc0), 0, 16);
+	unprotect_page(guest + 0x6000, PAGE_ENTRY_I);
+	protect_page(guest + 0x5000, PAGE_ENTRY_I);
+	sie(&vm);
+	report(vm.sblk->icptcode == ICPT_PARTEXEC, "Partial execution");
+	report((uintptr_t)**pei_src == vm.sblk->mso + 0x6000, "PEI_SRC correct");
+	report((uintptr_t)**pei_dst == vm.sblk->mso + 0x5000 + PAGE_ENTRY_I, "PEI_DST correct");
+
+	report_prefix_pop();
+}
+
+static void test_mvpg(void)
+{
+	int binary_size = ((uintptr_t)_binary_s390x_snippets_c_mvpg_snippet_gbin_end -
+			   (uintptr_t)_binary_s390x_snippets_c_mvpg_snippet_gbin_start);
+
+	memcpy(guest, _binary_s390x_snippets_c_mvpg_snippet_gbin_start, binary_size);
+	memset(src, 0x42, PAGE_SIZE);
+	memset(dst, 0x43, PAGE_SIZE);
+	sie(&vm);
+	mb();
+	report(!memcmp(src, dst, PAGE_SIZE) && *dst == 0x42, "Page moved");
+}
+
+static void setup_guest(void)
+{
+	setup_vm();
+
+	/* Allocate 1MB as guest memory */
+	guest = alloc_pages(8);
+	/* The first two pages are the lowcore */
+	guest_instr = guest + PAGE_SIZE * 2;
+
+	vm.sblk = alloc_page();
+
+	vm.sblk->cpuflags = CPUSTAT_ZARCH | CPUSTAT_RUNNING;
+	vm.sblk->prefix = 0;
+	/*
+	 * Pageable guest with the same ASCE as the test program, but
+	 * the guest memory 0x0 is offset to start at the allocated
+	 * guest pages and end after 1MB.
+	 *
+	 * It's not pretty but faster and easier than managing guest ASCEs.
+	 */
+	vm.sblk->mso = (u64)guest;
+	vm.sblk->msl = (u64)guest;
+	vm.sblk->ihcpu = 0xffff;
+
+	vm.sblk->crycbd = (uint64_t)alloc_page();
+
+	vm.sblk->gpsw.addr = PAGE_SIZE * 4;
+	vm.sblk->gpsw.mask = 0x0000000180000000ULL;
+	vm.sblk->ictl = ICTL_OPEREXC | ICTL_PINT;
+	/* Enable MVPG interpretation as we want to test KVM and not ourselves */
+	vm.sblk->eca = ECA_MVPGI;
+
+	src = guest + PAGE_SIZE * 6;
+	dst = guest + PAGE_SIZE * 5;
+}
+
+int main(void)
+{
+	report_prefix_push("mvpg-sie");
+	if (!sclp_facilities.has_sief2) {
+		report_skip("SIEF2 facility unavailable");
+		goto done;
+	}
+
+	setup_guest();
+	test_mvpg();
+	test_mvpg_pei();
+
+done:
+	report_prefix_pop();
+	return report_summary();
+
+}
diff --git a/s390x/snippets/c/mvpg-snippet.c b/s390x/snippets/c/mvpg-snippet.c
new file mode 100644
index 00000000..96b70c9c
--- /dev/null
+++ b/s390x/snippets/c/mvpg-snippet.c
@@ -0,0 +1,33 @@
+#include <libcflat.h>
+
+static inline void force_exit(void)
+{
+	asm volatile(" diag 0,0,0x44\n");
+}
+
+static inline int mvpg(unsigned long r0, void *dest, void *src)
+{
+	register unsigned long reg0 asm ("0") = r0;
+	int cc;
+
+	asm volatile(" mvpg %1,%2\n"
+		     " ipm %0\n"
+		     " srl %0,28"
+		     : "=&d" (cc) : "a" (dest), "a" (src), "d" (reg0)
+		     : "memory", "cc");
+	return cc;
+}
+
+static void test_mvpg_real(void)
+{
+	mvpg(0, (void *)0x5000, (void *)0x6000);
+	force_exit();
+}
+
+__attribute__((section(".text"))) int main(void)
+{
+	test_mvpg_real();
+	test_mvpg_real();
+	test_mvpg_real();
+	return 0;
+}
diff --git a/s390x/unittests.cfg b/s390x/unittests.cfg
index 9f81a608..8634b1b1 100644
--- a/s390x/unittests.cfg
+++ b/s390x/unittests.cfg
@@ -103,3 +103,6 @@ file = sie.elf
 [mvpg]
 file = mvpg.elf
 timeout = 10
+
+[mvpg-sie]
+file = mvpg-sie.elf
-- 
2.30.2
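(Editor's note, not part of the patch.) For readers unfamiliar with the MSO/MSL setup in setup_guest(): since both the main storage origin and limit are set to the host address of the allocated guest memory, a guest absolute address below 1MB corresponds to that same offset from "guest" on the host side. This is the mapping the test relies on when it protects guest + 0x6000 (the MVPG source page) and guest + 0x5000 (the destination page) to provoke the partial-execution intercept. A minimal sketch of that assumption; guest_to_host() is a hypothetical helper for illustration only, not something the patch adds:

#include <stdint.h>

/*
 * Illustration only: with vm.sblk->mso == vm.sblk->msl == (u64)guest as in
 * setup_guest() above, guest absolute address gaddr (below 1MB) lives at
 * host address guest + gaddr, so protecting guest + 0x6000 affects the
 * page the snippet addresses as 0x6000.
 */
static inline uint8_t *guest_to_host(uint8_t *guest_base, unsigned long gaddr)
{
	return guest_base + gaddr;
}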