Collaborative Memory Management (CMM) is extended VM memory usage hinting for
the hypervisor, and handling the ESSA instruction is quite complicated. Let's
add at least a bare-bones test; maybe someone will extend it later.

Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxxxxxxx>
---
 lib/s390x/asm/interrupt.h |  1 +
 lib/s390x/interrupt.c     |  8 ++++++
 s390x/Makefile            |  1 +
 s390x/cmm.c               | 71 +++++++++++++++++++++++++++++++++++++++++++++++
 s390x/unittests.cfg       |  3 ++
 5 files changed, 84 insertions(+)
 create mode 100644 s390x/cmm.c

diff --git a/lib/s390x/asm/interrupt.h b/lib/s390x/asm/interrupt.h
index 3ccc8e3..3f8f2f7 100644
--- a/lib/s390x/asm/interrupt.h
+++ b/lib/s390x/asm/interrupt.h
@@ -15,6 +15,7 @@ void handle_pgm_int(void);
 void expect_pgm_int(void);
 uint16_t clear_pgm_int(void);
 void check_pgm_int_code(uint16_t code);
+uint16_t get_pgm_int_code(void);
 
 /* Activate low-address protection */
 static inline void low_prot_enable(void)
diff --git a/lib/s390x/interrupt.c b/lib/s390x/interrupt.c
index 56c7603..bfa21e8 100644
--- a/lib/s390x/interrupt.c
+++ b/lib/s390x/interrupt.c
@@ -41,6 +41,14 @@ void check_pgm_int_code(uint16_t code)
 	       code == lc->pgm_int_code, code, lc->pgm_int_code);
 }
 
+uint16_t get_pgm_int_code(void)
+{
+	mb();
+	if (!lc->pgm_int_code)
+		pgm_int_expected = false;
+	return lc->pgm_int_code;
+}
+
 static void fixup_pgm_int(void)
 {
 	switch (lc->pgm_int_code) {
diff --git a/s390x/Makefile b/s390x/Makefile
index 438c6b2..e04cad0 100644
--- a/s390x/Makefile
+++ b/s390x/Makefile
@@ -6,6 +6,7 @@
 tests += $(TEST_DIR)/sthyi.elf
 tests += $(TEST_DIR)/skey.elf
 tests += $(TEST_DIR)/diag10.elf
 tests += $(TEST_DIR)/pfmf.elf
+tests += $(TEST_DIR)/cmm.elf
 
 all: directories test_cases
diff --git a/s390x/cmm.c b/s390x/cmm.c
new file mode 100644
index 0000000..817d84c
--- /dev/null
+++ b/s390x/cmm.c
@@ -0,0 +1,71 @@
+/*
+ * CMM tests (ESSA)
+ *
+ * Copyright (c) 2018 IBM Corp
+ *
+ * Authors:
+ *  Janosch Frank <frankja@xxxxxxxxxxxxxxxxxx>
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Library General Public License version 2.
+ */
+
+#include <libcflat.h>
+#include <asm/asm-offsets.h>
+#include <asm/interrupt.h>
+#include <asm/page.h>
+
+static uint8_t pagebuf[PAGE_SIZE * 2] __attribute__((aligned(PAGE_SIZE * 2)));
+const unsigned long page0 = (unsigned long)pagebuf;
+
+static unsigned long essa(uint8_t state, unsigned long paddr)
+{
+	register uint64_t addr asm("0") = paddr;
+	register uint64_t extr_state asm("1");
+
+	asm volatile(".insn rrf,0xb9ab0000,%[extr_state],%[addr],%[new_state],0"
+		     : [extr_state] "=&d" (extr_state)
+		     : [addr] "a" (addr), [new_state] "i" (state));
+	return (unsigned long)extr_state;
+}
+
+static void test_params(void)
+{
+	expect_pgm_int();
+	essa(8, page0);
+	check_pgm_int_code(PGM_INT_CODE_SPECIFICATION);
+}
+
+static void test_priv(void)
+{
+	expect_pgm_int();
+	enter_pstate();
+	essa(0, page0);
+	check_pgm_int_code(PGM_INT_CODE_PRIVILEGED_OPERATION);
+}
+
+/* Unfortunately the availability is not indicated by stfl bits, so we
+ * have to try to execute ESSA and test for an operation exception.
+ */
+static bool test_availability(void)
+{
+	expect_pgm_int();
+	essa(0, page0);
+	return get_pgm_int_code() == 0;
+}
+
+int main(void)
+{
+	bool has_essa = test_availability();
+
+	report_prefix_push("cmm");
+	report_xfail("ESSA available", !has_essa, has_essa);
+	if (!has_essa)
+		goto done;
+
+	test_priv();
+	test_params();
+done:
+	report_prefix_pop();
+	return report_summary();
+}
diff --git a/s390x/unittests.cfg b/s390x/unittests.cfg
index 3b975ca..367e970 100644
--- a/s390x/unittests.cfg
+++ b/s390x/unittests.cfg
@@ -47,3 +47,6 @@ file = diag10.elf
 
 [pfmf]
 file = pfmf.elf
+
+[cmm]
+file = cmm.elf
-- 
2.7.4
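
For whoever picks this up later: below is a minimal, untested sketch of what
exercising the actual page state transitions could look like, reusing the
essa() helper and the expect_pgm_int()/get_pgm_int_code() pattern from the
patch. The ESSA_SET_STABLE/ESSA_SET_UNUSED action codes (1 and 2) and the
test_states() name are assumptions here, taken from the Linux kernel's
arch/s390 definitions; this patch itself does not define them.

/* Sketch only: action codes assumed from the Linux kernel's arch/s390
 * headers (set stable = 1, set unused = 2), not defined by this patch. */
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

static void test_states(void)
{
	expect_pgm_int();
	/* Valid state transitions on page0 must not raise a program
	 * interrupt: stable -> unused -> stable. */
	essa(ESSA_SET_STABLE, page0);
	essa(ESSA_SET_UNUSED, page0);
	essa(ESSA_SET_STABLE, page0);
	report("state transitions", get_pgm_int_code() == 0);
}

Such a test would be called from main() after test_params() and should leave
the page stable again, so that later tests do not run into a discarded page.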