On 8/26/22 18:11, Janis Schoetterl-Glausch wrote:
Generate specification exceptions and check that they occur.
Signed-off-by: Janis Schoetterl-Glausch <scgl@xxxxxxxxxxxxx>
Minor issues below, apart from that:
Reviewed-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
---
s390x/Makefile | 1 +
lib/s390x/asm/arch_def.h | 5 +
s390x/spec_ex.c | 194 +++++++++++++++++++++++++++++++++++++++
s390x/unittests.cfg | 3 +
4 files changed, 203 insertions(+)
create mode 100644 s390x/spec_ex.c
diff --git a/s390x/Makefile b/s390x/Makefile
index efd5e0c1..58b1bf54 100644
--- a/s390x/Makefile
+++ b/s390x/Makefile
@@ -27,6 +27,7 @@ tests += $(TEST_DIR)/uv-host.elf
tests += $(TEST_DIR)/edat.elf
tests += $(TEST_DIR)/mvpg-sie.elf
tests += $(TEST_DIR)/spec_ex-sie.elf
+tests += $(TEST_DIR)/spec_ex.elf
tests += $(TEST_DIR)/firq.elf
tests += $(TEST_DIR)/epsw.elf
tests += $(TEST_DIR)/adtl-status.elf
diff --git a/lib/s390x/asm/arch_def.h b/lib/s390x/asm/arch_def.h
index e7ae454b..b6e60fb0 100644
--- a/lib/s390x/asm/arch_def.h
+++ b/lib/s390x/asm/arch_def.h
@@ -41,6 +41,11 @@ struct psw {
	uint64_t addr;
};
+struct short_psw {
+	uint32_t mask;
+	uint32_t addr;
+};
+
struct cpu {
	struct lowcore *lowcore;
	uint64_t *stack;
diff --git a/s390x/spec_ex.c b/s390x/spec_ex.c
new file mode 100644
index 00000000..68469e4b
--- /dev/null
+++ b/s390x/spec_ex.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright IBM Corp. 2021, 2022
+ *
+ * Specification exception test.
+ * Tests that specification exceptions occur when expected.
+ *
+ * Can be extended by adding triggers to spec_ex_triggers, see comments below.
+ */
+#include <stdlib.h>
+#include <libcflat.h>
+#include <bitops.h>
+#include <asm/interrupt.h>
+
+/* toggled to signal occurrence of invalid psw fixup */
+static bool invalid_psw_expected;
+static struct psw expected_psw;
+static struct psw invalid_psw;
+static struct psw fixup_psw;
+
+/*
+ * The standard program exception handler cannot deal with invalid old PSWs,
+ * especially not invalid instruction addresses, as in that case one cannot
+ * find the instruction following the faulting one from the old PSW.
+ * The PSW to return to is set by load_psw.
+ */
+static void fixup_invalid_psw(struct stack_frame_int *stack)
+{
+	/* signal occurrence of invalid psw fixup */
+	invalid_psw_expected = false;
Hmmmm (TM), assert(invalid_psw_expected)? See the sketch below.
+	invalid_psw = lowcore.pgm_old_psw;
+	lowcore.pgm_old_psw = fixup_psw;
+}
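
I.e. something like this (untested sketch, assuming the libcflat assert() is usable inside the pgm handler):

	static void fixup_invalid_psw(struct stack_frame_int *stack)
	{
		/* the fixup path must only be taken when armed via expect_invalid_psw() */
		assert(invalid_psw_expected);
		/* signal occurrence of invalid psw fixup */
		invalid_psw_expected = false;
		invalid_psw = lowcore.pgm_old_psw;
		lowcore.pgm_old_psw = fixup_psw;
	}

That way a stray specification exception would fail loudly instead of silently taking the fixup path.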
+
+/*
+ * Load a possibly invalid psw, but set up fixup_psw beforehand,
+ * so that fixup_invalid_psw() can bring us back onto the right track.
+ * Also acts as a compiler barrier -> none is required in expect/check_invalid_psw
+ */
+static void load_psw(struct psw psw)
+{
+	uint64_t scratch;
+
+	/*
+	 * The fixup psw is current psw with the instruction address replaced by
is the current psw
+	 * the address of the nop following the instruction loading the new psw.
+	 */
+	fixup_psw.mask = extract_psw_mask();
+	asm volatile ( "larl	%[scratch],0f\n"
+		"	stg	%[scratch],%[fixup_addr]\n"
+		"	lpswe	%[psw]\n"
+		"0:	nop\n"
+		: [scratch] "=&d" (scratch),
+		  [fixup_addr] "=&T" (fixup_psw.addr)
+		: [psw] "Q" (psw)
+		: "cc", "memory"
+	);
+}
+
+static void load_short_psw(struct short_psw psw)
+{
+	uint64_t scratch;
+
+	fixup_psw.mask = extract_psw_mask();
+	asm volatile ( "larl	%[scratch],0f\n"
+		"	stg	%[scratch],%[fixup_addr]\n"
+		"	lpsw	%[psw]\n"
+		"0:	nop\n"
+		: [scratch] "=&d" (scratch),
+		  [fixup_addr] "=&T" (fixup_psw.addr)
+		: [psw] "Q" (psw)
+		: "cc", "memory"
+	);
+}
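
For reference: lpsw loads the 8-byte short-format PSW (32-bit mask plus 32-bit address, matching struct short_psw above), whereas lpswe loads the full 16-byte format. A caller of the short variant would look something like this (illustrative only, values made up):

	struct short_psw sp = {
		.mask = 0x00080000,	/* made-up mask, not necessarily valid */
		.addr = 0xdeadbeee,	/* made-up address */
	};

	load_short_psw(sp);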
+
+static void expect_invalid_psw(struct psw psw)
+{
+	expected_psw = psw;
+	invalid_psw_expected = true;
+}
+
+static int check_invalid_psw(void)
+{
/* Since the fixup sets this to false, we check for false here. */
+	if (!invalid_psw_expected) {
+		if (expected_psw.mask == invalid_psw.mask &&
+		    expected_psw.addr == invalid_psw.addr)
+			return 0;
+		report_fail("Wrong invalid PSW");
+	} else {
+		report_fail("Expected exception due to invalid PSW");
+	}
+	return 1;
+}
+
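
For readers following along, a trigger built on these helpers would look roughly like this (illustrative sketch, names and values made up):

	/* Bit 12 of an extended-format PSW must be 0, so this mask is invalid */
	static void example_psw_bit_12_is_1(void)
	{
		struct psw invalid = {
			.mask = BIT(63 - 12),
			.addr = 0x00000000deadbeee,
		};

		expect_invalid_psw(invalid);
		load_psw(invalid);
		check_invalid_psw();
	}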