Re: [PATCH 2/2] s390x: add test for SIGP STORE_ADTL_STATUS order

On 3/28/22 11:30, Nico Boehr wrote:
Add a test for SIGP STORE_ADDITIONAL_STATUS order.

There are several cases to cover:
- when neither vector nor guarded-storage facility is available, check
   the order is rejected.
- when one of the facilities is there, test the order is rejected and
   adtl_status is not touched when the target CPU is running or when an
   invalid CPU address is specified. Also check the order is rejected
   in case of invalid alignment.
- when the vector facility is there, write some data to the CPU's
   vector registers and check we get the right contents.
- when the guarded-storage facility is there, populate the CPU's
   guarded-storage registers with some data and again check we get the
   right contents.

To make sure we cover all these cases, adjust unittests.cfg to run the
test with both guarded-storage and vector facility off and on. In TCG, we don't
have guarded-storage support, so we just run with vector facility off and on.

Signed-off-by: Nico Boehr <nrb@xxxxxxxxxxxxx>
---
  s390x/Makefile      |   1 +
  s390x/adtl_status.c | 407 ++++++++++++++++++++++++++++++++++++++++++++
  s390x/unittests.cfg |  25 +++
  3 files changed, 433 insertions(+)
  create mode 100644 s390x/adtl_status.c

diff --git a/s390x/Makefile b/s390x/Makefile
index 53b0fe044fe7..47e915fbdc51 100644
--- a/s390x/Makefile
+++ b/s390x/Makefile
@@ -26,6 +26,7 @@ tests += $(TEST_DIR)/edat.elf
  tests += $(TEST_DIR)/mvpg-sie.elf
  tests += $(TEST_DIR)/spec_ex-sie.elf
  tests += $(TEST_DIR)/firq.elf
+tests += $(TEST_DIR)/adtl_status.elf
  pv-tests += $(TEST_DIR)/pv-diags.elf

diff --git a/s390x/adtl_status.c b/s390x/adtl_status.c
new file mode 100644
index 000000000000..7a2bd2b07804
--- /dev/null
+++ b/s390x/adtl_status.c
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tests sigp store additional status order
+ *
+ * Copyright IBM Corp. 2022
+ *
+ * Authors:
+ *    Nico Boehr <nrb@xxxxxxxxxxxxx>
+ */
+#include <libcflat.h>
+#include <asm/asm-offsets.h>
+#include <asm/interrupt.h>
+#include <asm/page.h>
+#include <asm/facility.h>
+#include <asm-generic/barrier.h>
+#include <asm/sigp.h>
+
+#include <smp.h>
+#include <gs.h>
+#include <alloc_page.h>
+
+static int testflag = 0;
+
+#define INVALID_CPU_ADDRESS -4711
+
+struct mcesa_lc12 {
+	uint8_t vector_reg[0x200];            /* 0x000 */

Hrm we could do:
__uint128_t vregs[32];

or:
uint64_t vregs[32][2];

or leave it as it is.

+	uint8_t reserved200[0x400 - 0x200];   /* 0x200 */
+	struct gs_cb gs_cb;                   /* 0x400 */
+	uint8_t reserved420[0x800 - 0x420];   /* 0x420 */
+	uint8_t reserved800[0x1000 - 0x800];  /* 0x800 */
+};
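
With the __uint128_t variant from above, the struct would then read something
like (untested sketch):

struct mcesa_lc12 {
	__uint128_t vregs[32];                /* 0x000 */
	uint8_t reserved200[0x400 - 0x200];   /* 0x200 */
	struct gs_cb gs_cb;                   /* 0x400 */
	uint8_t reserved420[0x800 - 0x420];   /* 0x420 */
	uint8_t reserved800[0x1000 - 0x800];  /* 0x800 */
};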

Do we have plans to use this struct in the future for other tests?

+
+static struct mcesa_lc12 adtl_status __attribute__((aligned(4096)));
+
+#define NUM_VEC_REGISTERS 32
+#define VEC_REGISTER_SIZE 16

I'd shove that into lib/s390x/asm/float.h or create a vector.h as
#define VEC_REGISTERS_NUM 32
#define VEC_REGISTERS_SIZE 16

Most likely vector.h since we can do both int and float with vector regs.
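
Something along these lines, wherever the header ends up (guard name just
illustrative):

/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _S390X_VECTOR_H_
#define _S390X_VECTOR_H_

#define VEC_REGISTERS_NUM	32
#define VEC_REGISTERS_SIZE	16

#endif /* _S390X_VECTOR_H_ */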

+static uint8_t expected_vec_contents[NUM_VEC_REGISTERS][VEC_REGISTER_SIZE];
+
+static struct gs_cb gs_cb;
+static struct gs_epl gs_epl;
+
+static bool memisset(void *s, int c, size_t n)
+{
+	uint8_t *p = s;
+	size_t i;
+
+	for (i = 0; i < n; i++) {
+		if (p[i] != c) {
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void wait_for_flag(void)
+{
+	while (!testflag)
+		mb();
+}
+
+static void set_flag(int val)
+{
+	mb();
+	testflag = val;
+	mb();
+}
+
+static void test_func(void)
+{
+	set_flag(1);
+}
+
+static int have_adtl_status(void)

bool
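
i.e. something like:

static bool have_adtl_status(void)
{
	return test_facility(133) || test_facility(129);
}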

+{
+	return test_facility(133) || test_facility(129);
+}
+
+static void test_store_adtl_status(void)
+{
+	uint32_t status = -1;
+	int cc;
+
+	report_prefix_push("store additional status");
+
+	if (!have_adtl_status()) {
+		report_skip("no guarded-storage or vector facility installed");
+		goto out;
+	}
+
+	memset(&adtl_status, 0xff, sizeof(adtl_status));
+
+	report_prefix_push("running");
+	smp_cpu_restart(1);
+
+	cc = smp_sigp(1, SIGP_STORE_ADDITIONAL_STATUS,
+		  (unsigned long)&adtl_status, &status);
+
+	report(cc == 1, "CC = 1");
+	report(status == SIGP_STATUS_INCORRECT_STATE, "status = INCORRECT_STATE");
+	report(memisset(&adtl_status, 0xff, sizeof(adtl_status)),
+	       "additional status not touched");
+
+	report_prefix_pop();
+
+	report_prefix_push("invalid CPU address");
+
+	cc = sigp(INVALID_CPU_ADDRESS, SIGP_STORE_ADDITIONAL_STATUS,
+		  (unsigned long)&adtl_status, &status);
+	report(cc == 3, "CC = 3");
+	report(memisset(&adtl_status, 0xff, sizeof(adtl_status)),
+	       "additional status not touched");
+
+	report_prefix_pop();
+
+	report_prefix_push("unaligned");
+	smp_cpu_stop(1);
+
+	cc = smp_sigp(1, SIGP_STORE_ADDITIONAL_STATUS,
+		  (unsigned long)&adtl_status + 256, &status);
+	report(cc == 1, "CC = 1");
+	report(status == SIGP_STATUS_INVALID_PARAMETER, "status = INVALID_PARAMETER");
+	report(memisset(&adtl_status, 0xff, sizeof(adtl_status)),
+	       "additional status not touched");
+
+	report_prefix_pop();
+
+out:
+	report_prefix_pop();
+}
+
+static void test_store_adtl_status_unavail(void)
+{
+	uint32_t status = 0;
+	int cc;
+
+	report_prefix_push("store additional status unvailable");

unavailable

+
+	if (have_adtl_status()) {
+		report_skip("guarded-storage or vector facility installed");
+		goto out;
+	}
+
+	report_prefix_push("not accepted");
+	smp_cpu_stop(1);
+
+	memset(&adtl_status, 0xff, sizeof(adtl_status));
+
+	cc = smp_sigp(1, SIGP_STORE_ADDITIONAL_STATUS,
+		  (unsigned long)&adtl_status, &status);
+
+	report(cc == 1, "CC = 1");
+	report(status == SIGP_STATUS_INVALID_ORDER,
+	       "status = INVALID_ORDER");
+	report(memisset(&adtl_status, 0xff, sizeof(adtl_status)),
+	       "additional status not touched");
+
+	report_prefix_pop();
+
+out:
+	report_prefix_pop();
+}
+
+static void restart_write_vector(void)
+{
+	uint8_t *vec_reg;
+	/* vlm handles at most 16 registers at a time */
+	uint8_t *vec_reg_16_31 = &expected_vec_contents[16][0];
+	int i;
+
+	for (i = 0; i < NUM_VEC_REGISTERS; i++) {
+		vec_reg = &expected_vec_contents[i][0];
+		/* i+1 to avoid zero content */
+		memset(vec_reg, i + 1, VEC_REGISTER_SIZE);
+	}
+
+	ctl_set_bit(0, CTL0_VECTOR);
+
+	asm volatile (
+		"	.machine z13\n"
+		"	vlm 0,15, %[vec_reg_0_15]\n"
+		"	vlm 16,31, %[vec_reg_16_31]\n"
+		:
+		: [vec_reg_0_15] "Q"(expected_vec_contents),
+		  [vec_reg_16_31] "Q"(*vec_reg_16_31)
+		: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
+		  "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
+		  "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27",
+		  "v28", "v29", "v30", "v31", "memory"

We change memory on a load?

+	);

We could also move vlm as a function to vector.h and do two calls.
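
Untested sketch of what such a helper could look like (name made up); that
would also make the memory clobber question above go away, since the
constraint then tells the compiler the whole buffer is read:

static inline void vlm_0_15(const void *buf)
{
	/* load vector registers 0-15 from a 256 byte buffer */
	asm volatile("	.machine z13\n"
		     "	vlm 0,15,%[buf]\n"
		     :
		     : [buf] "Q"(*(const uint8_t (*)[256])buf)
		     : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
		       "v8", "v9", "v10", "v11", "v12", "v13", "v14",
		       "v15");
}

plus a second one (or a small macro) for registers 16-31.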

[...]
diff --git a/s390x/unittests.cfg b/s390x/unittests.cfg
index 1600e714c8b9..2e65106fa140 100644
--- a/s390x/unittests.cfg
+++ b/s390x/unittests.cfg
@@ -78,6 +78,31 @@ extra_params=-name kvm-unit-test --uuid 0fb84a86-727c-11ea-bc55-0242ac130003 -sm
  file = smp.elf
  smp = 2
+[adtl_status-kvm]

Hmmmmm (TM) I don't really want to mix - and _.
Having spec_ex-sie.c is already bad enough.

+file = adtl_status.elf
+smp = 2
+accel = kvm
+extra_params = -cpu host,gs=on,vx=on
+
+[adtl_status-no-vec-no-gs-kvm]
+file = adtl_status.elf
+smp = 2
+accel = kvm
+extra_params = -cpu host,gs=off,vx=off
+
+[adtl_status-tcg]
+file = adtl_status.elf
+smp = 2
+accel = tcg
+# no guarded-storage support in tcg
+extra_params = -cpu qemu,vx=on
+
+[adtl_status-no-vec-no-gs-tcg]
+file = adtl_status.elf
+smp = 2
+accel = tcg
+extra_params = -cpu qemu,gs=off,vx=off
+

Are you trying to sort this in any way?
Normally we put new entries at the EOF.

  [sclp-1g]
  file = sclp.elf
  extra_params = -m 1G



