Re: [PATCH v3 5/5] KVM: arm64: selftests: Test for setting ID register from usersapce

Hi,

On 10/11/23 21:57, Oliver Upton wrote:
> From: Jing Zhang <jingzhangos@xxxxxxxxxx>

nit: s/usersapce/userspace/ in the title
> 
> Add tests to verify setting ID registers from userspace is handled
> correctly by KVM. Also add a test case to use ioctl
> KVM_ARM_GET_REG_WRITABLE_MASKS to get writable masks.
> 
> Signed-off-by: Jing Zhang <jingzhangos@xxxxxxxxxx>
> Signed-off-by: Oliver Upton <oliver.upton@xxxxxxxxx>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../selftests/kvm/aarch64/set_id_regs.c       | 479 ++++++++++++++++++
>  2 files changed, 480 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/aarch64/set_id_regs.c
> 
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index 07b3f4dc1a77..4f4f6ad025f4 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -156,6 +156,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
>  TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
>  TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
>  TEST_GEN_PROGS_aarch64 += aarch64/psci_test
> +TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
>  TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
>  TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
>  TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
> diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
> new file mode 100644
> index 000000000000..5c0718fd1705
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
> @@ -0,0 +1,479 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * set_id_regs - Test for setting ID register from usersapce.
> + *
> + * Copyright (c) 2023 Google LLC.
> + *
> + *
> + * Test that KVM supports setting ID registers from userspace and handles the
> + * feature set correctly.
> + */
> +
> +#include <stdint.h>
> +#include "kvm_util.h"
> +#include "processor.h"
> +#include "test_util.h"
> +#include <linux/bitfield.h>
> +
> +enum ftr_type {
> +	FTR_EXACT,			/* Use a predefined safe value */
In practice, FTR_EXACT is not used anywhere in this patch. Is it worth keeping?
Same question for the associated logic in get_safe_value()/get_invalid_value().
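If it is kept, maybe exercise it with at least one entry? A rough,
untested sketch (the field is picked arbitrarily for illustration,
not a request to use DoubleLock specifically):

	/* hypothetical FTR_EXACT user: only safe_val itself is accepted */
	S_REG_FTR_BITS(FTR_EXACT, ID_AA64DFR0_EL1, DoubleLock, 0),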
> +	FTR_LOWER_SAFE,			/* Smaller value is safe */
> +	FTR_HIGHER_SAFE,		/* Bigger value is safe */
> +	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
> +	FTR_END,			/* Mark the last ftr bits */
> +};
> +
> +#define FTR_SIGNED	true	/* Value should be treated as signed */
> +#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
> +
> +struct reg_ftr_bits {
> +	char *name;
> +	bool sign;
> +	enum ftr_type type;
> +	uint8_t shift;
> +	uint64_t mask;
> +	int64_t safe_val;
> +};
> +
> +struct test_feature_reg {
> +	uint32_t reg;
> +	const struct reg_ftr_bits *ftr_bits;
> +};
> +
> +#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL)	\
> +	{								\
> +		.name = #NAME,						\
> +		.sign = SIGNED,						\
> +		.type = TYPE,						\
> +		.shift = SHIFT,						\
> +		.mask = MASK,						\
> +		.safe_val = SAFE_VAL,					\
> +	}
> +
> +#define REG_FTR_BITS(type, reg, field, safe_val) \
> +	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
> +		       reg##_##field##_MASK, safe_val)
> +
> +#define S_REG_FTR_BITS(type, reg, field, safe_val) \
> +	__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
> +		       reg##_##field##_MASK, safe_val)
> +
> +#define REG_FTR_END					\
> +	{						\
> +		.type = FTR_END,			\
> +	}
> +
> +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
Strictly speaking, a lower value is not always safe. For instance,
from Armv8.1, if FEAT_PMUv3 is implemented, the value 0b0001 is not
permitted. But I guess that kind of consistency is left to userspace.
Maybe worth a comment, here and below.

You may at least clarify what 'safe' means here.
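Something like the below, above enum ftr_type, would already help
(the wording is only a suggestion):

	/*
	 * 'safe' here means a value the guest can be given without
	 * advertising more than the host implements; it does not mean
	 * every architecturally legal combination is produced.
	 * Cross-field consistency (e.g. PMUVer == 0b0001 not being
	 * permitted once FEAT_PMUv3 is implemented, from Armv8.1) is
	 * left to userspace.
	 */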
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
> +	REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
> +	REG_FTR_END,
> +};
> +
> +#define TEST_REG(id, table)			\
> +	{					\
> +		.reg = id,			\
> +		.ftr_bits = &((table)[0]),	\
> +	}
> +
> +static struct test_feature_reg test_regs[] = {
> +	TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
> +	TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
> +	TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
> +	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
> +	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
> +	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
> +	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
> +	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
> +	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
> +	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
> +};
> +
> +#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
> +
> +static void guest_code(void)
> +{
> +	GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
> +
> +	GUEST_DONE();
> +}
> +
> +/* Return a safe value to a given ftr_bits an ftr value */
nit: s/an ftr value/and ftr value/
> +uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
> +{
> +	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
> +
> +	if (ftr_bits->type == FTR_UNSIGNED) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = ftr_bits->safe_val;
> +			break;
> +		case FTR_LOWER_SAFE:
> +			if (ftr > 0)
> +				ftr--;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			if (ftr < ftr_max)
> +				ftr++;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr == ftr_max)
> +				ftr = 0;
> +			else if (ftr != 0)
> +				ftr++;
> +			break;
> +		default:
> +			break;
> +		}
> +	} else if (ftr != ftr_max) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = ftr_bits->safe_val;
> +			break;
> +		case FTR_LOWER_SAFE:
> +			if (ftr > 0)
> +				ftr--;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			if (ftr < ftr_max - 1)
> +				ftr++;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr != 0 && ftr != ftr_max - 1)
> +				ftr++;
> +			break;
> +		default:
> +			break;
> +		}
> +	}
> +
> +	return ftr;
In some cases we are going to return the same value as ftr, and the
test won't do much. Shouldn't we return an error value in that case
and skip the test instead?
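Something along those lines maybe (untested sketch, names are mine):

	/* returns true iff a strictly different safe value exists */
	static bool get_safe_value(const struct reg_ftr_bits *ftr_bits,
				   uint64_t ftr, uint64_t *safe)
	{
		/* ... same selection logic as today ... */
		return *safe != ftr;
	}

with test_reg_set_success() returning false in that case, so the loop
in test_user_set_reg() can do:

	if (!test_reg_set_success(vcpu, reg, &ftr_bits[j])) {
		ksft_test_result_skip("%s: no safe value\n",
				      ftr_bits[j].name);
		continue;
	}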
> +}
> +
> +/* Return an invalid value to a given ftr_bits an ftr value */
> +uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
> +{
> +	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
> +
> +	if (ftr_bits->type == FTR_UNSIGNED) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
> +			break;
> +		case FTR_LOWER_SAFE:
> +			ftr++;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			ftr--;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr == 0)
> +				ftr = ftr_max;
is ftr_max really invalid here, given that higher values are safe and
ftr_max >= ftr? Or is the point that, 0 being the biggest value for
this ftr_type, anything non-zero sits below it?
> +			else
> +				ftr--;
> +			break;
> +		default:
> +			break;
> +		}
> +	} else if (ftr != ftr_max) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
> +			break;
> +		case FTR_LOWER_SAFE:
> +			ftr++;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			ftr--;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr == 0)
> +				ftr = ftr_max - 1;
> +			else
> +				ftr--;
> +			break;
> +		default:
> +			break;
> +		}
> +	} else {
> +		ftr = 0;
> +	}
> +
> +	return ftr;
> +}
> +
> +static void test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
> +				 const struct reg_ftr_bits *ftr_bits)
> +{
> +	uint8_t shift = ftr_bits->shift;
> +	uint64_t mask = ftr_bits->mask;
> +	uint64_t val, new_val, ftr;
> +
> +	vcpu_get_reg(vcpu, reg, &val);
> +	ftr = (val & mask) >> shift;
> +
> +	ftr = get_safe_value(ftr_bits, ftr);
> +
> +	ftr <<= shift;
> +	val &= ~mask;
> +	val |= ftr;
> +
> +	vcpu_set_reg(vcpu, reg, val);
> +	vcpu_get_reg(vcpu, reg, &new_val);
> +	TEST_ASSERT_EQ(new_val, val);
> +}
> +
> +static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
> +			      const struct reg_ftr_bits *ftr_bits)
> +{
> +	uint8_t shift = ftr_bits->shift;
> +	uint64_t mask = ftr_bits->mask;
> +	uint64_t val, old_val, ftr;
> +	int r;
> +
> +	vcpu_get_reg(vcpu, reg, &val);
> +	ftr = (val & mask) >> shift;
> +
> +	ftr = get_invalid_value(ftr_bits, ftr);
> +
> +	old_val = val;
> +	ftr <<= shift;
> +	val &= ~mask;
> +	val |= ftr;
> +
> +	r = __vcpu_set_reg(vcpu, reg, val);
> +	TEST_ASSERT(r < 0 && errno == EINVAL,
> +		    "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
> +
> +	vcpu_get_reg(vcpu, reg, &val);
> +	TEST_ASSERT_EQ(val, old_val);
> +}
> +
> +static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
> +{
> +	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
> +	struct reg_mask_range range = {
> +		.addr = (__u64)masks,
> +	};
> +	int ret;
> +
> +	/* KVM should return error when reserved field is not zero */
> +	range.reserved[0] = 1;
> +	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
> +	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
> +
> +	/* Get writable masks for feature ID registers */
> +	memset(range.reserved, 0, sizeof(range.reserved));
> +	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
> +
> +	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
> +		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
> +		uint32_t reg_id = test_regs[i].reg;
> +		uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
> +		int idx;
> +
> +		/* Get the index to masks array for the idreg */
> +		idx = KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(reg_id), sys_reg_Op1(reg_id),
> +						   sys_reg_CRn(reg_id), sys_reg_CRm(reg_id),
> +						   sys_reg_Op2(reg_id));
> +
> +		for (int j = 0;  ftr_bits[j].type != FTR_END; j++) {
> +			/* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */
> +			if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
> +				ksft_test_result_skip("%s on AARCH64 only system\n",
> +						      ftr_bits[j].name);
> +				continue;
> +			}
> +
> +			/* Make sure the feature field is writable */
> +			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
> +
> +			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);
> +			test_reg_set_success(vcpu, reg, &ftr_bits[j]);
> +
> +			ksft_test_result_pass("%s\n", ftr_bits[j].name);
> +		}
> +	}
> +}
> +
> +static void test_guest_reg_read(struct kvm_vcpu *vcpu)
> +{
> +	bool done = false;
> +	struct ucall uc;
> +	uint64_t val;
> +
> +	while (!done) {
> +		vcpu_run(vcpu);
> +
> +		switch (get_ucall(vcpu, &uc)) {
> +		case UCALL_ABORT:
> +			REPORT_GUEST_ASSERT(uc);
> +			break;
> +		case UCALL_SYNC:
> +			/* Make sure the written values are seen by guest */
> +			vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(uc.args[2]), &val);
> +			TEST_ASSERT_EQ(val, uc.args[3]);
> +			break;
> +		case UCALL_DONE:
> +			done = true;
> +			break;
> +		default:
> +			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
> +		}
> +	}
> +}
> +
> +int main(void)
> +{
> +	struct kvm_vcpu *vcpu;
> +	struct kvm_vm *vm;
> +	bool aarch64_only;
> +	uint64_t val, el0;
> +	int ftr_cnt;
> +
> +	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
> +
> +	/* Check for AARCH64 only system */
> +	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
> +	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
> +	aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
> +
> +	ksft_print_header();
> +
> +	ftr_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
> +		  ARRAY_SIZE(test_regs);
> +
> +	ksft_set_plan(ftr_cnt);
> +
> +	test_user_set_reg(vcpu, aarch64_only);
> +	test_guest_reg_read(vcpu);
> +
> +	kvm_vm_free(vm);
> +
> +	ksft_finished();
> +}

Thanks

Eric



