When a guest tries to execute code from an invalid physical address,
KVM should inject an external abort. This test is based on a test
originally posted by Alexandru Elisei. This version avoids hard-coding
the invalid physical address used.

Cc: Alexandru Elisei <alexandru.elisei@xxxxxxx>
Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
 arm/selftest.c      | 94 +++++++++++++++++++++++++++++++++++++++++++++
 lib/arm64/asm/esr.h |  3 ++
 2 files changed, 97 insertions(+)

diff --git a/arm/selftest.c b/arm/selftest.c
index 6d74fa1fa4c4..4495b161cdd5 100644
--- a/arm/selftest.c
+++ b/arm/selftest.c
@@ -8,6 +8,7 @@
 #include <libcflat.h>
 #include <util.h>
 #include <devicetree.h>
+#include <vmalloc.h>
 #include <asm/setup.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
@@ -15,6 +16,7 @@
 #include <asm/thread_info.h>
 #include <asm/psci.h>
 #include <asm/smp.h>
+#include <asm/mmu.h>
 #include <asm/barrier.h>
 
 static cpumask_t ready, valid;
@@ -65,9 +67,43 @@ static void check_setup(int argc, char **argv)
 		report_abort("missing input");
 }
 
+unsigned long check_pabt_invalid_paddr;
+static bool check_pabt_init(void)
+{
+	phys_addr_t highest_end = 0;
+	unsigned long vaddr;
+	struct mem_region *r;
+
+	/*
+	 * We need a physical address that isn't backed by anything. Without
+	 * fully parsing the device tree there's no way to be certain of any
+	 * address, but an unknown address immediately following the highest
+	 * memory region has a reasonable chance. This is because we can
+	 * assume that that memory region could have been larger, if the user
+	 * had configured more RAM, and therefore no MMIO region should be
+	 * there.
+	 */
+	for (r = mem_regions; r->end; ++r) {
+		if (r->flags & MR_F_IO)
+			continue;
+		if (r->end > highest_end)
+			highest_end = PAGE_ALIGN(r->end);
+	}
+
+	if (mem_region_get_flags(highest_end) != MR_F_UNKNOWN)
+		return false;
+
+	vaddr = (unsigned long)vmap(highest_end, PAGE_SIZE);
+	mmu_clear_user(current_thread_info()->pgtable, vaddr);
+	check_pabt_invalid_paddr = vaddr;
+
+	return true;
+}
+
 static struct pt_regs expected_regs;
 static bool und_works;
 static bool svc_works;
+static bool pabt_works;
 #if defined(__arm__)
 /*
  * Capture the current register state and execute an instruction
@@ -166,6 +202,30 @@ static bool check_svc(void)
 	return svc_works;
 }
 
+static void pabt_handler(struct pt_regs *regs)
+{
+	expected_regs.ARM_lr = expected_regs.ARM_pc;
+	expected_regs.ARM_pc = expected_regs.ARM_r9;
+
+	pabt_works = check_regs(regs);
+
+	regs->ARM_pc = regs->ARM_lr;
+}
+
+static bool check_pabt(void)
+{
+	install_exception_handler(EXCPTN_PABT, pabt_handler);
+
+	test_exception("ldr	r9, =check_pabt_invalid_paddr\n"
+		       "ldr	r9, [r9]\n",
+		       "blx	r9\n",
+		       "", "r9", "lr");
+
+	install_exception_handler(EXCPTN_PABT, NULL);
+
+	return pabt_works;
+}
+
 static void user_psci_system_off(struct pt_regs *regs)
 {
 	__user_psci_system_off();
@@ -285,6 +345,35 @@ static bool check_svc(void)
 	return svc_works;
 }
 
+static void pabt_handler(struct pt_regs *regs, unsigned int esr)
+{
+	bool is_extabt = (esr & ESR_EL1_FSC_MASK) == ESR_EL1_FSC_EXTABT;
+
+	expected_regs.regs[30] = expected_regs.pc + 4;
+	expected_regs.pc = expected_regs.regs[9];
+
+	pabt_works = check_regs(regs) && is_extabt;
+
+	regs->pc = regs->regs[30];
+}
+
+static bool check_pabt(void)
+{
+	enum vector v = check_vector_prep();
+
+	install_exception_handler(v, ESR_EL1_EC_IABT_EL1, pabt_handler);
+
+	test_exception("adrp	x9, check_pabt_invalid_paddr\n"
+		       "add	x9, x9, :lo12:check_pabt_invalid_paddr\n"
+		       "ldr	x9, [x9]\n",
+		       "blr	x9\n",
+		       "", "x9", "x30");
+
+	install_exception_handler(v, ESR_EL1_EC_IABT_EL1, NULL);
+
+	return pabt_works;
+}
+
 static void user_psci_system_off(struct pt_regs *regs, unsigned int esr)
 {
 	__user_psci_system_off();
@@ -302,6 +391,11 @@ static void check_vectors(void *arg __unused)
 		install_exception_handler(EL0_SYNC_64, ESR_EL1_EC_UNKNOWN,
 					  user_psci_system_off);
 #endif
+	} else {
+		if (!check_pabt_init())
+			report_skip("Couldn't guess an invalid physical address");
+		else
+			report(check_pabt(), "pabt");
 	}
 	exit(report_summary());
 }
diff --git a/lib/arm64/asm/esr.h b/lib/arm64/asm/esr.h
index 8e5af4d90767..8c351631b0a0 100644
--- a/lib/arm64/asm/esr.h
+++ b/lib/arm64/asm/esr.h
@@ -44,4 +44,7 @@
 #define ESR_EL1_EC_BKPT32	(0x38)
 #define ESR_EL1_EC_BRK64	(0x3C)
 
+#define ESR_EL1_FSC_MASK	(0x3F)
+#define ESR_EL1_FSC_EXTABT	(0x10)
+
 #endif /* _ASMARM64_ESR_H_ */
-- 
2.21.1
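
Aside for context (not part of the patch): the arm64 pabt_handler only
counts the test as passing when the fault status code in ESR_EL1 reports
a synchronous external abort, which is what KVM injects for an
instruction fetch from unbacked guest physical memory. The following is
a minimal standalone sketch of that decode, assuming the architectural
ESR_EL1 layout (EC in bits [31:26], IFSC in bits [5:0]); all macro and
function names below are illustrative and do not come from
lib/arm64/asm/esr.h.

/* Illustrative only: decode the two ESR_EL1 fields the new test relies on. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EC_SHIFT	26	/* ESR_EL1.EC lives in bits [31:26] */
#define EC_MASK		0x3f
#define EC_IABT_CUR_EL	0x21	/* instruction abort, no change in EL */
#define FSC_MASK	0x3f	/* IFSC, bits [5:0] of the ISS */
#define FSC_EXTABT	0x10	/* synchronous external abort */

static bool is_external_iabt(uint64_t esr)
{
	unsigned int ec = (esr >> EC_SHIFT) & EC_MASK;
	unsigned int fsc = esr & FSC_MASK;

	return ec == EC_IABT_CUR_EL && fsc == FSC_EXTABT;
}

int main(void)
{
	/* EC=0x21, IL=1, IFSC=0x10: the syndrome the test expects to observe. */
	uint64_t esr = (0x21ull << 26) | (1ull << 25) | 0x10;

	printf("external instruction abort: %s\n",
	       is_external_iabt(esr) ? "yes" : "no");
	return 0;
}

Only the FSC comparison depends on the two constants added to esr.h; the
EC selection is already handled by installing the handler for
ESR_EL1_EC_IABT_EL1.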