[PATCH 09/15] arm64: vectors support

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
 arm/cstart64.S            | 142 ++++++++++++++++++++++++++++++++++++++-
 arm/selftest.c            | 129 ++++++++++++++++++++++++++++++++---
 arm/unittests.cfg         |   2 -
 config/config-arm64.mak   |   1 +
 lib/arm64/asm-offsets.c   |  16 +++++
 lib/arm64/asm/esr.h       |  43 ++++++++++++
 lib/arm64/asm/processor.h |  52 ++++++++++++++
 lib/arm64/asm/ptrace.h    |  95 ++++++++++++++++++++++++++
 lib/arm64/processor.c     | 168 ++++++++++++++++++++++++++++++++++++++++++++++
 9 files changed, 637 insertions(+), 11 deletions(-)
 create mode 100644 lib/arm64/asm/esr.h
 create mode 100644 lib/arm64/asm/processor.h
 create mode 100644 lib/arm64/asm/ptrace.h
 create mode 100644 lib/arm64/processor.c

diff --git a/arm/cstart64.S b/arm/cstart64.S
index 1d98066d0e187..d1860a94fb2d3 100644
--- a/arm/cstart64.S
+++ b/arm/cstart64.S
@@ -7,6 +7,7 @@
  */
 #define __ASSEMBLY__
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 .section .init
 
@@ -26,7 +27,7 @@ start:
 	msr	cpacr_el1, x0
 
 	/* set up exception handling */
-//	bl	exceptions_init
+	bl	exceptions_init
 
 	/* complete setup */
 	ldp	x0, x1, [sp], #16
@@ -40,9 +41,148 @@ start:
 	bl	exit
 	b	halt
 
/*
 * exceptions_init: point VBAR_EL1 at our vector table.
 * The isb ensures the new vector base takes effect before we return.
 */
exceptions_init:
	adr	x0, vector_table
	msr	vbar_el1, x0
	isb
	ret
+
 .text
 
 .globl halt
 halt:
 1:	wfi
 	b	1b
+
+/*
+ * Vectors
+ * Adapted from arch/arm64/kernel/entry.S
+ */
/*
 * Exception entry/exit stub. Builds a struct pt_regs frame on the
 * stack (field offsets S_LR/S_SP/S_PC/S_FRAME_SIZE come from
 * asm-offsets), tracks EL0<->EL1 transitions in the C-level
 * 'user_mode' flag, calls do_handle_exception(vec, regs, esr), then
 * restores the frame and erets.
 *
 * Fix: 'user_mode' is a C bool (1 byte); the previous 8-byte str
 * clobbered whatever followed it in memory. Use strb instead.
 */
.macro vector_stub, name, vec
\name:
	stp	 x0,  x1, [sp, #-S_FRAME_SIZE]!
	stp	 x2,  x3, [sp,  #16]
	stp	 x4,  x5, [sp,  #32]
	stp	 x6,  x7, [sp,  #48]
	stp	 x8,  x9, [sp,  #64]
	stp	x10, x11, [sp,  #80]
	stp	x12, x13, [sp,  #96]
	stp	x14, x15, [sp, #112]
	stp	x16, x17, [sp, #128]
	stp	x18, x19, [sp, #144]
	stp	x20, x21, [sp, #160]
	stp	x22, x23, [sp, #176]
	stp	x24, x25, [sp, #192]
	stp	x26, x27, [sp, #208]
	stp	x28, x29, [sp, #224]

	str	x30, [sp, #S_LR]

	/* EL0 vectors (8..15) arrived on sp_el0; EL1 used this stack */
	.if \vec >= 8
	mrs	x1, sp_el0
	.else
	add	x1, sp, #S_FRAME_SIZE
	.endif
	str	x1, [sp, #S_SP]

	mrs	x1, elr_el1
	mrs	x2, spsr_el1
	stp	x1, x2, [sp, #S_PC]

	and	x2, x2, #PSR_MODE_MASK
	cmp	x2, #PSR_MODE_EL0t
	b.ne	1f
	adr	x2, user_mode
	strb	wzr, [x2]		/* we're in kernel mode now */

1:	mov	x0, \vec
	mov	x1, sp
	mrs	x2, esr_el1
	bl	do_handle_exception

	ldp	x1, x2, [sp, #S_PC]
	msr	spsr_el1, x2
	msr	elr_el1, x1

	and	x2, x2, #PSR_MODE_MASK
	cmp	x2, #PSR_MODE_EL0t
	b.ne	1f
	adr	x2, user_mode
	mov	w1, #1
	strb	w1, [x2]		/* we're going back to user mode */

1:
	.if \vec >= 8
	ldr	x1, [sp, #S_SP]
	msr	sp_el0, x1
	.endif

	ldr	x30, [sp, #S_LR]

	ldp	x28, x29, [sp, #224]
	ldp	x26, x27, [sp, #208]
	ldp	x24, x25, [sp, #192]
	ldp	x22, x23, [sp, #176]
	ldp	x20, x21, [sp, #160]
	ldp	x18, x19, [sp, #144]
	ldp	x16, x17, [sp, #128]
	ldp	x14, x15, [sp, #112]
	ldp	x12, x13, [sp,  #96]
	ldp	x10, x11, [sp,  #80]
	ldp	 x8,  x9, [sp,  #64]
	ldp	 x6,  x7, [sp,  #48]
	ldp	 x4,  x5, [sp,  #32]
	ldp	 x2,  x3, [sp,  #16]
	ldp	 x0,  x1, [sp], #S_FRAME_SIZE

	eret
.endm
+
/*
 * One stub per vector table entry; the second argument is the vector
 * number passed through to do_handle_exception and must match the
 * entry's position in vector_table below.
 */
vector_stub	el1t_sync,     0
vector_stub	el1t_irq,      1
vector_stub	el1t_fiq,      2
vector_stub	el1t_error,    3

vector_stub	el1h_sync,     4
vector_stub	el1h_irq,      5
vector_stub	el1h_fiq,      6
vector_stub	el1h_error,    7

vector_stub	el0_sync_64,   8
vector_stub	el0_irq_64,    9
vector_stub	el0_fiq_64,   10
vector_stub	el0_error_64, 11

vector_stub	el0_sync_32,  12
vector_stub	el0_irq_32,   13
vector_stub	el0_fiq_32,   14
vector_stub	el0_error_32, 15
+
.section .text.ex

/* each table entry is 128 (2^7) bytes: just branch to the real stub */
.macro ventry, label
.align 7
	b	\label
.endm

/* the vector table must be 2KB (2^11) aligned for VBAR_EL1 */
.align 11
vector_table:
	ventry	el1t_sync			// Synchronous EL1t
	ventry	el1t_irq			// IRQ EL1t
	ventry	el1t_fiq			// FIQ EL1t
	ventry	el1t_error			// Error EL1t

	ventry	el1h_sync			// Synchronous EL1h
	ventry	el1h_irq			// IRQ EL1h
	ventry	el1h_fiq			// FIQ EL1h
	ventry	el1h_error			// Error EL1h

	ventry	el0_sync_64			// Synchronous 64-bit EL0
	ventry	el0_irq_64			// IRQ 64-bit EL0
	ventry	el0_fiq_64			// FIQ 64-bit EL0
	ventry	el0_error_64			// Error 64-bit EL0

	ventry	el0_sync_32			// Synchronous 32-bit EL0
	ventry	el0_irq_32			// IRQ 32-bit EL0
	ventry	el0_fiq_32			// FIQ 32-bit EL0
	ventry	el0_error_32			// Error 32-bit EL0
diff --git a/arm/selftest.c b/arm/selftest.c
index 30f44261d47db..824af2f3c15af 100644
--- a/arm/selftest.c
+++ b/arm/selftest.c
@@ -8,12 +8,10 @@
 #include <libcflat.h>
 #include <alloc.h>
 #include <asm/setup.h>
-#ifdef __arm__
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#endif
 
 #define TESTGRP "selftest"
 
@@ -80,8 +78,10 @@ static void check_setup(int argc, char **argv)
 	assert_args(nr_tests, 2);
 }
 
-#ifdef __arm__
 static struct pt_regs expected_regs;
+static bool und_works;
+static bool svc_works;
+#if defined(__arm__)
 /*
  * Capture the current register state and execute an instruction
  * that causes an exception. The test handler will check that its
@@ -122,7 +122,6 @@ static bool check_regs(struct pt_regs *regs)
 	return true;
 }
 
-static bool und_works;
 static void und_handler(struct pt_regs *regs)
 {
 	und_works = check_regs(regs);
@@ -140,7 +139,6 @@ static bool check_und(void)
 	return und_works;
 }
 
-static bool svc_works;
 static void svc_handler(struct pt_regs *regs)
 {
 	u32 svc = *(u32 *)(regs->ARM_pc - 4) & 0xffffff;
@@ -181,13 +179,130 @@ static bool check_svc(void)
 
 	return svc_works;
 }
+#elif defined(__aarch64__)
+#include <asm/esr.h>
+
/*
 * Capture the current register state and execute an instruction
 * that causes an exception. The test handler will check that its
 * capture of the current register state matches the capture done
 * here.
 *
 * The frame is written into expected_regs: pstate (with live NZCV
 * merged in), sp, pc (label 1, the faulting instruction), x2..x30,
 * and finally x0/x1. xstr() stringizes the asm-offsets constants —
 * presumably provided by libcflat; confirm if building standalone.
 *
 * NOTE: update the clobber list if the passed insns need more than x0,x1
 */
#define test_exception(pre_insns, excptn_insn, post_insns)	\
	asm volatile(						\
		pre_insns "\n"					\
		"mov	x1, %0\n"				\
		"ldr	x0, [x1, #" xstr(S_PSTATE) "]\n"	\
		"mrs	x1, nzcv\n"				\
		"orr	w0, w0, w1\n"				\
		"mov	x1, %0\n"				\
		"str	w0, [x1, #" xstr(S_PSTATE) "]\n"	\
		"mov	x0, sp\n"				\
		"str	x0, [x1, #" xstr(S_SP) "]\n"		\
		"adr	x0, 1f\n"				\
		"str	x0, [x1, #" xstr(S_PC) "]\n"		\
		"stp	 x2,  x3, [x1,  #16]\n"			\
		"stp	 x4,  x5, [x1,  #32]\n"			\
		"stp	 x6,  x7, [x1,  #48]\n"			\
		"stp	 x8,  x9, [x1,  #64]\n"			\
		"stp	x10, x11, [x1,  #80]\n"			\
		"stp	x12, x13, [x1,  #96]\n"			\
		"stp	x14, x15, [x1, #112]\n"			\
		"stp	x16, x17, [x1, #128]\n"			\
		"stp	x18, x19, [x1, #144]\n"			\
		"stp	x20, x21, [x1, #160]\n"			\
		"stp	x22, x23, [x1, #176]\n"			\
		"stp	x24, x25, [x1, #192]\n"			\
		"stp	x26, x27, [x1, #208]\n"			\
		"stp	x28, x29, [x1, #224]\n"			\
		"str	x30, [x1, #" xstr(S_LR) "]\n"		\
		"stp	 x0,  x1, [x1]\n"			\
	"1:"	excptn_insn "\n"				\
		post_insns "\n"					\
	:: "r" (&expected_regs) : "x0", "x1")
+
+static bool check_regs(struct pt_regs *regs)
+{
+	unsigned i;
+
+	/* exception handlers should always run in EL1 */
+	if (current_level() != CurrentEL_EL1)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(regs->regs); ++i) {
+		if (regs->regs[i] != expected_regs.regs[i])
+			return false;
+	}
+
+	regs->pstate &= 0xf0000000 /* NZCV */ | 0x3c0 /* DAIF */
+			| PSR_MODE_MASK;
+
+	return regs->sp == expected_regs.sp
+		&& regs->pc == expected_regs.pc
+		&& regs->pstate == expected_regs.pstate;
+}
+
/*
 * Return the vector a synchronous exception will take next, and, for
 * the EL1 case, pre-seed expected_regs.pstate (DAIF plus EL1h mode;
 * test_exception later ORs the live NZCV into it).
 */
static enum vector check_vector_prep(void)
{
	unsigned long daif;

	/*
	 * NOTE: 'user_mode' is the bool from lib/arm64/processor.c, not
	 * the user_mode(regs) macro from asm/ptrace.h — a function-like
	 * macro only expands when followed by '('.
	 */
	if (user_mode)
		return EL0_SYNC_64;

	asm volatile("mrs %0, daif" : "=r" (daif) ::);
	expected_regs.pstate = daif | PSR_MODE_EL1h;
	return EL1H_SYNC;
}
+
/*
 * Handler for the Unknown exception class: record whether the saved
 * frame matches the expected one, then skip the faulting instruction.
 */
static void unknown_handler(struct pt_regs *regs, unsigned int esr __unused)
{
	und_works = check_regs(regs);
	regs->pc += 4;	/* all A64 instructions are 4 bytes */
}
+
/*
 * Trigger an Unknown exception (EL2 sysreg access from EL0/1) and
 * report whether unknown_handler saw a matching register frame.
 */
static bool check_und(void)
{
	enum vector v = check_vector_prep();

	install_exception_handler(v, ESR_EL1_EC_UNKNOWN, unknown_handler);

	/* try to read an el2 sysreg from el0/1 */
	test_exception("", "mrs x0, sctlr_el2", "");

	/* uninstall so later tests start from a clean slate */
	install_exception_handler(v, ESR_EL1_EC_UNKNOWN, NULL);

	return und_works;
}
+
/*
 * Handler for SVC64: verify the register frame and the svc immediate.
 */
static void svc_handler(struct pt_regs *regs, unsigned int esr)
{
	/* for an SVC64 exception, ESR bits [15:0] hold the immediate */
	u16 svc = esr & 0xffff;

	/* the preferred return address is the insn after the svc */
	expected_regs.pc += 4;
	svc_works = check_regs(regs) && svc == 123;
}
+
/*
 * Execute "svc #123" and report whether svc_handler saw a matching
 * register frame and immediate.
 */
static bool check_svc(void)
{
	enum vector v = check_vector_prep();

	install_exception_handler(v, ESR_EL1_EC_SVC64, svc_handler);

	test_exception("", "svc #123", "");

	/* uninstall so later tests start from a clean slate */
	install_exception_handler(v, ESR_EL1_EC_SVC64, NULL);

	return svc_works;
}
+#endif
 
 static void check_vectors(void *arg __unused)
 {
 	report("%s", check_und() && check_svc(), testname);
 	exit(report_summary());
 }
-#endif
 
 int main(int argc, char **argv)
 {
@@ -199,7 +314,6 @@ int main(int argc, char **argv)
 
 		check_setup(argc-1, &argv[1]);
 
-#ifdef __arm__
 	} else if (strcmp(argv[0], "vectors-kernel") == 0) {
 
 		check_vectors(NULL);
@@ -209,7 +323,6 @@ int main(int argc, char **argv)
 		void *sp = memalign(PAGE_SIZE, PAGE_SIZE);
 		memset(sp, 0, PAGE_SIZE);
 		start_usr(check_vectors, NULL, (unsigned long)sp + PAGE_SIZE);
-#endif
 	}
 
 	return report_summary();
diff --git a/arm/unittests.cfg b/arm/unittests.cfg
index 9ac6ecaa55d3b..efcca6bf24af6 100644
--- a/arm/unittests.cfg
+++ b/arm/unittests.cfg
@@ -22,11 +22,9 @@ groups = selftest
 file = selftest.flat
 extra_params = -append 'vectors-kernel'
 groups = selftest
-arch = arm
 
 # Test vector setup and exception handling (user mode).
 [selftest::vectors-user]
 file = selftest.flat
 extra_params = -append 'vectors-user'
 groups = selftest
-arch = arm
diff --git a/config/config-arm64.mak b/config/config-arm64.mak
index 37db3d6026424..91255e7d01432 100644
--- a/config/config-arm64.mak
+++ b/config/config-arm64.mak
@@ -10,6 +10,7 @@ kernel_offset = 0x80000
 CFLAGS += -D__aarch64__
 
 cstart.o = $(TEST_DIR)/cstart64.o
+cflatobjs += lib/arm64/processor.o
 
 # arm64 specific tests
 tests =
diff --git a/lib/arm64/asm-offsets.c b/lib/arm64/asm-offsets.c
index c85b9a1e97e44..d7d33f4d917ab 100644
--- a/lib/arm64/asm-offsets.c
+++ b/lib/arm64/asm-offsets.c
@@ -7,8 +7,24 @@
  */
 #include <libcflat.h>
 #include <kbuild.h>
+#include <asm/ptrace.h>
 
int main(void)
{
	/*
	 * Emit struct pt_regs field offsets (and the frame size) as
	 * assembler constants for use by the vector stubs and
	 * test_exception.
	 */
	OFFSET(S_X0, pt_regs, regs[0]);
	OFFSET(S_X1, pt_regs, regs[1]);
	OFFSET(S_X2, pt_regs, regs[2]);
	OFFSET(S_X3, pt_regs, regs[3]);
	OFFSET(S_X4, pt_regs, regs[4]);
	OFFSET(S_X5, pt_regs, regs[5]);
	OFFSET(S_X6, pt_regs, regs[6]);
	OFFSET(S_X7, pt_regs, regs[7]);
	OFFSET(S_LR, pt_regs, regs[30]);
	OFFSET(S_SP, pt_regs, sp);
	OFFSET(S_PC, pt_regs, pc);
	OFFSET(S_PSTATE, pt_regs, pstate);
	OFFSET(S_ORIG_X0, pt_regs, orig_x0);
	OFFSET(S_SYSCALLNO, pt_regs, syscallno);
	DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
	return 0;
}
diff --git a/lib/arm64/asm/esr.h b/lib/arm64/asm/esr.h
new file mode 100644
index 0000000000000..8407b003afa7f
--- /dev/null
+++ b/lib/arm64/asm/esr.h
@@ -0,0 +1,43 @@
+/*
+ * From Linux kernel arch/arm64/include/asm/esr.h
+ */
+#ifndef _ASMARM64_ESR_H_
+#define _ASMARM64_ESR_H_
+
+#define ESR_EL1_WRITE		(1 << 6)
+#define ESR_EL1_CM		(1 << 8)
+#define ESR_EL1_IL		(1 << 25)
+
+#define ESR_EL1_EC_SHIFT	(26)
+#define ESR_EL1_EC_UNKNOWN	(0x00)
+#define ESR_EL1_EC_WFI		(0x01)
+#define ESR_EL1_EC_CP15_32	(0x03)
+#define ESR_EL1_EC_CP15_64	(0x04)
+#define ESR_EL1_EC_CP14_MR	(0x05)
+#define ESR_EL1_EC_CP14_LS	(0x06)
+#define ESR_EL1_EC_FP_ASIMD	(0x07)
+#define ESR_EL1_EC_CP10_ID	(0x08)
+#define ESR_EL1_EC_CP14_64	(0x0C)
+#define ESR_EL1_EC_ILL_ISS	(0x0E)
+#define ESR_EL1_EC_SVC32	(0x11)
+#define ESR_EL1_EC_SVC64	(0x15)
+#define ESR_EL1_EC_SYS64	(0x18)
+#define ESR_EL1_EC_IABT_EL0	(0x20)
+#define ESR_EL1_EC_IABT_EL1	(0x21)
+#define ESR_EL1_EC_PC_ALIGN	(0x22)
+#define ESR_EL1_EC_DABT_EL0	(0x24)
+#define ESR_EL1_EC_DABT_EL1	(0x25)
+#define ESR_EL1_EC_SP_ALIGN	(0x26)
+#define ESR_EL1_EC_FP_EXC32	(0x28)
+#define ESR_EL1_EC_FP_EXC64	(0x2C)
+#define ESR_EL1_EC_SERROR	(0x2F)
+#define ESR_EL1_EC_BREAKPT_EL0	(0x30)
+#define ESR_EL1_EC_BREAKPT_EL1	(0x31)
+#define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
+#define ESR_EL1_EC_SOFTSTP_EL1	(0x33)
+#define ESR_EL1_EC_WATCHPT_EL0	(0x34)
+#define ESR_EL1_EC_WATCHPT_EL1	(0x35)
+#define ESR_EL1_EC_BKPT32	(0x38)
+#define ESR_EL1_EC_BRK64	(0x3C)
+
+#endif /* _ASMARM64_ESR_H_ */
diff --git a/lib/arm64/asm/processor.h b/lib/arm64/asm/processor.h
new file mode 100644
index 0000000000000..66296f549f87e
--- /dev/null
+++ b/lib/arm64/asm/processor.h
@@ -0,0 +1,52 @@
+#ifndef _ASMARM64_PROCESSOR_H_
+#define _ASMARM64_PROCESSOR_H_
+/*
+ * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include <asm/ptrace.h>
+
/*
 * Exception vectors, in the same order as the entries of the vector
 * table (and the vector_stub instantiations) in arm/cstart64.S.
 */
enum vector {
	EL1T_SYNC,
	EL1T_IRQ,
	EL1T_FIQ,
	EL1T_ERROR,
	EL1H_SYNC,
	EL1H_IRQ,
	EL1H_FIQ,
	EL1H_ERROR,
	EL0_SYNC_64,
	EL0_IRQ_64,
	EL0_FIQ_64,
	EL0_ERROR_64,
	EL0_SYNC_32,
	EL0_IRQ_32,
	EL0_FIQ_32,
	EL0_ERROR_32,
	VECTOR_MAX,
};
+
+#define EC_MAX 64
+
+typedef void (*vector_fn)(enum vector v, struct pt_regs *regs,
+			  unsigned int esr);
+typedef void (*exception_fn)(struct pt_regs *regs, unsigned int esr);
+extern void install_vector_handler(enum vector v, vector_fn fn);
+extern void install_exception_handler(enum vector v, unsigned int ec,
+				      exception_fn fn);
+
+extern void show_regs(struct pt_regs *regs);
+extern void *get_sp(void);
+
/* Return the CurrentEL.EL field (bits [3:2]): CurrentEL_EL1/EL2/... */
static inline unsigned long current_level(void)
{
	unsigned long el;
	asm volatile("mrs %0, CurrentEL" : "=r" (el));
	return el & 0xc;	/* mask off RES0 bits [1:0] */
}
+
+extern bool user_mode;
+extern void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr);
+
+#endif /* _ASMARM64_PROCESSOR_H_ */
diff --git a/lib/arm64/asm/ptrace.h b/lib/arm64/asm/ptrace.h
new file mode 100644
index 0000000000000..dd89d82063b1d
--- /dev/null
+++ b/lib/arm64/asm/ptrace.h
@@ -0,0 +1,95 @@
+#ifndef _ASMARM64_PTRACE_H_
+#define _ASMARM64_PTRACE_H_
+/*
+ * Adapted from Linux kernel headers
+ * arch/arm64/include/asm/ptrace.h
+ * arch/arm64/include/uapi/asm/ptrace.h
+ */
+
+/* Current Exception Level values, as contained in CurrentEL */
+#define CurrentEL_EL1	(1 << 2)
+#define CurrentEL_EL2	(2 << 2)
+
+/*
+ * PSR bits
+ */
+#define PSR_MODE_EL0t	0x00000000
+#define PSR_MODE_EL1t	0x00000004
+#define PSR_MODE_EL1h	0x00000005
+#define PSR_MODE_EL2t	0x00000008
+#define PSR_MODE_EL2h	0x00000009
+#define PSR_MODE_EL3t	0x0000000c
+#define PSR_MODE_EL3h	0x0000000d
+#define PSR_MODE_MASK	0x0000000f
+
+/* AArch32 CPSR bits */
+#define PSR_MODE32_BIT	0x00000010
+
+/* AArch64 SPSR bits */
+#define PSR_F_BIT	0x00000040
+#define PSR_I_BIT	0x00000080
+#define PSR_A_BIT	0x00000100
+#define PSR_D_BIT	0x00000200
+#define PSR_Q_BIT	0x08000000
+#define PSR_V_BIT	0x10000000
+#define PSR_C_BIT	0x20000000
+#define PSR_Z_BIT	0x40000000
+#define PSR_N_BIT	0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f		0xff000000	/* Flags                */
+#define PSR_s		0x00ff0000	/* Status               */
+#define PSR_x		0x0000ff00	/* Extension            */
+#define PSR_c		0x000000ff	/* Control              */
+
+#ifndef __ASSEMBLY__
+#include <libcflat.h>
+
/* User-visible register state: x0-x30, stack pointer, pc, pstate. */
struct user_pt_regs {
	u64		regs[31];
	u64		sp;
	u64		pc;
	u64		pstate;
};

/* FP/SIMD state: 32 128-bit vector registers plus status/control. */
struct user_fpsimd_state {
	__uint128_t	vregs[32];
	u32		fpsr;
	u32		fpcr;
};

/*
 * This struct defines the way the registers are stored on the stack during an
 * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
 * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs.
 */
struct pt_regs {
	union {
		struct user_pt_regs user_regs;
		/* anonymous struct: same fields addressable directly */
		struct {
			u64 regs[31];
			u64 sp;
			u64 pc;
			u64 pstate;
		};
	};
	u64 orig_x0;	/* x0 at exception entry (syscall restarting) */
	u64 syscallno;
};

/* true when the saved frame was taken from EL0 */
#define user_mode(regs) \
	(((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)

#define processor_mode(regs) \
	((regs)->pstate & PSR_MODE_MASK)

/* IRQs (I bit) / FIQs (F bit) are enabled when the mask bit is clear */
#define interrupts_enabled(regs) \
	(!((regs)->pstate & PSR_I_BIT))

#define fast_interrupts_enabled(regs) \
	(!((regs)->pstate & PSR_F_BIT))
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASMARM64_PTRACE_H_ */
diff --git a/lib/arm64/processor.c b/lib/arm64/processor.c
new file mode 100644
index 0000000000000..7230a8ab3f702
--- /dev/null
+++ b/lib/arm64/processor.c
@@ -0,0 +1,168 @@
+/*
+ * processor control and status functions
+ *
+ * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include <libcflat.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/esr.h>
+
/*
 * Human-readable name for each enum vector value, indexed by vector.
 * const: these point at string literals, which must never be written.
 */
static const char *vector_names[] = {
	"el1t_sync",
	"el1t_irq",
	"el1t_fiq",
	"el1t_error",
	"el1h_sync",
	"el1h_irq",
	"el1h_fiq",
	"el1h_error",
	"el0_sync_64",
	"el0_irq_64",
	"el0_fiq_64",
	"el0_error_64",
	"el0_sync_32",
	"el0_irq_32",
	"el0_fiq_32",
	"el0_error_32",
};
+
+static char *ec_names[EC_MAX] = {
+	[ESR_EL1_EC_UNKNOWN]		= "UNKNOWN",
+	[ESR_EL1_EC_WFI]		= "WFI",
+	[ESR_EL1_EC_CP15_32]		= "CP15_32",
+	[ESR_EL1_EC_CP15_64]		= "CP15_64",
+	[ESR_EL1_EC_CP14_MR]		= "CP14_MR",
+	[ESR_EL1_EC_CP14_LS]		= "CP14_LS",
+	[ESR_EL1_EC_FP_ASIMD]		= "FP_ASMID",
+	[ESR_EL1_EC_CP10_ID]		= "CP10_ID",
+	[ESR_EL1_EC_CP14_64]		= "CP14_64",
+	[ESR_EL1_EC_ILL_ISS]		= "ILL_ISS",
+	[ESR_EL1_EC_SVC32]		= "SVC32",
+	[ESR_EL1_EC_SVC64]		= "SVC64",
+	[ESR_EL1_EC_SYS64]		= "SYS64",
+	[ESR_EL1_EC_IABT_EL0]		= "IABT_EL0",
+	[ESR_EL1_EC_IABT_EL1]		= "IABT_EL1",
+	[ESR_EL1_EC_PC_ALIGN]		= "PC_ALIGN",
+	[ESR_EL1_EC_DABT_EL0]		= "DABT_EL0",
+	[ESR_EL1_EC_DABT_EL1]		= "DABT_EL1",
+	[ESR_EL1_EC_SP_ALIGN]		= "SP_ALIGN",
+	[ESR_EL1_EC_FP_EXC32]		= "FP_EXC32",
+	[ESR_EL1_EC_FP_EXC64]		= "FP_EXC64",
+	[ESR_EL1_EC_SERROR]		= "SERROR",
+	[ESR_EL1_EC_BREAKPT_EL0]	= "BREAKPT_EL0",
+	[ESR_EL1_EC_BREAKPT_EL1]	= "BREAKPT_EL1",
+	[ESR_EL1_EC_SOFTSTP_EL0]	= "SOFTSTP_EL0",
+	[ESR_EL1_EC_SOFTSTP_EL1]	= "SOFTSTP_EL1",
+	[ESR_EL1_EC_WATCHPT_EL0]	= "WATCHPT_EL0",
+	[ESR_EL1_EC_WATCHPT_EL1]	= "WATCHPT_EL1",
+	[ESR_EL1_EC_BKPT32]		= "BKPT32",
+	[ESR_EL1_EC_BRK64]		= "BRK64",
+};
+
/*
 * Dump a register frame: pc, lr, pstate, sp, then x29 down to x0,
 * two registers per line.
 * NOTE(review): the %llx specifiers assume libcflat's u64 is
 * unsigned long long — confirm against the typedef.
 */
void show_regs(struct pt_regs *regs)
{
	int i;

	printf("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
			regs->pc, regs->regs[30], regs->pstate);
	printf("sp : %016llx\n", regs->sp);

	for (i = 29; i >= 0; --i) {
		printf("x%-2d: %016llx ", i, regs->regs[i]);
		if (i % 2 == 0)	/* break the line after an even-numbered reg */
			printf("\n");
	}
	printf("\n");
}
+
+void *get_sp(void)
+{
+	register unsigned long sp asm("sp");
+	return (void *)sp;
+}
+
+static void bad_exception(enum vector v, struct pt_regs *regs,
+			  unsigned int esr, bool bad_vector)
+{
+	unsigned int ec = esr >> ESR_EL1_EC_SHIFT;
+
+	if (bad_vector) {
+		if (v < VECTOR_MAX)
+			printf("Unhandled vector %d (%s)\n", v,
+					vector_names[v]);
+		else
+			printf("Got bad vector=%d\n", v);
+	} else {
+		if (ec_names[ec])
+			printf("Unhandled exception ec=0x%x (%s)\n", ec,
+					ec_names[ec]);
+		else
+			printf("Got bad ec=0x%x\n", ec);
+	}
+
+	printf("Vector: %d (%s)\n", v, vector_names[v]);
+	printf("ESR_EL1: %08lx, ec=0x%x (%s)\n", esr, ec, ec_names[ec]);
+	printf("Exception frame registers:\n");
+	show_regs(regs);
+	abort();
+}
+
+static exception_fn exception_handlers[VECTOR_MAX][EC_MAX];
+
+void install_exception_handler(enum vector v, unsigned int ec, exception_fn fn)
+{
+	if (v < VECTOR_MAX && ec < EC_MAX)
+		exception_handlers[v][ec] = fn;
+}
+
+static void default_vector_handler(enum vector v, struct pt_regs *regs,
+				   unsigned int esr)
+{
+	unsigned int ec = esr >> ESR_EL1_EC_SHIFT;
+
+	if (ec < EC_MAX && exception_handlers[v][ec])
+		exception_handlers[v][ec](regs, esr);
+	else
+		bad_exception(v, regs, esr, false);
+}
+
/*
 * Per-vector dispatch table. Only the vectors expected in normal
 * operation (sync and irq, for EL1h and 64-bit EL0) get the default
 * EC dispatcher; taking any other vector lands in bad_exception via
 * do_handle_exception.
 */
static vector_fn vector_handlers[VECTOR_MAX] = {
	[EL1H_SYNC]	= default_vector_handler,
	[EL1H_IRQ]	= default_vector_handler,
	[EL0_SYNC_64]	= default_vector_handler,
	[EL0_IRQ_64]	= default_vector_handler,
};
+
+void do_handle_exception(enum vector v, struct pt_regs *regs, unsigned int esr)
+{
+	if (v < VECTOR_MAX && vector_handlers[v])
+		vector_handlers[v](v, regs, esr);
+	else
+		bad_exception(v, regs, esr, true);
+}
+
+void install_vector_handler(enum vector v, vector_fn fn)
+{
+	if (v < VECTOR_MAX)
+		vector_handlers[v] = fn;
+}
+
/* true while running at EL0; also read/written by the vector stubs */
bool user_mode;

/*
 * Drop to EL0 and call func(arg) on the given user stack. Returns to
 * EL0 via eret, so this function itself never returns.
 */
void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr)
{
	sp_usr &= (~15UL); /* stack ptr needs 16-byte alignment */

	user_mode = true;

	asm volatile(
		"mov	x0, %0\n"	/* x0 = arg for func */
		"msr	sp_el0, %1\n"	/* EL0 stack pointer */
		"msr	elr_el1, %2\n"	/* eret target = func */
		"mov	x3, xzr\n"	/* clear and "set" PSR_MODE_EL0t */
		"msr	spsr_el1, x3\n"
		"eret\n"
	:: "r" (arg), "r" (sp_usr), "r" (func) : "x0", "x3");
}
-- 
1.9.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux