* Some helper functions defined in asm/kvm_emulate.h are used by
  mmu.c only and are unlikely to be used by other source files in
  the near future. They are moved to mmu.c and renamed accordingly:

  is_exec_fault:  kvm_vcpu_trap_is_exec_fault()
  is_write_fault: kvm_is_write_fault()
Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
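For reference, a rough sketch (not part of the patch) of how a call site
in mmu.c changes once the helpers take the raw ESR value rather than a
vcpu pointer; the local variable names below are only illustrative, and
kvm_handle_user_mem_abort() receives the ESR from its caller:

    bool write_fault, exec_fault;

    /* before: helpers derive the fault information from the vcpu */
    write_fault = kvm_is_write_fault(vcpu);
    exec_fault  = kvm_vcpu_trap_is_exec_fault(vcpu);

    /* after: helpers operate on the raw ESR value passed in as @esr */
    write_fault = is_write_fault(esr);
    exec_fault  = is_exec_fault(esr);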
arch/arm64/include/asm/esr.h | 6 ++++
arch/arm64/include/asm/kvm_emulate.h | 27 ++---------------
arch/arm64/include/asm/kvm_host.h | 4 +++
arch/arm64/kvm/mmu.c | 43 ++++++++++++++++++++++------
4 files changed, 48 insertions(+), 32 deletions(-)
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 29f97eb3dad4..0f2cb27691de 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -321,8 +321,14 @@
ESR_ELx_CP15_32_ISS_DIR_READ)
#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
#include <asm/types.h>
+#define esr_dabt_fault_type(esr) ((esr) & ESR_ELx_FSC_TYPE)
+#define esr_dabt_fault_level(esr) (FIELD_GET(ESR_ELx_FSC_LEVEL, esr))
+#define esr_dabt_is_wnr(esr) (!!(FIELD_GET(ESR_ELx_WNR, esr)))
+#define esr_dabt_is_s1ptw(esr) (!!(FIELD_GET(ESR_ELx_S1PTW, esr)))
+
static inline bool esr_is_data_abort(u32 esr)
{
const u32 ec = ESR_ELx_EC(esr);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 923b4d08ea9a..90742f4b1acd 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -285,13 +285,13 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
+ return esr_dabt_is_s1ptw(kvm_vcpu_get_esr(vcpu));
}
/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
+ return esr_dabt_is_wnr(kvm_vcpu_get_esr(vcpu));
}
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -320,11 +320,6 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
-static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
-}
-
static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
@@ -332,12 +327,7 @@ static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
-}
-
-static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+ return esr_dabt_fault_type(kvm_vcpu_get_esr(vcpu));
}
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
@@ -365,17 +355,6 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
return ESR_ELx_SYS64_ISS_RT(esr);
}
-static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_abt_iss1tw(vcpu))
- return true;
-
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 1824f7e1f9ab..581825b9df77 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -606,6 +606,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_handle_user_mem_abort(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot,
+ phys_addr_t fault_ipa, unsigned long hva,
+ unsigned int esr, bool prefault);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0625bf2353c2..e4038c5e931d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -892,9 +892,34 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
return 0;
}
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_memory_slot *memslot, unsigned long hva,
- unsigned long fault_status)
+static inline bool is_exec_fault(unsigned int esr)
+{
+ if (ESR_ELx_EC(esr) != ESR_ELx_EC_IABT_LOW)
+ return false;
+
+ if (esr_dabt_is_s1ptw(esr))
+ return false;
+
+ return true;
+}
+
+static inline bool is_write_fault(unsigned int esr)
+{
+ if (esr_dabt_is_s1ptw(esr))
+ return true;
+
+ if (ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW)
+ return false;
+
+ return esr_dabt_is_wnr(esr);
+}
+
+int kvm_handle_user_mem_abort(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot,
+ phys_addr_t fault_ipa,
+ unsigned long hva,
+ unsigned int esr,
+ bool prefault)