In order to re-map the guest vectors at EL2 when pKVM is enabled, refactor
__kvm_vector_slot2idx() and kvm_init_vector_slot() to move all the address
calculation logic into a static inline function.

Signed-off-by: Quentin Perret <qperret@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_mmu.h | 8 ++++++++
 arch/arm64/kvm/arm.c             | 9 +--------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 5168a0c516ae..cb104443d8e4 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -171,6 +171,14 @@ phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
 int kvm_mmu_init(void);
 
+static inline void *__kvm_vector_slot2addr(void *base,
+					   enum arm64_hyp_spectre_vector slot)
+{
+	int idx = slot - (slot != HYP_VECTOR_DIRECT);
+
+	return base + (idx * SZ_2K);
+}
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c7f8fca97202..b1e1747e4bbf 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1318,16 +1318,9 @@ static unsigned long nvhe_percpu_order(void)
 /* A lookup table holding the hypervisor VA for each vector slot */
 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
 
-static int __kvm_vector_slot2idx(enum arm64_hyp_spectre_vector slot)
-{
-	return slot - (slot != HYP_VECTOR_DIRECT);
-}
-
 static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
 {
-	int idx = __kvm_vector_slot2idx(slot);
-
-	hyp_spectre_vector_selector[slot] = base + (idx * SZ_2K);
+	hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
 }
 
 static int kvm_init_vector_slots(void)
-- 
2.29.2.299.gdc1121823c-goog
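
For reference, below is a minimal standalone sketch of the address calculation
the new helper performs. It assumes the arm64_hyp_spectre_vector enum declares
HYP_VECTOR_DIRECT, HYP_VECTOR_SPECTRE_DIRECT, HYP_VECTOR_INDIRECT and
HYP_VECTOR_SPECTRE_INDIRECT in that order (mirroring asm/spectre.h); the SZ_2K
define, the dummy vectors array and main() are illustrative only and not part
of the patch:

#include <stdio.h>

#define SZ_2K	2048	/* stand-in for the kernel's SZ_2K */

/* Assumed slot ordering, mirroring arch/arm64/include/asm/spectre.h */
enum arm64_hyp_spectre_vector {
	HYP_VECTOR_DIRECT,
	HYP_VECTOR_SPECTRE_DIRECT,
	HYP_VECTOR_INDIRECT,
	HYP_VECTOR_SPECTRE_INDIRECT,
};

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	/* Both "direct" slots collapse onto index 0; the indirect ones follow. */
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	/* The kernel relies on GCC's void * arithmetic; use char * here. */
	return (char *)base + (idx * SZ_2K);
}

int main(void)
{
	static char vectors[3 * SZ_2K];	/* dummy vector pages, for illustration */
	int slot;

	for (slot = HYP_VECTOR_DIRECT; slot <= HYP_VECTOR_SPECTRE_INDIRECT; slot++)
		printf("slot %d -> offset %ld\n", slot,
		       (long)((char *)__kvm_vector_slot2addr(vectors, slot) - vectors));

	return 0;
}

With that assumed ordering, the sketch prints offset 0 for both direct slots
and 2K/4K for the two indirect slots, i.e. the same mapping that
kvm_init_vector_slot() previously derived via __kvm_vector_slot2idx().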