Define a generic macro __kvm_tlb_flush_range() to invalidate the TLBs
over a range of addresses. The implementation accepts 'op' as a generic
TLBI operation. Upcoming patches will use this to implement IPA based
TLB invalidations (ipas2e1is).

If the system doesn't support FEAT_TLBIRANGE, the implementation falls
back to flushing the pages one by one for the range supplied.

Signed-off-by: Raghavendra Rao Ananta <rananta@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_asm.h | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 43c3bc0f9544d..995ff048e8851 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -221,6 +221,32 @@ DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
 
+/*
+ * Invalidate TLB entries covering the range [start, end) for the VMID
+ * in 'mmu', using TLBI operation 'op' on mappings at page-table level
+ * 'level'. Falls back to a full VMID invalidation when the range spans
+ * at least MAX_TLBI_RANGE_PAGES pages, or when FEAT_TLBIRANGE is absent
+ * and flushing page by page would need MAX_TLBI_OPS or more operations.
+ * NOTE: the 'start' and 'end' arguments are rewritten in place with
+ * their granule-rounded values.
+ */
+#define __kvm_tlb_flush_range(op, mmu, start, end, level, tlb_level) do {	\
+	unsigned long pages, stride;						\
+										\
+	stride = kvm_granule_size(level);					\
+	start = round_down(start, stride);					\
+	end = round_up(end, stride);						\
+	pages = (end - start) >> PAGE_SHIFT;					\
+										\
+	if ((!system_supports_tlb_range() &&					\
+	     (end - start) >= (MAX_TLBI_OPS * stride)) ||			\
+	    pages >= MAX_TLBI_RANGE_PAGES) {					\
+		__kvm_tlb_flush_vmid(mmu);					\
+		break;								\
+	}									\
+										\
+	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false);	\
+} while (0)
+
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
-- 
2.39.1.519.gcb327c4b5f-goog