From: Julien Grall <julien.grall@xxxxxxx> Flushing the local context will vary depending on the actual user of the ASID allocator. Introduce a new callback to flush the local context and move the call to flush the local TLB into it. Signed-off-by: Julien Grall <julien.grall@xxxxxxx> Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@xxxxxxxxxx> --- arch/arm64/mm/context.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 901472a57b5d..ee446f7535a3 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -32,6 +32,8 @@ static struct asid_info unsigned long *pinned_map; unsigned long max_pinned_asids; unsigned long nr_pinned_asids; + /* Callback to locally flush the context. */ + void (*flush_cpu_ctxt_cb)(void); } asid_info; #define active_asid(info, cpu) (*per_cpu_ptr((info)->active, cpu)) @@ -245,8 +247,9 @@ static void asid_new_context(struct asid_info *info, atomic64_t *pasid, atomic64_set(pasid, asid); } - if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending)) - local_flush_tlb_all(); + if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending) && + info->flush_cpu_ctxt_cb) + info->flush_cpu_ctxt_cb(); atomic64_set(&active_asid(info, cpu), asid); raw_spin_unlock_irqrestore(&info->lock, flags); @@ -427,6 +430,11 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm) post_ttbr_update_workaround(); } +static void asid_flush_cpu_ctxt(void) +{ + local_flush_tlb_all(); +} + static int asids_update_limit(void) { struct asid_info *info = &asid_info; @@ -499,6 +507,7 @@ static int asids_init(void) info->active = &active_asids; info->reserved = &reserved_asids; + info->flush_cpu_ctxt_cb = asid_flush_cpu_ctxt; /* * We cannot call set_reserved_asid_bits() here because CPU -- 2.17.1 _______________________________________________ kvmarm mailing list kvmarm@xxxxxxxxxxxxxxxxxxxxx https://lists.cs.columbia.edu/mailman/listinfo/kvmarm