This is a note to let you know that I've just added the patch titled

    arm64: mm: Allocate ASIDs in pairs

to the 4.9-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     arm64-mm-allocate-asids-in-pairs.patch
and it can be found in the queue-4.9 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From foo@baz Thu Apr 5 21:39:27 CEST 2018
From: Mark Rutland <mark.rutland@xxxxxxx>
Date: Tue, 3 Apr 2018 12:08:59 +0100
Subject: arm64: mm: Allocate ASIDs in pairs
To: stable@xxxxxxxxxxxxxxx
Cc: mark.brown@xxxxxxxxxx, ard.biesheuvel@xxxxxxxxxx, marc.zyngier@xxxxxxx, will.deacon@xxxxxxx
Message-ID: <20180403110923.43575-4-mark.rutland@xxxxxxx>

From: Will Deacon <will.deacon@xxxxxxx>

commit 0c8ea531b774 upstream.

In preparation for separate kernel/user ASIDs, allocate them in pairs
for each mm_struct. The bottom bit distinguishes the two: if it is set,
then the ASID will map only userspace.

Reviewed-by: Mark Rutland <mark.rutland@xxxxxxx>
Tested-by: Laura Abbott <labbott@xxxxxxxxxx>
Tested-by: Shanker Donthineni <shankerd@xxxxxxxxxxxxxx>
Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxx> [v4.9 backport]
Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx> [v4.9 backport]
Tested-by: Will Deacon <will.deacon@xxxxxxx>
Tested-by: Greg Hackmann <ghackmann@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/arm64/include/asm/mmu.h |    2 ++
 arch/arm64/mm/context.c      |   25 +++++++++++++++++--------
 2 files changed, 19 insertions(+), 8 deletions(-)

--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#define USER_ASID_FLAG	(UL(1) << 48)
+
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
 
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define asid2idx(asid)		((asid) & ~ASID_MASK)
+#define idx2asid(idx)		asid2idx(idx)
+#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -104,7 +113,7 @@ static void flush_context(unsigned int c
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid & ~ASID_MASK, asid_map);
+		__set_bit(asid2idx(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -159,16 +168,16 @@ static u64 new_context(struct mm_struct
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		asid &= ~ASID_MASK;
-		if (!__test_and_set_bit(asid, asid_map))
+		if (!__test_and_set_bit(asid2idx(asid), asid_map))
 			return newasid;
 	}
 
 	/*
 	 * Allocate a free ASID. If we can't find one, take a note of the
-	 * currently active ASIDs and mark the TLBs as requiring flushes.
-	 * We always count from ASID #1, as we use ASID #0 when setting a
-	 * reserved TTBR0 for the init_mm.
+	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
+	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
+	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+	 * pairs.
 	 */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 	if (asid != NUM_USER_ASIDS)
@@ -185,7 +194,7 @@ static u64 new_context(struct mm_struct
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return asid | generation;
+	return idx2asid(asid) | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)


Patches currently in stable-queue which might be from mark.rutland@xxxxxxx are

queue-4.9/arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch
queue-4.9/arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch
queue-4.9/arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch
queue-4.9/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
queue-4.9/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.9/arm64-mm-allocate-asids-in-pairs.patch
queue-4.9/arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
queue-4.9/arm64-use-ret-instruction-for-exiting-the-trampoline.patch
queue-4.9/arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch
queue-4.9/arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch
queue-4.9/arm64-mm-use-non-global-mappings-for-kernel-space.patch
queue-4.9/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.9/arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch
queue-4.9/arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch
queue-4.9/arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch
queue-4.9/module-extend-rodata-off-boot-cmdline-parameter-to-module-mappings.patch
queue-4.9/arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch
queue-4.9/arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch
queue-4.9/arm64-allow-checking-of-a-cpu-local-erratum.patch
queue-4.9/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
queue-4.9/arm64-kconfig-add-config_unmap_kernel_at_el0.patch
queue-4.9/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.9/arm64-factor-out-entry-stack-manipulation.patch
queue-4.9/arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch
queue-4.9/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.9/arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch
queue-4.9/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
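
For anyone reviewing the backport, the even/odd scheme in the patch above can
be sanity-checked in isolation. The standalone sketch below is not part of the
patch: the ASID_BITS value, the uint64_t types, and the main() harness are
illustrative assumptions, with the macros mirroring the patch's
CONFIG_UNMAP_KERNEL_AT_EL0 branch. It shows that idx2asid() always yields an
even (kernel) ASID, that the odd sibling with the bottom bit set is the
user-only ASID of the pair (the bit USER_ASID_FLAG flips at position 48 of the
TTBR-format value, where the ASID field starts), and that both members of a
pair share a single slot in the allocation bitmap, which is why NUM_USER_ASIDS
is halved.

	#include <stdio.h>
	#include <stdint.h>

	/* Mirror the patch's constants for a 16-bit ASID space. */
	#define ASID_BITS		16
	#define ASID_MASK		(~(((uint64_t)1 << ASID_BITS) - 1))
	#define ASID_FIRST_VERSION	((uint64_t)1 << ASID_BITS)

	/* CONFIG_UNMAP_KERNEL_AT_EL0 variants: half the ASIDs, even/odd pairs. */
	#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
	#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
	#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)

	int main(void)
	{
		for (uint64_t idx = 1; idx <= 3; idx++) {
			uint64_t kernel_asid = idx2asid(idx);	/* always even */
			uint64_t user_asid = kernel_asid | 1;	/* bottom bit set */

			printf("idx %llu -> kernel ASID %llu, user ASID %llu\n",
			       (unsigned long long)idx,
			       (unsigned long long)kernel_asid,
			       (unsigned long long)user_asid);

			/* Round trip: both ASIDs of a pair map to one bitmap slot. */
			if (asid2idx(kernel_asid) != idx || asid2idx(user_asid) != idx)
				return 1;
		}
		printf("NUM_USER_ASIDS = %llu (half of a %d-bit ASID space)\n",
		       (unsigned long long)NUM_USER_ASIDS, ASID_BITS);
		return 0;
	}

Allocating by bitmap index rather than raw ASID is what lets new_context()
keep using a single bitmap: the allocator hands out indices, and idx2asid()
expands each index to the even kernel ASID, leaving the odd user ASID
implicitly reserved.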