From: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>

The gcr_user mask is a per thread mask that represents the tags that
are excluded from random generation when the Memory Tagging Extension
is present and an 'irg' instruction is invoked.

gcr_user affects the behavior on EL0 only.

Currently that mask is an include mask and it is controlled by the user
via prctl() while GCR_EL1 accepts an exclude mask.

Convert the include mask into an exclude one to make the register
setting easier.

Note: This change will affect gcr_kernel (for EL1) introduced with a
future patch.

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
---
 arch/arm64/include/asm/processor.h |  2 +-
 arch/arm64/kernel/mte.c            | 29 +++++++++++++++--------------
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fec204d28fce..ed9efa5be8eb 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -153,7 +153,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_ARM64_MTE
 	u64			sctlr_tcf0;
-	u64			gcr_user_incl;
+	u64			gcr_user_excl;
 #endif
 };
 
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index e2d708b4583d..7717ea9bc2a7 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -135,23 +135,22 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
 	preempt_enable();
 }
 
-static void update_gcr_el1_excl(u64 incl)
+static void update_gcr_el1_excl(u64 excl)
 {
-	u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
 
 	/*
-	 * Note that 'incl' is an include mask (controlled by the user via
-	 * prctl()) while GCR_EL1 accepts an exclude mask.
+	 * Note that the mask controlled by the user via prctl() is an
+	 * include while GCR_EL1 accepts an exclude mask.
 	 * No need for ISB since this only affects EL0 currently, implicit
 	 * with ERET.
 	 */
 	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
 }
 
-static void set_gcr_el1_excl(u64 incl)
+static void set_gcr_el1_excl(u64 excl)
 {
-	current->thread.gcr_user_incl = incl;
-	update_gcr_el1_excl(incl);
+	current->thread.gcr_user_excl = excl;
+	update_gcr_el1_excl(excl);
 }
 
 void flush_mte_state(void)
@@ -166,7 +165,7 @@ void flush_mte_state(void)
 	/* disable tag checking */
 	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
 	/* reset tag generation mask */
-	set_gcr_el1_excl(0);
+	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
 void mte_thread_switch(struct task_struct *next)
@@ -177,7 +176,7 @@ void mte_thread_switch(struct task_struct *next)
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
 		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-	update_gcr_el1_excl(next->thread.gcr_user_incl);
+	update_gcr_el1_excl(next->thread.gcr_user_excl);
 }
 
 void mte_suspend_exit(void)
@@ -185,13 +184,14 @@ void mte_suspend_exit(void)
 	if (!system_supports_mte())
 		return;
 
-	update_gcr_el1_excl(current->thread.gcr_user_incl);
+	update_gcr_el1_excl(current->thread.gcr_user_excl);
 }
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	u64 tcf0;
-	u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+		       SYS_GCR_EL1_EXCL_MASK;
 
 	if (!system_supports_mte())
 		return 0;
@@ -212,10 +212,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
 	if (task != current) {
 		task->thread.sctlr_tcf0 = tcf0;
-		task->thread.gcr_user_incl = gcr_incl;
+		task->thread.gcr_user_excl = gcr_excl;
 	} else {
 		set_sctlr_el1_tcf0(tcf0);
-		set_gcr_el1_excl(gcr_incl);
+		set_gcr_el1_excl(gcr_excl);
 	}
 
 	return 0;
@@ -224,11 +224,12 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 long get_mte_ctrl(struct task_struct *task)
 {
 	unsigned long ret;
+	u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
 
 	if (!system_supports_mte())
 		return 0;
 
-	ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+	ret = incl << PR_MTE_TAG_SHIFT;
 
 	switch (task->thread.sctlr_tcf0) {
 	case SCTLR_EL1_TCF0_NONE:
-- 
2.28.0.220.ged08abb693-goog
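
P.S. For reference, a minimal standalone sketch of the include -> exclude
conversion that set_mte_ctrl()/get_mte_ctrl() perform after this patch. The
helper names are hypothetical and the constants are restated locally so the
snippet builds in userspace; they are assumed to mirror the kernel's PR_MTE_*
and SYS_GCR_EL1_EXCL_MASK definitions, not taken from this diff:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed mirrors of the kernel definitions, for a standalone build. */
	#define PR_MTE_TAG_SHIFT	3
	#define PR_MTE_TAG_MASK		(0xffffUL << PR_MTE_TAG_SHIFT)
	#define SYS_GCR_EL1_EXCL_MASK	0xffffUL

	/* prctl() include mask (one bit per tag) -> GCR_EL1 exclude mask. */
	static uint64_t incl_to_excl(uint64_t arg)
	{
		return ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
		       SYS_GCR_EL1_EXCL_MASK;
	}

	/* GCR_EL1 exclude mask -> prctl() include bits, as get_mte_ctrl() does. */
	static uint64_t excl_to_incl(uint64_t excl)
	{
		return (~excl & SYS_GCR_EL1_EXCL_MASK) << PR_MTE_TAG_SHIFT;
	}

	int main(void)
	{
		/* Include only tags 0 and 1 in random generation. */
		uint64_t arg = 0x3UL << PR_MTE_TAG_SHIFT;
		uint64_t excl = incl_to_excl(arg);

		/* Excludes tags 2..15: prints 0xfffc. */
		printf("exclude mask: 0x%llx\n", (unsigned long long)excl);
		/* Round-trips back to the prctl() bits: prints 0x18 (== arg). */
		printf("include bits: 0x%llx\n",
		       (unsigned long long)excl_to_incl(excl));

		return 0;
	}

With an all-zero include mask (the previous flush_mte_state() default), the
resulting exclude mask is SYS_GCR_EL1_EXCL_MASK, which is why the reset value
in flush_mte_state() changes from 0 to SYS_GCR_EL1_EXCL_MASK above.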