On Mon, Sep 28, 2020 at 02:44:57PM +0200, Lukas Bulwahn wrote: > diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c > index 6b0f4c88b07c..90515c04d90a 100644 > --- a/arch/x86/mm/tlb.c > +++ b/arch/x86/mm/tlb.c > @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(leave_mm); > > int enable_l1d_flush_for_task(struct task_struct *tsk) > { > - int cpu, ret = 0, i; > + int i; > > /* > * Do not enable L1D_FLUSH_OUT if > @@ -329,7 +329,7 @@ int enable_l1d_flush_for_task(struct task_struct *tsk) > !static_cpu_has(X86_FEATURE_FLUSH_L1D)) > return -EINVAL; > > - cpu = get_cpu(); > + get_cpu(); > > for_each_cpu(i, &tsk->cpus_mask) { > if (cpu_data(i).smt_active == true) { > @@ -340,7 +340,7 @@ int enable_l1d_flush_for_task(struct task_struct *tsk) > > set_ti_thread_flag(&tsk->thread_info, TIF_SPEC_L1D_FLUSH); > put_cpu(); > - return ret; > + return 0; > } If you don't use the return value of get_cpu(), then change it over to preempt_{dis,en}able(). But this got me looking at the function: what is that garbage supposed to do in the first place? What do we need to disable preemption for? Please explain the desired semantics against sched_setaffinity().