On Mon, Aug 15, 2022 at 03:37:16PM +0200, Peter Zijlstra wrote:
> On Mon, Aug 15, 2022 at 07:17:58AM +0300, Kirill A. Shutemov wrote:
> 
> > +static void enable_lam_func(void *mm)
> > +{
> > +	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
> > +	unsigned long lam_mask;
> > +	unsigned long cr3;
> > +
> > +	if (loaded_mm != mm)
> > +		return;
> > +
> > +	lam_mask = READ_ONCE(loaded_mm->context.lam_cr3_mask);
> > +
> > +	/* Update CR3 to get LAM active on the CPU */
> > +	cr3 = __read_cr3();
> > +	cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
> > +	cr3 |= lam_mask;
> > +	write_cr3(cr3);
> > +	set_tlbstate_cr3_lam_mask(lam_mask);
> > +}
> > +
> > +static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
> > +{
> > +	int ret = 0;
> > +
> > +	if (!cpu_feature_enabled(X86_FEATURE_LAM))
> > +		return -ENODEV;
> > +
> > +	mutex_lock(&mm->context.lock);
> > +
> > +	/* Already enabled? */
> > +	if (mm->context.lam_cr3_mask) {
> > +		ret = -EBUSY;
> > +		goto out;
> > +	}
> > +
> > +	if (!nr_bits) {
> > +		ret = -EINVAL;
> > +		goto out;
> > +	} else if (nr_bits <= 6) {
> > +		mm->context.lam_cr3_mask = X86_CR3_LAM_U57;
> > +		mm->context.untag_mask = ~GENMASK(62, 57);
> > +	} else {
> > +		ret = -EINVAL;
> > +		goto out;
> > +	}
> > +
> > +	/* Make lam_cr3_mask and untag_mask visible on other CPUs */
> > +	smp_mb();
> 
> smp_mb() doesn't make visible -- it is about ordering, what does it
> order against that the below on_each_cpu_mask() doesn't already take
> care of?

You are right, it is redundant. I will drop it.

--
 Kiryl Shutsemau / Kirill A. Shutemov
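
For reference, a minimal sketch of how the tail of prctl_enable_tagged_addr() could look once the barrier is dropped, assuming the on_each_cpu_mask() call Peter refers to follows right after the quoted hunk and is keyed off mm_cpumask(mm); the cpumask argument and the unlock/return path are assumptions, not shown in the quoted patch:

	/*
	 * No smp_mb() needed here: on_each_cpu_mask() with wait=true
	 * already orders the stores to lam_cr3_mask and untag_mask above
	 * before the cross-CPU call, so enable_lam_func() observes the
	 * new masks on every CPU currently running this mm.
	 */
	on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
out:
	mutex_unlock(&mm->context.lock);
	return ret;
}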