On Fri, Apr 03, 2020 at 12:35:50PM +0300, Jarkko Sakkinen wrote:
> From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
> @@ -221,12 +224,16 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
>  		return ret;
>  	}
> 
> +	/*
> +	 * The page reclaimer uses list version for synchronization instead of
> +	 * synchronize_scru() because otherwise we could conflict with
> +	 * dup_mmap().
> +	 */
>  	spin_lock(&encl->mm_lock);
>  	list_add_rcu(&encl_mm->list, &encl->mm_list);

You dropped the smp_wmb().

> +	encl->mm_list_version++;
>  	spin_unlock(&encl->mm_lock);
> 
> -	synchronize_srcu(&encl->srcu);
> -
>  	return 0;
>  }
> 
> diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
> index 44b353aa8866..f0f72e591244 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.h
> +++ b/arch/x86/kernel/cpu/sgx/encl.h
> @@ -74,6 +74,7 @@ struct sgx_encl {
>  	struct mutex lock;
>  	struct list_head mm_list;
>  	spinlock_t mm_lock;
> +	unsigned long mm_list_version;
>  	struct file *backing;
>  	struct kref refcount;
>  	struct srcu_struct srcu;
> diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
> index 39f0ddefbb79..5fb8bdfa6a1f 100644
> --- a/arch/x86/kernel/cpu/sgx/reclaim.c
> +++ b/arch/x86/kernel/cpu/sgx/reclaim.c
> @@ -184,28 +184,38 @@ static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
>  	struct sgx_encl_page *page = epc_page->owner;
>  	unsigned long addr = SGX_ENCL_PAGE_ADDR(page);
>  	struct sgx_encl *encl = page->encl;
> +	unsigned long mm_list_version;
>  	struct sgx_encl_mm *encl_mm;
>  	struct vm_area_struct *vma;
>  	int idx, ret;
> 
> -	idx = srcu_read_lock(&encl->srcu);
> +	do {
> +		mm_list_version = encl->mm_list_version;
> 
> -	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
> -		if (!mmget_not_zero(encl_mm->mm))
> -			continue;
> +		/* Fence reads as the CPU can reorder them. This guarantees
> +		 * that we don't access old list with a new version.

This comment is flat out wrong.  This has nothing to do with the CPU
reordering things.  The smp_{r,w}mb() are nothing more than compiler
barriers, and even those go away when the kernel is built with SMP=0.

I don't mind gutting the other comments, but there is a well established
pattern for smp_wmb()/smp_rmb() pairs; I would strongly prefer to keep the
exact comment I submitted (rough sketch of what I mean at the end of this
mail).

> +		 */
> +		smp_rmb();
> 
> -		down_read(&encl_mm->mm->mmap_sem);
> +		idx = srcu_read_lock(&encl->srcu);
> 
> -		ret = sgx_encl_find(encl_mm->mm, addr, &vma);
> -		if (!ret && encl == vma->vm_private_data)
> -			zap_vma_ptes(vma, addr, PAGE_SIZE);
> +		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
> +			if (!mmget_not_zero(encl_mm->mm))
> +				continue;
> 
> -		up_read(&encl_mm->mm->mmap_sem);
> +			down_read(&encl_mm->mm->mmap_sem);
> 
> -		mmput_async(encl_mm->mm);
> -	}
> +			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
> +			if (!ret && encl == vma->vm_private_data)
> +				zap_vma_ptes(vma, addr, PAGE_SIZE);
> 
> -	srcu_read_unlock(&encl->srcu, idx);
> +			up_read(&encl_mm->mm->mmap_sem);
> +
> +			mmput_async(encl_mm->mm);
> +		}
> +
> +		srcu_read_unlock(&encl->srcu, idx);
> +	} while (unlikely(encl->mm_list_version != mm_list_version));
> 
>  	mutex_lock(&encl->lock);
> 
> @@ -250,6 +260,11 @@ static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
>  	struct sgx_encl_mm *encl_mm;
>  	int idx;
> 
> +	/*
> +	 * Can race with sgx_encl_mm_add(), but ETRACK has already been
> +	 * executed, which means that the CPUs running in the new mm will enter
> +	 * into the enclave with a fresh epoch.
> +	 */
>  	cpumask_clear(cpumask);
> 
>  	idx = srcu_read_lock(&encl->srcu);
> --
> 2.25.1
> 
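
For reference, here is a rough sketch of the smp_wmb()/smp_rmb() pairing I
mean.  This is purely illustrative and not the actual SGX code: the demo_*
names are made up, and the real reclaimer walks the list under SRCU rather
than plain RCU.

/*
 * Illustrative sketch only: the writer publishes the new list entry before
 * bumping the version, the reader snapshots the version before walking the
 * list and retries if the version changed underneath it.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_encl {
	spinlock_t mm_lock;
	struct list_head mm_list;
	unsigned long mm_list_version;
};

struct demo_encl_mm {
	struct list_head list;
};

static void demo_init(struct demo_encl *encl)
{
	spin_lock_init(&encl->mm_lock);
	INIT_LIST_HEAD(&encl->mm_list);
	encl->mm_list_version = 0;
}

/* Writer side, a la sgx_encl_mm_add(): add the entry, then bump the version. */
static void demo_mm_add(struct demo_encl *encl, struct demo_encl_mm *encl_mm)
{
	spin_lock(&encl->mm_lock);
	list_add_rcu(&encl_mm->list, &encl->mm_list);
	/*
	 * Ensure the new entry is visible before the version bump.
	 * Pairs with the smp_rmb() in demo_walk().
	 */
	smp_wmb();
	encl->mm_list_version++;
	spin_unlock(&encl->mm_lock);
}

/* Reader side, a la the reclaimer: snapshot the version, walk, re-check. */
static void demo_walk(struct demo_encl *encl)
{
	struct demo_encl_mm *encl_mm;
	unsigned long version;

	do {
		version = encl->mm_list_version;
		/* Pairs with the smp_wmb() in demo_mm_add(). */
		smp_rmb();

		rcu_read_lock();
		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			/* ... act on each mm ... */
		}
		rcu_read_unlock();
	} while (encl->mm_list_version != version);
}

The property the pairing provides is that a reader which observes the new
version also observes the new list entry; the retry loop then covers the
case where the version changes mid-walk.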