On Fri, Jan 04, 2013 at 11:18:17PM -0600, Simon Jeons wrote: > > +static int > > +change_prot_numa_range(struct mm_struct *mm, struct vm_area_struct *vma, > > + unsigned long address) > > +{ > > + pgd_t *pgd; > > + pud_t *pud; > > + pmd_t *pmd; > > + pte_t *pte, *_pte; > > + struct page *page; > > + unsigned long _address, end; > > + spinlock_t *ptl; > > + int ret = 0; > > + > > + VM_BUG_ON(address & ~PAGE_MASK); > > + > > + pgd = pgd_offset(mm, address); > > + if (!pgd_present(*pgd)) > > + goto out; > > + > > + pud = pud_offset(pgd, address); > > + if (!pud_present(*pud)) > > + goto out; > > + > > + pmd = pmd_offset(pud, address); > > + if (pmd_none(*pmd)) > > + goto out; > > + > > + if (pmd_trans_huge_lock(pmd, vma) == 1) { > > + int page_nid; > > + ret = HPAGE_PMD_NR; > > + > > + VM_BUG_ON(address & ~HPAGE_PMD_MASK); > > + > > + if (pmd_numa(*pmd)) { > > + spin_unlock(&mm->page_table_lock); > > + goto out; > > + } > > + > > + page = pmd_page(*pmd); > > + > > + /* only check non-shared pages */ > > + if (page_mapcount(page) != 1) { > > + spin_unlock(&mm->page_table_lock); > > + goto out; > > + } > > + > > + page_nid = page_to_nid(page); > > + > > + if (pmd_numa(*pmd)) { > > + spin_unlock(&mm->page_table_lock); > > + goto out; > > + } > > + > > Hi Gorman, > > Since pmd_trans_huge_lock has already held &mm->page_table_lock, then > why check pmd_numa(*pmd) again? > It looks like an oversight. I've added a TODO item to clean it up when I revisit NUMA balancing some time soon. Thanks. -- Mel Gorman SUSE Labs -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>