On Thu, Jul 11, 2013 at 10:46:48AM +0100, Mel Gorman wrote: > +++ b/mm/memory.c > @@ -3560,8 +3560,13 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, > set_pte_at(mm, addr, ptep, pte); > update_mmu_cache(vma, addr, ptep); > > + /* > + * Do not account for faults against the huge zero page. The read-only s/huge // > + * data is likely to be read-cached on the local CPUs and it is less > + * useful to know about local versus remote hits on the zero page. > + */ > page = vm_normal_page(vma, addr, pte); > - if (!page) { > + if (!page || is_zero_pfn(page_to_pfn(page))) { > pte_unmap_unlock(ptep, ptl); > return 0; > } > -- > 1.8.1.4 > -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>