On 01/16/2020 09:41 AM, Li Xinhai wrote:
> Checking hstate at early phase when isolating page, instead of during
> unmap and move phase, to avoid useless isolation.
>
> Signed-off-by: Li Xinhai <lixinhai.lxh@xxxxxxxxx>
> Cc: Michal Hocko <mhocko@xxxxxxxx>
> Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
> ---

Is there a change log from the previous versions?

>  include/linux/hugetlb.h   | 10 ++++++++++
>  include/linux/mempolicy.h | 29 +----------------------------
>  mm/mempolicy.c            | 28 ++++++++++++++++++++++++++++
>  3 files changed, 39 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 31d4920..c9d871d 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -598,6 +598,11 @@ static inline bool hugepage_migration_supported(struct hstate *h)
>  	return arch_hugetlb_migration_supported(h);
>  }
>
> +static inline bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
> +{
> +	return hugepage_migration_supported(hstate_vma(vma));
> +}

Another wrapper around hugepage_migration_supported() is not necessary.

> +
>  /*
>   * Movability check is different as compared to migration check.
>   * It determines whether or not a huge page should be placed on
> @@ -809,6 +814,11 @@ static inline bool hugepage_migration_supported(struct hstate *h)
>  	return false;
>  }
>
> +static inline bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
> +{
> +	return false;
> +}
> +
>  static inline bool hugepage_movable_supported(struct hstate *h)
>  {
>  	return false;
> diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
> index 5228c62..8165278 100644
> --- a/include/linux/mempolicy.h
> +++ b/include/linux/mempolicy.h
> @@ -173,34 +173,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
>  extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
>
>  /* Check if a vma is migratable */
> -static inline bool vma_migratable(struct vm_area_struct *vma)
> -{
> -	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
> -		return false;
> -
> -	/*
> -	 * DAX device mappings require predictable access latency, so avoid
> -	 * incurring periodic faults.
> -	 */
> -	if (vma_is_dax(vma))
> -		return false;
> -
> -#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
> -	if (vma->vm_flags & VM_HUGETLB)
> -		return false;
> -#endif
> -
> -	/*
> -	 * Migration allocates pages in the highest zone. If we cannot
> -	 * do so then migration (at least from node to node) is not
> -	 * possible.
> -	 */
> -	if (vma->vm_file &&
> -		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
> -		< policy_zone)
> -		return false;
> -	return true;
> -}

Why is vma_migratable() being moved out of the header?

> +extern bool vma_migratable(struct vm_area_struct *vma);
>
>  extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
>  extern void mpol_put_task_policy(struct task_struct *);
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 067cf7d..8a01fb1 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1714,6 +1714,34 @@ static int kernel_get_mempolicy(int __user *policy,
>
>  #endif /* CONFIG_COMPAT */
>
> +bool vma_migratable(struct vm_area_struct *vma)
> +{
> +	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
> +		return false;
> +
> +	/*
> +	 * DAX device mappings require predictable access latency, so avoid
> +	 * incurring periodic faults.
> +	 */
> +	if (vma_is_dax(vma))
> +		return false;
> +
> +	if (is_vm_hugetlb_page(vma) &&
> +		!vm_hugepage_migration_supported(vma))
> +		return false;

This check (using hugepage_migration_supported() directly instead of the new
wrapper) can be added in the original location without moving vma_migratable();
see the sketch at the end of this mail.

> +
> +	/*
> +	 * Migration allocates pages in the highest zone. If we cannot
> +	 * do so then migration (at least from node to node) is not
> +	 * possible.
> +	 */
> +	if (vma->vm_file &&
> +		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
> +		< policy_zone)
> +		return false;
> +	return true;
> +}
> +
>  struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
> 						unsigned long addr)
>  {
>
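To make the suggestion concrete, here is a rough, untested sketch of what
keeping vma_migratable() inline in include/linux/mempolicy.h could look like,
with the compile time CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION check replaced by
asking the hstate directly. This assumes the needed hugetlb.h declarations
(is_vm_hugetlb_page(), hstate_vma(), hugepage_migration_supported()) are
reachable from that header, which needs checking:

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return false;

        /*
         * DAX device mappings require predictable access latency, so avoid
         * incurring periodic faults.
         */
        if (vma_is_dax(vma))
                return false;

        /*
         * Ask the hstate backing this mapping instead of relying on the
         * compile time CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION check, so
         * the decision matches the actual huge page size in use.
         */
        if (is_vm_hugetlb_page(vma) &&
            !hugepage_migration_supported(hstate_vma(vma)))
                return false;

        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
            gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                        < policy_zone)
                return false;

        return true;
}

If header dependencies make that impractical, then that would be the real
justification for moving the function into mm/mempolicy.c and should be
mentioned in the commit message.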