VMAs containing coherent device memory should be marked with the new
VM_CDM flag. Various core kernel paths need to identify such VMAs and
treat them specially, and a VMA flag gives them a cheap test that does
not require walking the pages backing the VMA.

The flag is set in alloc_pages_vma(): once an allocation whose nodemask
contains an isolated CDM node actually returns a page from such a node,
the owning VMA is tagged with VM_CDM.

Signed-off-by: Anshuman Khandual <khandual@xxxxxxxxxxxxxxxxxx>
---
 include/linux/mm.h |  5 +++++
 mm/mempolicy.c     | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3a19185..acee4d1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -182,6 +182,11 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
+
+#ifdef CONFIG_COHERENT_DEVICE
+#define VM_CDM		0x00800000	/* Contains coherent device memory */
+#endif
+
 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
 #define VM_ARCH_2	0x02000000
 #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cb1ba01..b983cea 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -174,6 +174,44 @@ static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 	nodes_onto(*ret, tmp, *rel);
 }
 
+#ifdef CONFIG_COHERENT_DEVICE
+static bool nodemask_contains_cdm(nodemask_t *nodes)
+{
+	int nid;
+
+	if (!nodes)
+		return false;
+
+	for_each_node_mask(nid, *nodes)
+		if (isolated_cdm_node(nid))
+			return true;
+	return false;
+}
+
+/*
+ * Mark the VMA with VM_CDM when an allocation explicitly targeted at an
+ * isolated coherent device memory node has returned a page from one.
+ */
+static void update_coherent_vma_flag(nodemask_t *nmask,
+		struct page *page, struct vm_area_struct *vma)
+{
+	if (!page)
+		return;
+
+	if (nodemask_contains_cdm(nmask)) {
+		if (!(vma->vm_flags & VM_CDM)) {
+			if (isolated_cdm_node(page_to_nid(page)))
+				vma->vm_flags |= VM_CDM;
+		}
+	}
+}
+#else
+static void update_coherent_vma_flag(nodemask_t *nmask,
+		struct page *page, struct vm_area_struct *vma)
+{
+}
+#endif
+
 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 {
 	if (nodes_empty(*nodes))
@@ -2045,6 +2083,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 	zl = policy_zonelist(gfp, pol, node);
 	mpol_cond_put(pol);
 	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+	update_coherent_vma_flag(nmask, page, vma);
+
 out:
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
-- 
2.1.0
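
As an illustration only (not part of this patch), core kernel paths that
need to identify these VMAs could wrap the test in a small helper; the
name vma_is_cdm() is hypothetical and used just for this sketch:

#ifdef CONFIG_COHERENT_DEVICE
static inline bool vma_is_cdm(struct vm_area_struct *vma)
{
	/* VM_CDM only exists when CONFIG_COHERENT_DEVICE is set */
	return !!(vma->vm_flags & VM_CDM);
}
#else
static inline bool vma_is_cdm(struct vm_area_struct *vma)
{
	return false;
}
#endif

A path such as automatic NUMA balancing could then skip device-memory
VMAs with a plain vma_is_cdm(vma) check instead of inspecting the pages
backing the VMA; the #ifdef split mirrors the one in the mm.h hunk above,
since VM_CDM is undefined otherwise.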
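
For completeness, a rough userspace sketch of how a VMA might come to be
tagged: binding a mapping to a CDM node with mbind(2) makes subsequent
faults allocate through alloc_pages_vma() with a nodemask containing that
node. The node number below is an assumption; which node (if any) is a
CDM node is system-specific:

#include <stddef.h>
#include <numaif.h>	/* mbind(), MPOL_BIND; link with -lnuma */

/* Bind [addr, addr + len) to node 1, assumed here to be a CDM node. */
static long bind_to_assumed_cdm_node(void *addr, size_t len)
{
	unsigned long nodemask = 1UL << 1;

	return mbind(addr, len, MPOL_BIND, &nodemask,
		     sizeof(nodemask) * 8, 0);
}

After a successful mbind(), the first page allocated into the mapping
from the CDM node would cause alloc_pages_vma() to set VM_CDM on the VMA.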