Rename MPOL_F_MORON to MPOL_F_MOPRON ("Migrate On Protnone Reference On
Node") to follow the code of conduct better.

Signed-off-by: "Huang, Ying" <ying.huang@xxxxxxxxx>
Suggested-by: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Acked-by: Rafael Aquini <aquini@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Rafael Aquini <aquini@xxxxxxxxxx>
---
 include/uapi/linux/mempolicy.h | 2 +-
 kernel/sched/debug.c           | 2 +-
 mm/mempolicy.c                 | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 3354774af61e..3c3666d017e6 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -60,7 +60,7 @@ enum {
 #define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
 #define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
 #define MPOL_F_MOF	(1 << 3) /* this policy wants migrate on fault */
-#define MPOL_F_MORON	(1 << 4) /* Migrate On protnone Reference On Node */
+#define MPOL_F_MOPRON	(1 << 4) /* Migrate On Protnone Reference On Node */
 
 #endif /* _UAPI_LINUX_MEMPOLICY_H */
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 0655524700d2..8bfb6adb3f31 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -898,7 +898,7 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 
 	task_lock(p);
 	pol = p->mempolicy;
-	if (pol && !(pol->flags & MPOL_F_MORON))
+	if (pol && !(pol->flags & MPOL_F_MOPRON))
 		pol = NULL;
 	mpol_get(pol);
 	task_unlock(p);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3fde772ef5ef..f6948b659643 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2511,7 +2511,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 	}
 
 	/* Migrate the page towards the node whose CPU is referencing it */
-	if (pol->flags & MPOL_F_MORON) {
+	if (pol->flags & MPOL_F_MOPRON) {
 		polnid = thisnid;
 
 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
@@ -2802,7 +2802,7 @@ void __init numa_policy_init(void)
 		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
 			.mode = MPOL_PREFERRED,
-			.flags = MPOL_F_MOF | MPOL_F_MORON,
+			.flags = MPOL_F_MOF | MPOL_F_MOPRON,
 			.v = { .preferred_node = nid, },
 		};
 	}
@@ -3010,7 +3010,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 	unsigned short mode = MPOL_DEFAULT;
 	unsigned short flags = 0;
 
-	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
+	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MOPRON)) {
 		mode = pol->mode;
 		flags = pol->flags;
 	}
-- 
2.28.0