+ selftests-cgroup-add-test-for-memorylow-corner-cases.patch added to -mm tree

The patch titled
     Subject: selftests: cgroup: add test for memory.low corner cases
has been added to the -mm tree.  Its filename is
     selftests-cgroup-add-test-for-memorylow-corner-cases.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/selftests-cgroup-add-test-for-memorylow-corner-cases.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/selftests-cgroup-add-test-for-memorylow-corner-cases.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Roman Gushchin <guro@xxxxxx>
Subject: selftests: cgroup: add test for memory.low corner cases

Add a more complicated test for memory.low hierarchical behavior.  It
creates the following hierarchy:

        A          memory.low = 50M
        |
        B          memory.low = 50M
        |
        C          memory.low = 50M
        |
        D          memory.low = 50M  memory.(swap.)max = 200M
        |
        E          memory.low = 50M
       / \
      F'  F        memory.low = 50M
          |
          G        memory.low = 50M
          |
          H        memory.low = 50M
          |
          I        memory.low = 50M
          |
          J        memory.low = 50M
          |
          K        memory.low = 50M, memory.usage = 50M

First, it creates local memory pressure by charging pagecache to F' and
checks that K's memory.low actually works (usage ~= 50M).  Then it sets
C's memory.low to 0 and repeats the test to check that K's memory.low no
longer works, even though memory.low is disabled above the cgroup under
memory pressure (D).
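
For anyone who wants to reproduce the gist of this check by hand, here is a
minimal standalone sketch (not part of the patch) of the kind of verification
the test automates: write a memory.low protection value, then compare
memory.current against the expected value within a small tolerance, similar
in spirit to the selftest's values_close() helper.  The cgroup path
/sys/fs/cgroup/low_demo, the close_enough() helper and the 3% tolerance are
illustrative assumptions only, and the sketch assumes cgroup v2 is mounted at
/sys/fs/cgroup.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Return true if a and b differ by no more than err_pct percent of their sum. */
static bool close_enough(long a, long b, int err_pct)
{
	return labs(a - b) <= (a + b) * err_pct / 100;
}

/* Read a single long value from a cgroup control file. */
static long read_long_file(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Hypothetical cgroup path, used purely for illustration. */
	const char *cg = "/sys/fs/cgroup/low_demo";
	char path[256];
	long usage;
	FILE *f;

	/* Protect roughly 50M of this cgroup's memory. */
	snprintf(path, sizeof(path), "%s/memory.low", cg);
	f = fopen(path, "w");
	if (!f) {
		perror("memory.low");
		return 1;
	}
	fprintf(f, "50M\n");
	fclose(f);

	/* After generating pressure elsewhere, usage should stay near 50M. */
	snprintf(path, sizeof(path), "%s/memory.current", cg);
	usage = read_long_file(path);
	if (usage < 0) {
		fprintf(stderr, "failed to read %s\n", path);
		return 1;
	}
	printf("memory.current = %ld (%s)\n", usage,
	       close_enough(usage, 50L << 20, 3) ? "protected" : "reclaimed");
	return 0;
}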

Link: http://lkml.kernel.org/r/20180522133106.24306-1-guro@xxxxxx
Signed-off-by: Roman Gushchin <guro@xxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Greg Thelen <gthelen@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Shuah Khan <shuah@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 tools/testing/selftests/cgroup/test_memcontrol.c |  115 +++++++++++++
 1 file changed, 115 insertions(+)

diff -puN tools/testing/selftests/cgroup/test_memcontrol.c~selftests-cgroup-add-test-for-memorylow-corner-cases tools/testing/selftests/cgroup/test_memcontrol.c
--- a/tools/testing/selftests/cgroup/test_memcontrol.c~selftests-cgroup-add-test-for-memorylow-corner-cases
+++ a/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -512,6 +512,120 @@ cleanup:
 	return ret;
 }
 
+static int alloc_pagecache_500M(const char *cgroup, void *arg)
+{
+	int fd = (long)arg;
+
+	return alloc_pagecache(fd, MB(500));
+}
+
+/*
+ * The test creates 10 nested memory cgroups with memory.low set to 50M,
+ * with 50M of pagecache charged to the leaf cgroup.
+ * Then it sets memory.max to 200M and memory.swap.max to 0 on the 3rd level,
+ * and creates memory pressure on the 5th level.
+ * First it checks that memory.low actually works:
+ * the expected usage on the 9th level is ~50M.
+ * Then it sets memory.low on the 2nd level to 0 and checks
+ * that memory.low has stopped working:
+ * the expected usage on the 9th level is < 20M.
+ */
+static int test_memcg_low_nested(const char *root)
+{
+	int ret = KSFT_FAIL;
+	char *cgroup[10] = {NULL};
+	char *cgroup2 = NULL;
+	long usage;
+	int i, fd;
+
+	fd = get_temp_fd();
+	if (fd < 0)
+		goto cleanup;
+
+	for (i = 0; i < ARRAY_SIZE(cgroup); i++) {
+		cgroup[i] = cg_name_indexed(i ? cgroup[i - 1] : root, "cg", i);
+		if (!cgroup[i])
+			goto cleanup;
+
+		if (cg_create(cgroup[i]))
+			goto cleanup;
+
+		if (i < ARRAY_SIZE(cgroup) - 1)
+			if (cg_write(cgroup[i], "cgroup.subtree_control",
+				     "+memory"))
+				goto cleanup;
+
+		if (i == 3) {
+			if (cg_write(cgroup[i], "memory.max", "200M"))
+				goto cleanup;
+
+			if (cg_write(cgroup[i], "memory.swap.max", "0"))
+				goto cleanup;
+		}
+
+		if (cg_write(cgroup[i], "memory.low", "50M"))
+			goto cleanup;
+	}
+
+	cgroup2 = cg_name(cgroup[5], "memcg_pressure");
+	if (!cgroup2)
+		goto cleanup;
+
+	if (cg_create(cgroup2))
+		goto cleanup;
+
+	/* Part 1 */
+	if (cg_run(cgroup[ARRAY_SIZE(cgroup) - 1], alloc_pagecache_50M,
+		   (void *)(long)fd))
+		goto cleanup;
+
+	if (cg_run(cgroup2, alloc_pagecache_500M, (void *)(long)fd))
+		goto cleanup;
+
+	if (!values_close(cg_read_long(cgroup[ARRAY_SIZE(cgroup) - 1],
+				       "memory.current"), MB(50), 3))
+		goto cleanup;
+
+	close(fd);
+	fd = get_temp_fd();
+	if (fd < 0)
+		goto cleanup;
+
+	/* Part 2 */
+	if (cg_write(cgroup[2], "memory.low", "0"))
+		goto cleanup;
+
+	if (cg_run(cgroup[ARRAY_SIZE(cgroup) - 1], alloc_pagecache_50M,
+		   (void *)(long)fd))
+		goto cleanup;
+
+	if (cg_run(cgroup2, alloc_pagecache_500M, (void *)(long)fd))
+		goto cleanup;
+
+	usage = cg_read_long(cgroup[ARRAY_SIZE(cgroup) - 1], "memory.current");
+	if (usage > MB(20))
+		goto cleanup;
+
+	ret = KSFT_PASS;
+
+cleanup:
+	if (cgroup2) {
+		cg_destroy(cgroup2);
+		free(cgroup2);
+	}
+
+	for (i = ARRAY_SIZE(cgroup) - 1; i >= 0; i--) {
+		if (!cgroup[i])
+			continue;
+
+		cg_destroy(cgroup[i]);
+		free(cgroup[i]);
+	}
+
+	close(fd);
+	return ret;
+}
+
 static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
 {
 	size_t size = MB(50);
@@ -781,6 +895,7 @@ struct memcg_test {
 	T(test_memcg_current),
 	T(test_memcg_min),
 	T(test_memcg_low),
+	T(test_memcg_low_nested),
 	T(test_memcg_high),
 	T(test_memcg_max),
 	T(test_memcg_oom_events),
_

Patches currently in -mm which might be from guro@xxxxxx are

mm-rename-page_counters-count-limit-into-usage-max.patch
mm-memorylow-hierarchical-behavior.patch
mm-treat-memorylow-value-inclusive.patch
mm-docs-describe-memorylow-refinements.patch
mm-introduce-memorymin.patch
mm-introduce-memorymin-fix.patch
mm-oom-refactor-the-oom_kill_process-function.patch
mm-implement-mem_cgroup_scan_tasks-for-the-root-memory-cgroup.patch
mm-oom-cgroup-aware-oom-killer.patch
mm-oom-introduce-memoryoom_group.patch
mm-oom-introduce-memoryoom_group-fix.patch
mm-oom-add-cgroup-v2-mount-option-for-cgroup-aware-oom-killer.patch
mm-oom-docs-describe-the-cgroup-aware-oom-killer.patch
mm-oom-docs-describe-the-cgroup-aware-oom-killer-fix.patch
cgroup-list-groupoom-in-cgroup-features.patch
mm-fix-oom_kill-event-handling.patch
mm-propagate-memory-effective-protection-on-setting-memorymin-low.patch
mm-dont-skip-memory-guarantee-calculations.patch
selftests-cgroup-add-test-for-memorylow-corner-cases.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html