On Mon, Aug 10, 2020 at 10:13 PM Martin KaFai Lau <kafai@xxxxxx> wrote: > > On Mon, Aug 10, 2020 at 06:31:39PM +0300, Yauheni Kaliuta wrote: > > The idea of adv_mmap tests is to map/unmap pages in arbitrary > > order. It works fine as soon as the kernel allocates first 3 pages > > for from a region with unallocated page after that. If it's not the > > case, the last remapping of 4 pages with MAP_FIXED will remap the > > page to bpf map which will break the code which worked with the data > > located there before. > > > > Change the test to map first the whole bpf map, 4 pages, and then > > manipulate the mappings. > > > > Signed-off-by: Yauheni Kaliuta <yauheni.kaliuta@xxxxxxxxxx> > > --- > > tools/testing/selftests/bpf/prog_tests/mmap.c | 23 ++++++++++++------- > > 1 file changed, 15 insertions(+), 8 deletions(-) > > > > diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c > > index 43d0b5578f46..5768af1e16a7 100644 > > --- a/tools/testing/selftests/bpf/prog_tests/mmap.c > > +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c > > @@ -183,38 +183,45 @@ void test_mmap(void) > > > > /* check some more advanced mmap() manipulations */ > > > > - /* map all but last page: pages 1-3 mapped */ > > - tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED, > > + /* map all 4 pages */ > > + tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, > > data_map_fd, 0); > > if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno)) > > goto cleanup; > > > > - /* unmap second page: pages 1, 3 mapped */ > > + /* unmap second page: pages 1, 3, 4 mapped */ > > err = munmap(tmp1 + page_size, page_size); > > if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) { > > munmap(tmp1, map_sz); > > goto cleanup; > > } > > > > + /* unmap forth page: pages 1, 3 mapped */ > > + err = munmap(tmp1 + (3 * page_size), page_size); > > + if (CHECK(err, "adv_mmap3", "errno %d\n", errno)) { > > + munmap(tmp1, map_sz); > 1, 3, and 4 are mapped 
here but only one munmap() with "map_sz" is used. Actually works for me: 7ffff7dd4000-7ffff7dfd000 r-xp 00000000 fd:05 3147480 /usr/lib64/ld-2.28.so 7ffff7fe3000-7ffff7fe4000 r--s 00000000 00:0e 10298 anon_inode:bpf-map 7ffff7fe5000-7ffff7fe7000 r--s 00002000 00:0e 10298 anon_inode:bpf-map 7ffff7fe7000-7ffff7fee000 rw-p 00000000 00:00 0 7ffff7ff1000-7ffff7ff5000 r--s 00000000 00:0e 10298 anon_inode:bpf-map After such unmap: 7ffff7dd4000-7ffff7dfd000 r-xp 00000000 fd:05 3147480 /usr/lib64/ld-2.28.so 7ffff7fe7000-7ffff7fee000 rw-p 00000000 00:00 0 7ffff7ff1000-7ffff7ff5000 r--s 00000000 00:0e 10298 anon_inode:bpf-map > > > + goto cleanup; > > + } > > + > > /* map page 2 back */ > > tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ, > > MAP_SHARED | MAP_FIXED, data_map_fd, 0); > > - if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) { > > + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap4", "errno %d\n", errno)) { > > munmap(tmp1, page_size); > > munmap(tmp1 + 2*page_size, page_size); > 1 and 3 are mapped here. However, multiple munmap() are used. > > Both will work the same? For the case when we do not care about the already unmapped page. But maybe I should unify and do it the same way it was done before. Thanks! 
> > > goto cleanup; > > } > > - CHECK(tmp1 + page_size != tmp2, "adv_mmap4", > > + CHECK(tmp1 + page_size != tmp2, "adv_mmap5", > > "tmp1: %p, tmp2: %p\n", tmp1, tmp2); > > > > /* re-map all 4 pages */ > > tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, > > data_map_fd, 0); > > - if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) { > > + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap6", "errno %d\n", errno)) { > > munmap(tmp1, 3 * page_size); /* unmap page 1 */ > > goto cleanup; > > } > > - CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); > > + CHECK(tmp1 != tmp2, "adv_mmap7", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); > > > > map_data = tmp2; > > CHECK_FAIL(bss_data->in_val != 321); > > @@ -231,7 +238,7 @@ void test_mmap(void) > > /* map all 4 pages, but with pg_off=1 page, should fail */ > > tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, > > data_map_fd, page_size /* initial page shift */); > > - if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) { > > + if (CHECK(tmp1 != MAP_FAILED, "adv_mmap8", "unexpected success")) { > > munmap(tmp1, 4 * page_size); > > goto cleanup; > > } > > -- > > 2.26.2 > > > -- WBR, Yauheni