The test assumes that 1024 (TEST_PAGES_PER_LOOP) host pages, rather
than guest pages, are dirtied in each iteration of guest_code(). The
current implementation misses the case of mismatched host and guest
page sizes. For example, ARM64 could have a 64KB page size in the
guest but a 4KB page size in the host; in that case only
(TEST_PAGES_PER_LOOP / 16) host pages, instead of TEST_PAGES_PER_LOOP,
are dirtied in every iteration.

Fix the issue by touching all sub-pages when the host and guest page
sizes are mismatched.

Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
 tools/testing/selftests/kvm/dirty_log_test.c | 50 +++++++++++++++-----
 1 file changed, 39 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 9c883c94d478..50b02186ce12 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -70,6 +70,7 @@
  * that may change.
  */
 static uint64_t host_page_size;
+static uint64_t host_num_pages;
 static uint64_t guest_page_size;
 static uint64_t guest_num_pages;
 static uint64_t random_array[TEST_PAGES_PER_LOOP];
@@ -94,8 +95,23 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
  */
 static void guest_code(void)
 {
+	uint64_t num_pages, page_size, sub_page_size;
 	uint64_t addr;
-	int i;
+	int pages_per_loop, i, j;
+
+	/*
+	 * The page sizes on host and VM could be different. We need
+	 * to perform writing on all sub-pages.
+	 */
+	if (host_page_size >= guest_page_size) {
+		num_pages = host_num_pages;
+		page_size = host_page_size;
+		sub_page_size = host_page_size;
+	} else {
+		num_pages = guest_num_pages;
+		page_size = guest_page_size;
+		sub_page_size = host_page_size;
+	}

 	/*
 	 * On s390x, all pages of a 1M segment are initially marked as dirty
@@ -103,18 +119,29 @@ static void guest_code(void)
 	 * To compensate this specialty in this test, we need to touch all
 	 * pages during the first iteration.
 	 */
-	for (i = 0; i < guest_num_pages; i++) {
-		addr = guest_test_virt_mem + i * guest_page_size;
-		*(uint64_t *)addr = READ_ONCE(iteration);
+	for (i = 0; i < num_pages; i++) {
+		addr = guest_test_virt_mem + i * page_size;
+		addr = align_down(addr, page_size);
+
+		for (j = 0; j < page_size / sub_page_size; j++) {
+			*(uint64_t *)(addr + j * sub_page_size) =
+				READ_ONCE(iteration);
+		}
 	}

+	pages_per_loop = (TEST_PAGES_PER_LOOP * sub_page_size) / page_size;
+
 	while (true) {
-		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
+		for (i = 0; i < pages_per_loop; i++) {
 			addr = guest_test_virt_mem;
-			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
-				* guest_page_size;
-			addr = align_down(addr, host_page_size);
-			*(uint64_t *)addr = READ_ONCE(iteration);
+			addr += (READ_ONCE(random_array[i]) % num_pages)
+				* page_size;
+			addr = align_down(addr, page_size);
+
+			for (j = 0; j < page_size / sub_page_size; j++) {
+				*(uint64_t *)(addr + j * sub_page_size) =
+					READ_ONCE(iteration);
+			}
 		}

 		/* Tell the host that we need more random numbers */
@@ -713,14 +740,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 				2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
 				guest_code);
 	guest_page_size = vm->page_size;
+	host_page_size = getpagesize();
+
 	/*
 	 * A little more than 1G of guest page sized pages. Cover the
 	 * case where the size is not aligned to 64 pages.
	 */
 	guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
 	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
-	host_page_size = getpagesize();
 	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

 	if (!p->phys_offset) {
@@ -760,6 +787,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	sync_global_to_guest(vm, host_page_size);
 	sync_global_to_guest(vm, guest_page_size);
 	sync_global_to_guest(vm, guest_test_virt_mem);
+	sync_global_to_guest(vm, host_num_pages);
 	sync_global_to_guest(vm, guest_num_pages);

 	/* Start the iterations */
--
2.23.0
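
For reference, below is a minimal standalone sketch (not part of the
patch) of the sub-page dirtying idea the fix is based on: one write
per sub-page, so every smaller page backing a larger page gets
dirtied. The names LARGE_PAGE_SIZE, SUB_PAGE_SIZE, NUM_PAGES and
dirty_all_sub_pages() are illustrative and do not appear in the
selftest; the 64KB/4KB split matches the ARM64 example above.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LARGE_PAGE_SIZE	(64 * 1024)	/* stands in for the guest page size */
#define SUB_PAGE_SIZE	(4 * 1024)	/* stands in for the host page size */
#define NUM_PAGES	4

static void dirty_all_sub_pages(uint8_t *mem, uint64_t num_pages,
				uint64_t page_size, uint64_t sub_page_size,
				uint64_t val)
{
	uint64_t i, j;

	for (i = 0; i < num_pages; i++) {
		uint8_t *page = mem + i * page_size;

		/* One write per sub-page dirties every backing page. */
		for (j = 0; j < page_size / sub_page_size; j++)
			*(uint64_t *)(page + j * sub_page_size) = val;
	}
}

int main(void)
{
	uint8_t *mem;

	/* Page-aligned buffer standing in for the guest test memory. */
	if (posix_memalign((void **)&mem, LARGE_PAGE_SIZE,
			   NUM_PAGES * LARGE_PAGE_SIZE))
		return 1;

	dirty_all_sub_pages(mem, NUM_PAGES, LARGE_PAGE_SIZE,
			    SUB_PAGE_SIZE, 1);

	printf("dirtied %d sub-pages\n",
	       NUM_PAGES * (LARGE_PAGE_SIZE / SUB_PAGE_SIZE));
	free(mem);
	return 0;
}

With the sizes above it reports 64 sub-pages dirtied (4 pages x 16
sub-pages each), mirroring the inner loop the patch adds to
guest_code().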