Re: [PATCH v8 11/14] KVM: selftests: Introduce after_vcpu_run hook for dirty log test

On Tue, Mar 31, 2020 at 02:59:57PM -0400, Peter Xu wrote:
> Provide a hook for the checks after vcpu_run() completes.  Preparation
> for the dirty ring test because we'll need to take care of another
> exit reason.
> 
> While at it, drop the pages_count, since we now have a better
> summary with the statistics, and clean it up a bit.

I don't see what you mean by "drop the pages_count"; it's still
there, incremented in vcpu_worker() and printed at the end. But
otherwise

Reviewed-by: Andrew Jones <drjones@xxxxxxxxxx>

> 
> Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
> ---
>  tools/testing/selftests/kvm/dirty_log_test.c | 36 +++++++++++++-------
>  1 file changed, 24 insertions(+), 12 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
> index 139ccb550618..a2160946bcf5 100644
> --- a/tools/testing/selftests/kvm/dirty_log_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_test.c
> @@ -178,6 +178,15 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
>  	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
>  }
>  
> +static void default_after_vcpu_run(struct kvm_vm *vm)
> +{
> +	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
> +
> +	TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
> +		    "Invalid guest sync status: exit_reason=%s\n",
> +		    exit_reason_str(run->exit_reason));
> +}
> +
>  struct log_mode {
>  	const char *name;
>  	/* Return true if this mode is supported, otherwise false */
> @@ -187,16 +196,20 @@ struct log_mode {
>  	/* Hook to collect the dirty pages into the bitmap provided */
>  	void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
>  				     void *bitmap, uint32_t num_pages);
> +	/* Hook to call after each vcpu run */
> +	void (*after_vcpu_run)(struct kvm_vm *vm);
>  } log_modes[LOG_MODE_NUM] = {
>  	{
>  		.name = "dirty-log",
>  		.collect_dirty_pages = dirty_log_collect_dirty_pages,
> +		.after_vcpu_run = default_after_vcpu_run,
>  	},
>  	{
>  		.name = "clear-log",
>  		.supported = clear_log_supported,
>  		.create_vm_done = clear_log_create_vm_done,
>  		.collect_dirty_pages = clear_log_collect_dirty_pages,
> +		.after_vcpu_run = default_after_vcpu_run,
>  	},
>  };
>  
> @@ -247,6 +260,14 @@ static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
>  	mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
>  }
>  
> +static void log_mode_after_vcpu_run(struct kvm_vm *vm)
> +{
> +	struct log_mode *mode = &log_modes[host_log_mode];
> +
> +	if (mode->after_vcpu_run)
> +		mode->after_vcpu_run(vm);
> +}
> +
>  static void generate_random_array(uint64_t *guest_array, uint64_t size)
>  {
>  	uint64_t i;
> @@ -261,25 +282,16 @@ static void *vcpu_worker(void *data)
>  	struct kvm_vm *vm = data;
>  	uint64_t *guest_array;
>  	uint64_t pages_count = 0;
> -	struct kvm_run *run;
> -
> -	run = vcpu_state(vm, VCPU_ID);
>  
>  	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
> -	generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
>  
>  	while (!READ_ONCE(host_quit)) {
> +		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
> +		pages_count += TEST_PAGES_PER_LOOP;
>  		/* Let the guest dirty the random pages */
>  		ret = _vcpu_run(vm, VCPU_ID);
>  		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
> -		if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
> -			pages_count += TEST_PAGES_PER_LOOP;
> -			generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
> -		} else {
> -			TEST_FAIL("Invalid guest sync status: "
> -				  "exit_reason=%s\n",
> -				  exit_reason_str(run->exit_reason));
> -		}
> +		log_mode_after_vcpu_run(vm);
>  	}
>  
>  	pr_info("Dirtied %"PRIu64" pages\n", pages_count);
> -- 
> 2.24.1
> 
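
For anyone following along, here's roughly how I'd expect the dirty
ring mode to plug into this hook later in the series: a handler that
accepts one more exit reason besides the sync ucall. This is only a
sketch written against the helpers visible in this patch; the
"dirty-ring" mode name, the dirty_ring_after_vcpu_run() name, and the
KVM_EXIT_DIRTY_RING_FULL constant are my assumptions about the later
patches, not something this patch defines:

static void dirty_ring_after_vcpu_run(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	/* Normal case: the guest completed a sync ucall. */
	if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC)
		return;

	/*
	 * The second exit reason this hook exists for (assumed name):
	 * the dirty ring filled up and needs to be harvested before
	 * the vcpu can be run again.
	 */
	TEST_ASSERT(run->exit_reason == KVM_EXIT_DIRTY_RING_FULL,
		    "Invalid guest sync status: exit_reason=%s\n",
		    exit_reason_str(run->exit_reason));
}

and then the new mode just adds a table entry:

	{
		.name = "dirty-ring",
		.after_vcpu_run = dirty_ring_after_vcpu_run,
	},

The nice thing about the hook is that vcpu_worker() stays
mode-agnostic; only the log_modes[] entry changes.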
