The dirty ring feature needs to call kvm_cpu_synchronize_kick_all to flush hardware buffers into KVM slots, but when auto-converge is running, calls to kvm_cpu_synchronize_kick_all become more and more time-consuming. This will significantly reduce the efficiency of dirty page queries, especially when memory pressure is high and the speed limit is high. When the CPU speed limit is high and kvm_cpu_synchronize_kick_all is time-consuming, the rate of dirty pages generated by the VM will also be significantly reduced, so it is not necessary to call kvm_cpu_synchronize_kick_all at this time; just call it once before stopping the VM. This will significantly improve the efficiency of dirty page queries under high pressure. Signed-off-by: Chongyun Wu <wucy11@xxxxxxxxxxxxxxx> --- accel/kvm/kvm-all.c | 23 +++-------------------- include/sysemu/cpus.h | 1 + migration/migration.c | 12 ++++++++++++ softmmu/cpus.c | 18 ++++++++++++++++++ 4 files changed, 34 insertions(+), 20 deletions(-) diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 51012f4..64a211b 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -839,25 +839,6 @@ static uint64_t kvm_dirty_ring_reap(KVMState *s) return total; } -static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg) -{ - /* No need to do anything */ -} - -/* - * Kick all vcpus out in a synchronized way. When returned, we - * guarantee that every vcpu has been kicked and at least returned to - * userspace once. - */ -static void kvm_cpu_synchronize_kick_all(void) -{ - CPUState *cpu; - - CPU_FOREACH(cpu) { - run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL); - } -} - /* * Flush all the existing dirty pages to the KVM slot buffers. When * this call returns, we guarantee that all the touched dirty pages @@ -879,7 +860,9 @@ static void kvm_dirty_ring_flush(void) * First make sure to flush the hardware buffers by kicking all * vcpus out in a synchronous way. 
*/ - kvm_cpu_synchronize_kick_all(); + if (!cpu_throttle_get_percentage()) { + qemu_kvm_cpu_synchronize_kick_all(); + } kvm_dirty_ring_reap(kvm_state); trace_kvm_dirty_ring_flush(1); } diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h index 868f119..3225b27 100644 --- a/include/sysemu/cpus.h +++ b/include/sysemu/cpus.h @@ -56,5 +56,6 @@ extern int smp_threads; #endif void list_cpus(const char *optarg); +void qemu_kvm_cpu_synchronize_kick_all(void); #endif diff --git a/migration/migration.c b/migration/migration.c index bcc385b..1114b2f 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -61,6 +61,8 @@ #include "sysemu/cpus.h" #include "yank_functions.h" #include "sysemu/qtest.h" +#include "sysemu/kvm.h" +#include "sysemu/cpus.h" #define MAX_THROTTLE (128 << 20) /* Migration transfer speed throttling */ @@ -3177,6 +3179,16 @@ static void migration_completion(MigrationState *s) if (!ret) { bool inactivate = !migrate_colo_enabled(); + /* + * Before stopping the VM, call qemu_kvm_cpu_synchronize_kick_all + * to flush hardware buffers into KVM slots for the dirty ring + * optimization, since qemu_kvm_cpu_synchronize_kick_all is not + * called while the CPU speed is limited, to improve efficiency + */ + if (kvm_dirty_ring_enabled() + && cpu_throttle_get_percentage()) { + qemu_kvm_cpu_synchronize_kick_all(); + } ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); trace_migration_completion_vm_stop(ret); if (ret >= 0) { diff --git a/softmmu/cpus.c b/softmmu/cpus.c index 035395a..505ed3e 100644 --- a/softmmu/cpus.c +++ b/softmmu/cpus.c @@ -807,3 +807,21 @@ void qmp_inject_nmi(Error **errp) nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp); } +static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg) +{ + /* No need to do anything */ +} + +/* + * Kick all vcpus out in a synchronized way. When returned, we + * guarantee that every vcpu has been kicked and at least returned to + * userspace once. 
+ */ +void qemu_kvm_cpu_synchronize_kick_all(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL); + } +} -- 1.8.3.1 -- Best Regards, Chongyun Wu