Make kvm_arch_sync_dirty_log() return a value, which is needed to
propagate errors that can happen when getting dirty pages via a page
tracking device.

Architecture implementations that have nothing to report return 0
(booke returns -EOPNOTSUPP), the generic dirty-log paths in kvm_main.c
propagate any error to userspace, and s390's early return in its sync
loop now returns 0 to keep its behaviour unchanged.

Signed-off-by: Lilit Janpoladyan <lilitj@xxxxxxxxxx>
---
 arch/loongarch/kvm/mmu.c  |  3 ++-
 arch/mips/kvm/mips.c      |  4 ++--
 arch/powerpc/kvm/book3s.c |  4 ++--
 arch/powerpc/kvm/booke.c  |  4 ++--
 arch/riscv/kvm/mmu.c      |  3 ++-
 arch/s390/kvm/kvm-s390.c  |  5 +++--
 arch/x86/kvm/x86.c        | 11 ++++++-----
 include/linux/kvm_host.h  |  2 +-
 virt/kvm/kvm_main.c       | 15 ++++++++++++---
 9 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 28681dfb4b85..825c60d35529 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -943,8 +943,9 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 	return 0;
 }
 
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
+	return 0;
 }
 
 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index edc6f473af4e..4326b8c721e9 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -984,9 +984,9 @@ int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot
 	return 0;
 }
 
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-
+	return 0;
 }
 
 int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 4c4a3ecc301c..aab6f5c62aee 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -854,9 +854,9 @@ int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot
 	return 0;
 }
 
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-
+	return 0;
 }
 
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index f263ebc8fa49..60629a320222 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1824,9 +1824,9 @@ int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot
 	return -EOPNOTSUPP;
 }
 
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-
+	return -EOPNOTSUPP;
 }
 
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index b63650f9b966..53ad23432b31 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -402,8 +402,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	gstage_wp_range(kvm, start, end);
 }
 
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
+	return 0;
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d6a8f7dbc644..5f1bb4bd4121 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -677,7 +677,7 @@ int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot
 	return 0;
 }
 
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 	int i;
 	gfn_t cur_gfn, last_gfn;
@@ -705,6 +705,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
-			return;
+			return 0;
 		cond_resched();
 	}
+	return 0;
 }
 
 /* Section: vm related */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1be8bacfe2bd..e95e070c9bf3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6498,7 +6498,7 @@ int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot
 	return 0;
 }
 
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 
 	/*
@@ -6510,11 +6510,12 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
 
-	if (!kvm_x86_ops.cpu_dirty_log_size)
-		return;
+	if (kvm_x86_ops.cpu_dirty_log_size) {
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvm_vcpu_kick(vcpu);
+	}
 
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_vcpu_kick(vcpu);
+	return 0;
 }
 
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ae905f54ec47..245b4172a7fb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1477,7 +1477,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					unsigned long mask);
 int kvm_arch_enable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot);
 int kvm_arch_disable_dirty_logging(struct kvm *kvm, const struct kvm_memory_slot *memslot);
-void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
+int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
 
 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1fd5e234c188..d55d92f599b0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2145,6 +2145,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
 	int i, as_id, id;
 	unsigned long n;
 	unsigned long any = 0;
+	int r;
 
 	/* Dirty ring tracking may be exclusive to dirty log tracking */
 	if (!kvm_use_dirty_bitmap(kvm))
@@ -2163,7 +2164,9 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
 		return -ENOENT;
 
-	kvm_arch_sync_dirty_log(kvm, *memslot);
+	r = kvm_arch_sync_dirty_log(kvm, *memslot);
+	if (r)
+		return r;
 
 	n = kvm_dirty_bitmap_bytes(*memslot);
 
@@ -2210,6 +2213,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
 	unsigned long *dirty_bitmap;
 	unsigned long *dirty_bitmap_buffer;
 	bool flush;
+	int r;
 
 	/* Dirty ring tracking may be exclusive to dirty log tracking */
 	if (!kvm_use_dirty_bitmap(kvm))
@@ -2227,7 +2231,9 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
 
 	dirty_bitmap = memslot->dirty_bitmap;
 
-	kvm_arch_sync_dirty_log(kvm, memslot);
+	r = kvm_arch_sync_dirty_log(kvm, memslot);
+	if (r)
+		return r;
 
 	n = kvm_dirty_bitmap_bytes(memslot);
 	flush = false;
@@ -2322,6 +2328,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
 	unsigned long *dirty_bitmap;
 	unsigned long *dirty_bitmap_buffer;
 	bool flush;
+	int r;
 
 	/* Dirty ring tracking may be exclusive to dirty log tracking */
 	if (!kvm_use_dirty_bitmap(kvm))
@@ -2349,7 +2356,9 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
 	    (log->num_pages < memslot->npages - log->first_page &&
 	     (log->num_pages & 63)))
 		return -EINVAL;
-	kvm_arch_sync_dirty_log(kvm, memslot);
+	r = kvm_arch_sync_dirty_log(kvm, memslot);
+	if (r)
+		return r;
 
 	flush = false;
 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
-- 
2.40.1
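
For context, the sketch below shows the kind of arch implementation this
return value is intended for: one that pulls dirty state from a page
tracking device and lets a device error reach the KVM_GET_DIRTY_LOG /
KVM_CLEAR_DIRTY_LOG callers. It is a minimal illustration, not code from
this series: kvm->arch.pt_dev and pt_dev_read_dirty() are hypothetical
placeholders, and only mark_page_dirty() is an existing KVM helper.

/*
 * Illustrative sketch only -- kvm->arch.pt_dev and pt_dev_read_dirty()
 * are hypothetical stand-ins for a page tracking device interface.
 */
int kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	gfn_t gfn = memslot->base_gfn;
	gfn_t last = memslot->base_gfn + memslot->npages;
	int r;

	for (; gfn < last; gfn++) {
		/* Ask the tracking device whether this gfn was written. */
		r = pt_dev_read_dirty(kvm->arch.pt_dev, gfn);
		if (r < 0)
			return r;	/* now propagated to the dirty-log ioctls */
		if (r)
			mark_page_dirty(kvm, gfn);
	}

	return 0;
}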