Refactor mark_page_dirty_in_slot() to bail early if the memory slot
doesn't exist or dirty page tracking is disabled on it. This is
preparatory work for the forthcoming fixes. No functional change
intended.

Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
 virt/kvm/kvm_main.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9c60384b5ae0..90f538433916 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3317,6 +3317,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
 			     gfn_t gfn)
 {
 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+	unsigned long rel_gfn;
+	u32 slot;
 
 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
 	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
@@ -3325,15 +3327,16 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
 	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
 #endif
 
-	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
-		unsigned long rel_gfn = gfn - memslot->base_gfn;
-		u32 slot = (memslot->as_id << 16) | memslot->id;
+	if (!memslot || !kvm_slot_dirty_track_enabled(memslot))
+		return;
 
-		if (kvm->dirty_ring_size && vcpu)
-			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
-		else if (memslot->dirty_bitmap)
-			set_bit_le(rel_gfn, memslot->dirty_bitmap);
-	}
+	rel_gfn = gfn - memslot->base_gfn;
+	slot = (memslot->as_id << 16) | memslot->id;
+
+	if (kvm->dirty_ring_size && vcpu)
+		kvm_dirty_ring_push(vcpu, slot, rel_gfn);
+	else if (memslot->dirty_bitmap)
+		set_bit_le(rel_gfn, memslot->dirty_bitmap);
 }
 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
 
-- 
2.23.0
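
For reference, this is roughly how mark_page_dirty_in_slot() reads with
the patch applied. It is only a sketch reconstructed from the hunks
above; the middle parameter of the signature is not visible in the diff
context and is assumed here.

void mark_page_dirty_in_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	unsigned long rel_gfn;
	u32 slot;

#ifdef CONFIG_HAVE_KVM_DIRTY_RING
	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
		return;

	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
#endif

	/*
	 * Bail early: there is nothing to record for a nonexistent slot
	 * or one that has dirty page tracking disabled.
	 */
	if (!memslot || !kvm_slot_dirty_track_enabled(memslot))
		return;

	rel_gfn = gfn - memslot->base_gfn;
	slot = (memslot->as_id << 16) | memslot->id;

	if (kvm->dirty_ring_size && vcpu)
		kvm_dirty_ring_push(vcpu, slot, rel_gfn);
	else if (memslot->dirty_bitmap)
		set_bit_le(rel_gfn, memslot->dirty_bitmap);
}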