From: "Maciej S. Szmigiero" <maciej.szmigiero@xxxxxxxxxx> Do a quick lookup for possibly overlapping gfns when creating or moving a memslot instead of performing a linear scan of the whole memslot set. Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@xxxxxxxxxx> --- virt/kvm/kvm_main.c | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5fea467d6fec..78dad8c6376f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1667,6 +1667,30 @@ static int kvm_delete_memslot(struct kvm *kvm, return kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); } +static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, + struct kvm_memory_slot *nslot) +{ + int idx = slots->node_idx; + gfn_t nend = nslot->base_gfn + nslot->npages; + struct rb_node *node; + + kvm_for_each_memslot_in_gfn_range(node, slots, nslot->base_gfn, nend) { + struct kvm_memory_slot *cslot; + gfn_t cend; + + cslot = container_of(node, struct kvm_memory_slot, gfn_node[idx]); + cend = cslot->base_gfn + cslot->npages; + if (cslot->id == nslot->id) + continue; + + /* kvm_for_each_in_gfn_no_more() guarantees that cslot->base_gfn < nend */ + if (cend > nslot->base_gfn) + return true; + } + + return false; +} + /* * Allocate some memory and give it an address in the guest physical address * space. @@ -1752,16 +1776,10 @@ int __kvm_set_memory_region(struct kvm *kvm, } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { - int bkt; - /* Check for overlaps */ - kvm_for_each_memslot(tmp, bkt, __kvm_memslots(kvm, as_id)) { - if (tmp->id == id) - continue; - if (!((new.base_gfn + new.npages <= tmp->base_gfn) || - (new.base_gfn >= tmp->base_gfn + tmp->npages))) - return -EEXIST; - } + if (kvm_check_memslot_overlap(__kvm_memslots(kvm, as_id), + &new)) + return -EEXIST; } /* Allocate/free page dirty bitmap as needed */