Move the check for available ring entries to within the spinlock. This
allows working with a larger number of VCPUs and reduces premature exits
when many VCPUs are in use.

Cc: Avi Kivity <avi@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Signed-off-by: Sasha Levin <levinsasha928@xxxxxxxxx>
---
 virt/kvm/coalesced_mmio.c |    9 ++++++---
 1 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index fc84875..34188db 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -37,7 +37,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
 	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < KVM_MAX_VCPUS) {
+	if (avail == 0) {
 		/* full */
 		return 0;
 	}
@@ -63,11 +63,14 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
-	if (!coalesced_mmio_in_range(dev, addr, len))
-		return -EOPNOTSUPP;

 	spin_lock(&dev->lock);

+	if (!coalesced_mmio_in_range(dev, addr, len)) {
+		spin_unlock(&dev->lock);
+		return -EOPNOTSUPP;
+	}
+
 	/* copy data in first free entry of the ring */

 	ring->coalesced_mmio[ring->last].phys_addr = addr;
--
1.7.6
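
A note on the reasoning here, for readers less familiar with this ring:
before the patch, coalesced_mmio_in_range() ran before dev->lock was
taken, so one in-flight write per VCPU could land between a successful
availability check and the actual insertion. Keeping KVM_MAX_VCPUS slots
in reserve made that race harmless, at the cost of rejecting writes (and
exiting to userspace) while the ring still had room. Once the check runs
under the same lock as the insertion, check-and-insert is atomic and only
a genuinely full ring (avail == 0) needs to bail out. Below is a minimal
user-space sketch of the occupancy arithmetic, not kernel code; RING_MAX
and ring_avail() are made-up names standing in for
KVM_COALESCED_MMIO_MAX and the inline computation in
coalesced_mmio_in_range().

/*
 * Minimal user-space model of the ring-occupancy arithmetic used in the
 * patch. RING_MAX and ring_avail() are hypothetical names; only the
 * first/last indices and the formula mirror the kernel code.
 */
#include <stdio.h>

#define RING_MAX 64 /* stand-in for KVM_COALESCED_MMIO_MAX */

struct ring {
	unsigned int first; /* consumer index (advanced by userspace) */
	unsigned int last;  /* producer index (advanced by the kernel) */
};

/*
 * Same formula as the patch. Unsigned wraparound makes the subtraction
 * well-defined even when last is ahead of first numerically.
 */
static unsigned int ring_avail(const struct ring *r)
{
	return (r->first - r->last - 1) % RING_MAX;
}

int main(void)
{
	struct ring r = { .first = 0, .last = 0 };

	/* Empty ring: (0 - 0 - 1) wraps, then % 64 gives 63. */
	printf("empty ring:    avail = %u\n", ring_avail(&r));

	/* Producer almost caught up: exactly one free slot left. */
	r.last = RING_MAX - 2;
	printf("one slot left: avail = %u\n", ring_avail(&r));

	/* Full ring: this is the only case the new code rejects. */
	r.last = RING_MAX - 1;
	printf("full ring:     avail = %u\n", ring_avail(&r));

	return 0;
}

One slot is deliberately kept unused so that first == last can only mean
"empty"; that is why avail peaks at RING_MAX - 1 and the full condition
is avail == 0 rather than first == last.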