Re: [PATCH v3] MMIO: Make coalesced mmio use a device per zone

On Tue, Jul 19, 2011 at 04:00:07PM +0300, Sasha Levin wrote:
> This patch changes coalesced mmio to create one mmio device per
> zone instead of handling all zones in one device.
> 
> Doing so enables us to take advantage of existing locking and prevents
> a race condition between coalesced mmio registration/unregistration
> and lookups.
> 
> Cc: Avi Kivity <avi@xxxxxxxxxx>
> Cc: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
> Suggested-by: Avi Kivity <avi@xxxxxxxxxx>
> Signed-off-by: Sasha Levin <levinsasha928@xxxxxxxxx>
> ---
>  include/linux/kvm_host.h  |    7 ++-
>  virt/kvm/coalesced_mmio.c |  119 ++++++++++++++++++---------------------------
>  virt/kvm/coalesced_mmio.h |    7 +--
>  3 files changed, 55 insertions(+), 78 deletions(-)
> 
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index eabb21a..4766178 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -63,7 +63,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
>   */
>  struct kvm_io_bus {
>  	int                   dev_count;
> -#define NR_IOBUS_DEVS 200
> +#define NR_IOBUS_DEVS 300
>  	struct kvm_io_device *devs[NR_IOBUS_DEVS];
>  };
>  
> @@ -256,8 +256,11 @@ struct kvm {
>  	struct kvm_arch arch;
>  	atomic_t users_count;
>  #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
> -	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
>  	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
> +	struct {
> +		spinlock_t lock;
> +		struct list_head items;
> +	} coalesced_zones;

Grouping the lock together with "items" suggests it protects the list, but
that is not the case. Better to name the lock "ring_lock" and put it
directly under struct kvm.
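
I.e., something like this (an untested sketch; the field placement and
comments are only a suggestion):

	#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
		spinlock_t ring_lock;		  /* serializes writes to the ring */
		struct list_head coalesced_zones; /* modified only under slots_lock */
	#endif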

>  {
>  	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
>  
> +	list_del(&dev->list);
> +
>  	kfree(dev);
>  }
>  
> @@ -105,7 +97,6 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
>  
>  int kvm_coalesced_mmio_init(struct kvm *kvm)
>  {
> -	struct kvm_coalesced_mmio_dev *dev;
>  	struct page *page;
>  	int ret;
>  
> @@ -113,31 +104,18 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
>  	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
>  	if (!page)
>  		goto out_err;
> -	kvm->coalesced_mmio_ring = page_address(page);
> -
> -	ret = -ENOMEM;
> -	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
> -	if (!dev)
> -		goto out_free_page;
> -	spin_lock_init(&dev->lock);
> -	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
> -	dev->kvm = kvm;
> -	kvm->coalesced_mmio_dev = dev;
>  
> -	mutex_lock(&kvm->slots_lock);
> -	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
> -	mutex_unlock(&kvm->slots_lock);
> -	if (ret < 0)
> -		goto out_free_dev;
> +	ret = 0;
> +	kvm->coalesced_mmio_ring = page_address(page);
>  
> -	return ret;
> +	/*
> +	 * We're using this spinlock to sync access to the coalesced ring.
> +	 * The list doesn't need its own lock since device registration and
> +	 * unregistration should only happen when kvm->slots_lock is held.
> +	 */
> +	spin_lock_init(&kvm->coalesced_zones.lock);
> +	INIT_LIST_HEAD(&kvm->coalesced_zones.items);
>  
> -out_free_dev:
> -	kvm->coalesced_mmio_dev = NULL;
> -	kfree(dev);
> -out_free_page:
> -	kvm->coalesced_mmio_ring = NULL;
> -	__free_page(page);
>  out_err:
>  	return ret;
>  }
> @@ -151,51 +129,48 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
>  int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
>  					 struct kvm_coalesced_mmio_zone *zone)
>  {
> -	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
> +	int ret;
> +	struct kvm_coalesced_mmio_dev *dev;
>  
> -	if (dev == NULL)
> -		return -ENXIO;
> +	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
> +	if (!dev)
> +		return -ENOMEM;
> +
> +	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
> +	dev->kvm = kvm;
> +	dev->zone = *zone;
>  
>  	mutex_lock(&kvm->slots_lock);
> -	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
> -		mutex_unlock(&kvm->slots_lock);
> -		return -ENOBUFS;
> -	}
> +	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
> +	mutex_unlock(&kvm->slots_lock);
> +	if (ret < 0)
> +		goto out_free_dev;
>  
> -	dev->zone[dev->nb_zones] = *zone;
> -	dev->nb_zones++;
> +	list_add_tail(&dev->list, &kvm->coalesced_zones.items);

Must hold slots_lock across the list_add_tail() too: the comment above says
the list is only modified under slots_lock, but here the insertion happens
after the lock has been dropped.
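
I.e., move the insertion into the locked section, along these lines (a
sketch, not tested):

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	if (ret < 0) {
		mutex_unlock(&kvm->slots_lock);
		goto out_free_dev;
	}
	list_add_tail(&dev->list, &kvm->coalesced_zones.items);
	mutex_unlock(&kvm->slots_lock);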

> +
> +	return ret;
> +
> +out_free_dev:
> +	kfree(dev);
> +
> +	if (dev == NULL)
> +		return -ENXIO;
>  
> -	mutex_unlock(&kvm->slots_lock);
>  	return 0;
>  }
>  
>  int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
>  					   struct kvm_coalesced_mmio_zone *zone)
>  {
> -	int i;
> -	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
> -	struct kvm_coalesced_mmio_zone *z;
> -
> -	if (dev == NULL)
> -		return -ENXIO;
> +	struct kvm_coalesced_mmio_dev *dev;
>  
>  	mutex_lock(&kvm->slots_lock);
>  
> -	i = dev->nb_zones;
> -	while (i) {
> -		z = &dev->zone[i - 1];
> -
> -		/* unregister all zones
> -		 * included in (zone->addr, zone->size)
> -		 */
> -
> -		if (zone->addr <= z->addr &&
> -		    z->addr + z->size <= zone->addr + zone->size) {
> -			dev->nb_zones--;
> -			*z = dev->zone[dev->nb_zones];
> +	list_for_each_entry(dev, &kvm->coalesced_zones.items, list)

list_for_each_entry_safe: coalesced_mmio_destructor() does a list_del(), so
entries are removed from the list while you are walking it.
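
I.e., something like:

	struct kvm_coalesced_mmio_dev *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones.items, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			/* destructor does list_del() + kfree(), hence _safe */
			kvm_iodevice_destructor(&dev->dev);
		}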

> +		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
> +			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
> +			kvm_iodevice_destructor(&dev->dev);
>  		}
> -		i--;
> -	}