On 11/08/2016 04:45 AM, Paolo Bonzini wrote: > On 07/11/2016 19:28, Alex Williamson wrote: >>>> Can the reference become invalid? >>> >>> No, this is guaranteed by virt/kvm/vfio.c + the udata.lock mutex (which >>> probably should be renamed...). >> >> The caller gets a reference to kvm, but there's no guarantee that the >> association of that kvm reference to the group stays valid. Once we're >> outside of that mutex, we might as well consider that kvm:group >> association stale. >> >>>> The caller may still hold >>>> a kvm references, but couldn't the group be detached from one kvm >>>> instance and re-attached to another? >>> >>> Can this be handled by the vendor driver? Does it get a callback when >>> it's detached from a KVM instance? >> >> The only release callback through vfio is when the user closes the >> device, the code in this series is the full extent of vfio awareness of >> kvm. Thanks, > > Maybe there should be an mdev callback at the point of association and > deassociation between VFIO and KVM. Then the vendor driver can just use > the same mutex for association, deassociation and usage. I'm not even > sure that these patches are necessary once you have that callback. 
Hi Alex & Paolo, So I cooked another draft version of this; there is no kvm pointer saved in vfio_group in this version, and the notifier will be called on attach/detach. Please kindly have a look :-) -- Thanks, Jike diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index ed2361e4..20b5da9 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -34,6 +34,7 @@ #include <linux/uaccess.h> #include <linux/vfio.h> #include <linux/wait.h> +#include <linux/kvm_host.h> #define DRIVER_VERSION "0.3" #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@xxxxxxxxxx>" @@ -86,6 +87,10 @@ struct vfio_group { struct mutex unbound_lock; atomic_t opened; bool noiommu; + struct { + struct mutex lock; + struct blocking_notifier_head notifier; + } udata; }; struct vfio_device { @@ -333,6 +338,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) mutex_init(&group->device_lock); INIT_LIST_HEAD(&group->unbound_list); mutex_init(&group->unbound_lock); + mutex_init(&group->udata.lock); atomic_set(&group->container_users, 0); atomic_set(&group->opened, 0); group->iommu_group = iommu_group; @@ -414,10 +420,11 @@ static void vfio_group_release(struct kref *kref) iommu_group_put(iommu_group); } -static void vfio_group_put(struct vfio_group *group) +void vfio_group_put(struct vfio_group *group) { kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); } +EXPORT_SYMBOL_GPL(vfio_group_put); /* Assume group_lock or group reference is held */ static void vfio_group_get(struct vfio_group *group) @@ -480,7 +487,7 @@ static struct vfio_group *vfio_group_get_from_minor(int minor) return group; } -static struct vfio_group *vfio_group_get_from_dev(struct device *dev) +struct vfio_group *vfio_group_get_from_dev(struct device *dev) { struct iommu_group *iommu_group; struct vfio_group *group; @@ -494,6 +501,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev) return group; } +EXPORT_SYMBOL_GPL(vfio_group_get_from_dev); /** * Device 
objects - create, release, get, put, search @@ -1745,6 +1753,44 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) } EXPORT_SYMBOL_GPL(vfio_external_check_extension); +int vfio_group_register_notifier(struct vfio_group *group, struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&group->udata.notifier, nb); +} +EXPORT_SYMBOL_GPL(vfio_group_register_notifier); + +int vfio_group_unregister_notifier(struct vfio_group *group, struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&group->udata.notifier, nb); +} +EXPORT_SYMBOL_GPL(vfio_group_unregister_notifier); + +void vfio_group_attach_kvm(struct vfio_group *group, struct kvm *kvm, + void (*fn)(struct kvm *)) +{ + mutex_lock(&group->udata.lock); + + fn(kvm); + blocking_notifier_call_chain(&group->udata.notifier, + VFIO_GROUP_NOTIFY_ATTACH_KVM, kvm); + + mutex_unlock(&group->udata.lock); +} +EXPORT_SYMBOL_GPL(vfio_group_attach_kvm); + +void vfio_group_detach_kvm(struct vfio_group *group, struct kvm *kvm, + void (*fn)(struct kvm *)) +{ + mutex_lock(&group->udata.lock); + + blocking_notifier_call_chain(&group->udata.notifier, + VFIO_GROUP_NOTIFY_DETACH_KVM, kvm); + fn(kvm); + + mutex_unlock(&group->udata.lock); +} +EXPORT_SYMBOL_GPL(vfio_group_detach_kvm); + /** * Sub-module support */ diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 87c9afe..4819a45 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -102,6 +102,21 @@ extern void vfio_unregister_iommu_driver( extern long vfio_external_check_extension(struct vfio_group *group, unsigned long arg); +extern struct vfio_group *vfio_group_get_from_dev(struct device *dev); +extern void vfio_group_put(struct vfio_group *group); + +#define VFIO_GROUP_NOTIFY_ATTACH_KVM 1 +#define VFIO_GROUP_NOTIFY_DETACH_KVM 2 +struct kvm; +extern void vfio_group_attach_kvm(struct vfio_group *group, struct kvm *kvm, + void (*fn)(struct kvm *)); +extern void vfio_group_detach_kvm(struct vfio_group 
*group, struct kvm *kvm, + void (*fn)(struct kvm *)); +extern int vfio_group_register_notifier(struct vfio_group *group, + struct notifier_block *nb); +extern int vfio_group_unregister_notifier(struct vfio_group *group, + struct notifier_block *nb); + /* * Sub-module helpers */ diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c index 1dd087d..d889b56 100644 --- a/virt/kvm/vfio.c +++ b/virt/kvm/vfio.c @@ -60,6 +60,32 @@ static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group) symbol_put(vfio_group_put_external_user); } +static void kvm_vfio_group_attach_kvm(struct vfio_group *group, struct kvm_device *dev) +{ + void (*fn)(struct vfio_group *, struct kvm *, void (*fn)(struct kvm *)); + + fn = symbol_get(vfio_group_attach_kvm); + if (!fn) + return; + + fn(group, dev->kvm, kvm_get_kvm); + + symbol_put(vfio_group_attach_kvm); +} + +static void kvm_vfio_group_detach_kvm(struct vfio_group *group, struct kvm *kvm) +{ + void (*fn)(struct vfio_group *, struct kvm *, void (*fn)(struct kvm *)); + + fn = symbol_get(vfio_group_detach_kvm); + if (!fn) + return; + + fn(group, kvm, kvm_put_kvm); + + symbol_put(vfio_group_detach_kvm); +} + static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group) { long (*fn)(struct vfio_group *, unsigned long); @@ -155,6 +181,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg) list_add_tail(&kvg->node, &kv->group_list); kvg->vfio_group = vfio_group; + kvm_vfio_group_attach_kvm(vfio_group, dev); + kvm_arch_start_assignment(dev->kvm); mutex_unlock(&kv->lock); @@ -196,6 +224,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg) mutex_unlock(&kv->lock); + kvm_vfio_group_detach_kvm(vfio_group, dev->kvm); + kvm_vfio_group_put_external_user(vfio_group); kvm_vfio_update_coherency(dev); @@ -240,6 +270,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev) struct kvm_vfio_group *kvg, *tmp; list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) { + 
kvm_vfio_group_detach_kvm(kvg->vfio_group, dev->kvm); kvm_vfio_group_put_external_user(kvg->vfio_group); list_del(&kvg->node); kfree(kvg); -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html