On Mon, Jul 13, 2009 at 05:37:50PM +0300, Gleb Natapov wrote: > On Mon, Jul 13, 2009 at 05:23:20PM +0300, Michael S. Tsirkin wrote: > > On Mon, Jul 13, 2009 at 02:48:44PM +0300, Gleb Natapov wrote: > > > On Mon, Jul 13, 2009 at 02:45:51PM +0300, Michael S. Tsirkin wrote: > > > > On Mon, Jul 13, 2009 at 12:12:33PM +0300, Gleb Natapov wrote: > > > > > > > > > > Signed-off-by: Gleb Natapov <gleb@xxxxxxxxxx> > > > > > > > > This one is probably better off left as is, > > > What do you mean "as is"? > > > > This is a slow operation. It seems that we could use irq_lock or switch > > to slot lock or kvm lock here. Why do we need another one? > > > irq_lock is completely removed. So either we don't remove it and use it > here (and we don't need mutex so we change it to spinlock too), or we add > another lock with the name that actually tell us what its purpose. I prefer > second option. I am not sure you can use kvm lock without deadlock, and > slot lock? How this connected to slots management?! slots_lock is just a bad name now. See, slots_lock is taken for read on every exit. So taking slots_lock for write means all guests are stopped in a known synchronization state. If the write side of the operation is very infrequent (such as registration of irq ack/mask notifiers), down_write(slots_lock) works as a simpler replacement for RCU. We want to get rid of slots_lock on every exit at some point, though. > And this is not about speed of the operation. It is about making reader > lockless. > > > > > with RCU in place list modifications are slow anyway. 
> > > > > > > > > --- > > > > > include/linux/kvm_host.h | 1 + > > > > > virt/kvm/irq_comm.c | 16 ++++++++-------- > > > > > virt/kvm/kvm_main.c | 1 + > > > > > 3 files changed, 10 insertions(+), 8 deletions(-) > > > > > > > > > > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h > > > > > index 3f2a4fc..12f4ee2 100644 > > > > > --- a/include/linux/kvm_host.h > > > > > +++ b/include/linux/kvm_host.h > > > > > @@ -165,6 +165,7 @@ struct kvm { > > > > > spinlock_t irq_routing_lock; > > > > > struct hlist_head mask_notifier_list; > > > > > struct hlist_head irq_ack_notifier_list; > > > > > + spinlock_t irq_notifier_list_lock; > > > > > #endif > > > > > > > > > > #ifdef KVM_ARCH_WANT_MMU_NOTIFIER > > > > > diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c > > > > > index 3dbba41..59c1cde 100644 > > > > > --- a/virt/kvm/irq_comm.c > > > > > +++ b/virt/kvm/irq_comm.c > > > > > @@ -191,17 +191,17 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) > > > > > void kvm_register_irq_ack_notifier(struct kvm *kvm, > > > > > struct kvm_irq_ack_notifier *kian) > > > > > { > > > > > - mutex_lock(&kvm->irq_lock); > > > > > + spin_lock(&kvm->irq_notifier_list_lock); > > > > > hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); > > > > > - mutex_unlock(&kvm->irq_lock); > > > > > + spin_unlock(&kvm->irq_notifier_list_lock); > > > > > } > > > > > > > > > > void kvm_unregister_irq_ack_notifier(struct kvm *kvm, > > > > > struct kvm_irq_ack_notifier *kian) > > > > > { > > > > > - mutex_lock(&kvm->irq_lock); > > > > > + spin_lock(&kvm->irq_notifier_list_lock); > > > > > hlist_del_init_rcu(&kian->link); > > > > > - mutex_unlock(&kvm->irq_lock); > > > > > + spin_unlock(&kvm->irq_notifier_list_lock); > > > > > synchronize_rcu(); > > > > > } > > > > > > > > > > @@ -247,18 +247,18 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) > > > > > void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, > > > > > struct 
kvm_irq_mask_notifier *kimn) > > > > > { > > > > > - mutex_lock(&kvm->irq_lock); > > > > > + spin_lock(&kvm->irq_notifier_list_lock); > > > > > kimn->irq = irq; > > > > > hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list); > > > > > - mutex_unlock(&kvm->irq_lock); > > > > > + spin_unlock(&kvm->irq_notifier_list_lock); > > > > > } > > > > > > > > > > void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, > > > > > struct kvm_irq_mask_notifier *kimn) > > > > > { > > > > > - mutex_lock(&kvm->irq_lock); > > > > > + spin_lock(&kvm->irq_notifier_list_lock); > > > > > hlist_del_rcu(&kimn->link); > > > > > - mutex_unlock(&kvm->irq_lock); > > > > > + spin_unlock(&kvm->irq_notifier_list_lock); > > > > > synchronize_rcu(); > > > > > } > > > > > > > > > > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c > > > > > index 4e31634..018bde8 100644 > > > > > --- a/virt/kvm/kvm_main.c > > > > > +++ b/virt/kvm/kvm_main.c > > > > > @@ -948,6 +948,7 @@ static struct kvm *kvm_create_vm(void) > > > > > spin_lock_init(&kvm->irq_routing_lock); > > > > > INIT_HLIST_HEAD(&kvm->mask_notifier_list); > > > > > INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); > > > > > + spin_lock_init(&kvm->irq_notifier_list_lock); > > > > > #endif > > > > > > > > > > #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET > > > > > -- > > > > > 1.6.2.1 > > > > > > > > > > -- > > > > > To unsubscribe from this list: send the line "unsubscribe kvm" in > > > > > the body of a message to majordomo@xxxxxxxxxxxxxxx > > > > > More majordomo info at http://vger.kernel.org/majordomo-info.html > > > > > > -- > > > Gleb. > > > -- > > > To unsubscribe from this list: send the line "unsubscribe kvm" in > > > the body of a message to majordomo@xxxxxxxxxxxxxxx > > > More majordomo info at http://vger.kernel.org/majordomo-info.html > > -- > Gleb. 
-- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html