On Mon, Jul 13, 2009 at 04:05:58PM +0300, Gleb Natapov wrote: > On Mon, Jul 13, 2009 at 03:56:40PM +0300, Michael S. Tsirkin wrote: > > On Sun, Jul 12, 2009 at 03:03:53PM +0300, Gleb Natapov wrote: > > > Use RCU locking for mask/ack notifiers lists. > > > > > > Signed-off-by: Gleb Natapov <gleb@xxxxxxxxxx> > > > --- > > > virt/kvm/irq_comm.c | 20 +++++++++++--------- > > > 1 files changed, 11 insertions(+), 9 deletions(-) > > > > > > diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c > > > index 5dde1ef..ba3a115 100644 > > > --- a/virt/kvm/irq_comm.c > > > +++ b/virt/kvm/irq_comm.c > > > @@ -179,18 +179,18 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) > > > break; > > > } > > > } > > > - rcu_read_unlock(); > > > > > > - hlist_for_each_entry(kian, n, &kvm->irq_ack_notifier_list, link) > > > + hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, link) > > > if (kian->gsi == gsi) > > > kian->irq_acked(kian); > > > + rcu_read_unlock(); > > > } > > > > > > void kvm_register_irq_ack_notifier(struct kvm *kvm, > > > struct kvm_irq_ack_notifier *kian) > > > { > > > mutex_lock(&kvm->irq_lock); > > > - hlist_add_head(&kian->link, &kvm->irq_ack_notifier_list); > > > + hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); > > > mutex_unlock(&kvm->irq_lock); > > > > I think we need synchronize_rcu here as well. If the user adds a > > notifier, he expects to get notified of irqs immediately > > after the function returns, not after the next rcu grace period. > > > If ack notifier registration races with acknowledgment of the interrupt > it adds a notifier for, the user already does something terribly wrong. Hmm, good point. 
> > > } > > > > > > @@ -198,8 +198,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, > > > struct kvm_irq_ack_notifier *kian) > > > { > > > mutex_lock(&kvm->irq_lock); > > > - hlist_del_init(&kian->link); > > > + hlist_del_init_rcu(&kian->link); > > > mutex_unlock(&kvm->irq_lock); > > > + synchronize_rcu(); > > > } > > > > > > int kvm_request_irq_source_id(struct kvm *kvm) > > > @@ -246,7 +247,7 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, > > > { > > > mutex_lock(&kvm->irq_lock); > > > kimn->irq = irq; > > > - hlist_add_head(&kimn->link, &kvm->mask_notifier_list); > > > + hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list); > > > mutex_unlock(&kvm->irq_lock); > > > } > > > > > > @@ -254,8 +255,9 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, > > > struct kvm_irq_mask_notifier *kimn) > > > { > > > mutex_lock(&kvm->irq_lock); > > > - hlist_del(&kimn->link); > > > + hlist_del_rcu(&kimn->link); > > > mutex_unlock(&kvm->irq_lock); > > > + synchronize_rcu(); > > > } > > > > > > void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) > > > @@ -263,11 +265,11 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) > > > struct kvm_irq_mask_notifier *kimn; > > > struct hlist_node *n; > > > > > > - WARN_ON(!mutex_is_locked(&kvm->irq_lock)); > > > - > > > - hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link) > > > + rcu_read_lock(); > > > + hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link) > > > if (kimn->irq == irq) > > > kimn->func(kimn, mask); > > > + rcu_read_unlock(); > > > } > > > > > > void kvm_free_irq_routing(struct kvm *kvm) > > > -- > > > 1.6.2.1 > > > > > > -- > > > To unsubscribe from this list: send the line "unsubscribe kvm" in > > > the body of a message to majordomo@xxxxxxxxxxxxxxx > > > More majordomo info at http://vger.kernel.org/majordomo-info.html > > -- > Gleb. 
> -- > To unsubscribe from this list: send the line "unsubscribe kvm" in > the body of a message to majordomo@xxxxxxxxxxxxxxx > More majordomo info at http://vger.kernel.org/majordomo-info.html -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html