The global irq_domain_mutex is now held in all paths that update the
revmap structures so there is no longer any need for the revmap mutex.

Tested-by: Hsin-Yi Wang <hsinyi@xxxxxxxxxxxx>
Tested-by: Mark-PK Tsai <mark-pk.tsai@xxxxxxxxxxxx>
Signed-off-by: Johan Hovold <johan+linaro@xxxxxxxxxx>
---
 include/linux/irqdomain.h |  2 --
 kernel/irq/irqdomain.c    | 13 ++++++-------
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 5b2718e1c6de..7fd3939328c2 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -143,7 +143,6 @@ struct irq_domain_chip_generic;
  * Revmap data, used internally by the irq domain code:
  * @revmap_size:	Size of the linear map table @revmap[]
  * @revmap_tree:	Radix map tree for hwirqs that don't fit in the linear map
- * @revmap_mutex:	Lock for the revmap
  * @revmap:		Linear table of irq_data pointers
  */
 struct irq_domain {
@@ -171,7 +170,6 @@ struct irq_domain {
 	irq_hw_number_t hwirq_max;
 	unsigned int revmap_size;
 	struct radix_tree_root revmap_tree;
-	struct mutex revmap_mutex;
 	struct irq_data __rcu *revmap[];
 };
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 23f5919e58b7..248e6acfafbe 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -214,7 +214,6 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
-	mutex_init(&domain->revmap_mutex);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->hwirq_max = hwirq_max;
@@ -501,30 +500,30 @@ static bool irq_domain_is_nomap(struct irq_domain *domain)
 static void irq_domain_clear_mapping(struct irq_domain *domain,
 				     irq_hw_number_t hwirq)
 {
+	lockdep_assert_held(&irq_domain_mutex);
+
 	if (irq_domain_is_nomap(domain))
 		return;
 
-	mutex_lock(&domain->revmap_mutex);
 	if (hwirq < domain->revmap_size)
 		rcu_assign_pointer(domain->revmap[hwirq], NULL);
 	else
 		radix_tree_delete(&domain->revmap_tree, hwirq);
-	mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_set_mapping(struct irq_domain *domain,
 				   irq_hw_number_t hwirq,
 				   struct irq_data *irq_data)
 {
+	lockdep_assert_held(&irq_domain_mutex);
+
 	if (irq_domain_is_nomap(domain))
 		return;
 
-	mutex_lock(&domain->revmap_mutex);
 	if (hwirq < domain->revmap_size)
 		rcu_assign_pointer(domain->revmap[hwirq], irq_data);
 	else
 		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-	mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
@@ -1511,11 +1510,12 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 {
 	void __rcu **slot;
 
+	lockdep_assert_held(&irq_domain_mutex);
+
 	if (irq_domain_is_nomap(d->domain))
 		return;
 
 	/* Fix up the revmap. */
-	mutex_lock(&d->domain->revmap_mutex);
 	if (d->hwirq < d->domain->revmap_size) {
 		/* Not using radix tree */
 		rcu_assign_pointer(d->domain->revmap[d->hwirq], d);
@@ -1524,7 +1524,6 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 		if (slot)
 			radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
 	}
-	mutex_unlock(&d->domain->revmap_mutex);
 }
 
 /**
-- 
2.38.2
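
For context, a minimal sketch (not part of the patch) of the locking model the
change relies on: every path that updates the reverse map now takes the global
irq_domain_mutex, so the per-domain helpers only assert that it is held rather
than taking a separate revmap_mutex. The names example_irq_domain_mutex,
example_set_mapping() and example_associate() below are illustrative stand-ins,
not the kernel's actual symbols.

/*
 * Illustrative sketch only: a simplified view of the locking model after
 * this change. The revmap update helper asserts that the global mutex is
 * held; callers take that mutex around the update.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_irq_domain_mutex);	/* stand-in for irq_domain_mutex */

static void example_set_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq,
				struct irq_data *irq_data)
{
	/* Caller must hold the global lock; no per-domain revmap_mutex. */
	lockdep_assert_held(&example_irq_domain_mutex);

	if (hwirq < domain->revmap_size)
		rcu_assign_pointer(domain->revmap[hwirq], irq_data);
	else
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
}

static void example_associate(struct irq_domain *domain, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	mutex_lock(&example_irq_domain_mutex);	/* every revmap update path holds this */
	example_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&example_irq_domain_mutex);
}

This mirrors how irq_domain_set_mapping() in the patch switches from taking
domain->revmap_mutex to asserting irq_domain_mutex is already held.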