Use the irq_desc::tot_count member to avoid the summation loop in
show_interrupts() for interrupts which are not marked as 'PER_CPU'
interrupts. This reduces the time overhead of reading /proc/interrupts.
To allow that check, drop the static qualifier from irq_is_nmi() and
declare it in <linux/irqdesc.h>.

Originally-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Bitao Hu <yaoma@xxxxxxxxxxxxxxxxx>
---
 include/linux/irqdesc.h | 2 ++
 kernel/irq/irqdesc.c    | 2 +-
 kernel/irq/proc.c       | 9 +++++++--
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 2912b1998670..1ee96d7232b4 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -121,6 +121,8 @@ static inline void irq_unlock_sparse(void) { }
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
+extern bool irq_is_nmi(struct irq_desc *desc);
+
 static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
 					      unsigned int cpu)
 {
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9cd17080b2d8..56a767957a9d 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -955,7 +955,7 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 	return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
 }
 
-static bool irq_is_nmi(struct irq_desc *desc)
+bool irq_is_nmi(struct irq_desc *desc)
 {
 	return desc->istate & IRQS_NMI;
 }
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6954e0a02047..b3b1b93f0410 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -489,8 +489,13 @@ int show_interrupts(struct seq_file *p, void *v)
 		goto outsparse;
 
 	if (desc->kstat_irqs) {
-		for_each_online_cpu(j)
-			any_count |= data_race(per_cpu(desc->kstat_irqs->cnt, j));
+		if (!irq_settings_is_per_cpu_devid(desc) &&
+		    !irq_settings_is_per_cpu(desc) &&
+		    !irq_is_nmi(desc))
+			any_count = data_race(desc->tot_count);
+		else
+			for_each_online_cpu(j)
+				any_count |= data_race(per_cpu(desc->kstat_irqs->cnt, j));
 	}
 
 	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
-- 
2.37.1 (Apple Git-137.1)
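
For illustration only (not part of the patch, all names hypothetical): a
minimal userspace sketch of why the tot_count fast path helps. An aggregate
counter bumped alongside the per-CPU counters can be read in O(1), whereas
summing the per-CPU counters costs one read per online CPU.

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-in for the per-CPU kstat_irqs counters plus the
 * aggregate irq_desc::tot_count. */
struct fake_desc {
	unsigned int kstat_irqs[NR_CPUS];	/* per-CPU counts */
	unsigned int tot_count;			/* aggregate count */
};

/* Slow path: walk every CPU and sum, as show_interrupts() did
 * unconditionally before this patch. */
static unsigned int sum_per_cpu(const struct fake_desc *desc)
{
	unsigned int total = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		total += desc->kstat_irqs[cpu];
	return total;
}

int main(void)
{
	/* Both counters are updated on every handled interrupt, so for
	 * ordinary (non-per-CPU, non-NMI) interrupts they agree and the
	 * O(1) tot_count read can replace the O(nr_cpus) loop. */
	struct fake_desc desc = {
		.kstat_irqs = { 3, 1, 4, 2 },
		.tot_count = 10,
	};

	printf("per-CPU sum: %u, tot_count: %u\n",
	       sum_per_cpu(&desc), desc.tot_count);
	return 0;
}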