Robert,

You've applied the fix only to the generic x86 interrupt handler, which is
currently used by AMD64 processors. It seems to me that this "in-flight
interrupt after disable" problem may also happen on Intel, so the fix should
also be added to intel_pmu_handle_irq(). Don't you think so?

(A rough sketch of what I have in mind is appended below the quoted commit.)

On Fri, Sep 24, 2010 at 12:41 PM, tip-bot for Robert Richter
<robert.richter@xxxxxxx> wrote:
>
> Commit-ID:  63e6be6d98e1a2bcdca86872b67052e51ab6afa1
> Gitweb:     http://git.kernel.org/tip/63e6be6d98e1a2bcdca86872b67052e51ab6afa1
> Author:     Robert Richter <robert.richter@xxxxxxx>
> AuthorDate: Wed, 15 Sep 2010 18:20:34 +0200
> Committer:  Ingo Molnar <mingo@xxxxxxx>
> CommitDate: Fri, 24 Sep 2010 12:21:41 +0200
>
> perf, x86: Catch spurious interrupts after disabling counters
>
> Some cpus still deliver spurious interrupts after disabling a
> counter. This caused 'undelivered NMI' messages. This patch
> fixes this. Introduced by:
>
>   4177c42: perf, x86: Try to handle unknown nmis with an enabled PMU
>
> Reported-by: Ingo Molnar <mingo@xxxxxxx>
> Signed-off-by: Robert Richter <robert.richter@xxxxxxx>
> Cc: Don Zickus <dzickus@xxxxxxxxxx>
> Cc: gorcunov@xxxxxxxxx <gorcunov@xxxxxxxxx>
> Cc: fweisbec@xxxxxxxxx <fweisbec@xxxxxxxxx>
> Cc: ying.huang@xxxxxxxxx <ying.huang@xxxxxxxxx>
> Cc: ming.m.lin@xxxxxxxxx <ming.m.lin@xxxxxxxxx>
> Cc: yinghai@xxxxxxxxxx <yinghai@xxxxxxxxxx>
> Cc: andi@xxxxxxxxxxxxxx <andi@xxxxxxxxxxxxxx>
> Cc: eranian@xxxxxxxxxx <eranian@xxxxxxxxxx>
> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> LKML-Reference: <20100915162034.GO13563@xxxxxxxxxxxx>
> Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
> ---
>  arch/x86/kernel/cpu/perf_event.c |   12 +++++++++++-
>  1 files changed, 11 insertions(+), 1 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
> index 3efdf28..03a5b03 100644
> --- a/arch/x86/kernel/cpu/perf_event.c
> +++ b/arch/x86/kernel/cpu/perf_event.c
> @@ -102,6 +102,7 @@ struct cpu_hw_events {
>          */
>         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
>         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
> +       unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
>         int                     enabled;
>
>         int                     n_events;
> @@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
>         x86_perf_event_set_period(event);
>         cpuc->events[idx] = event;
>         __set_bit(idx, cpuc->active_mask);
> +       __set_bit(idx, cpuc->running);
>         x86_pmu.enable(event);
>         perf_event_update_userpage(event);
>
> @@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
>         cpuc = &__get_cpu_var(cpu_hw_events);
>
>         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
> -               if (!test_bit(idx, cpuc->active_mask))
> +               if (!test_bit(idx, cpuc->active_mask)) {
> +                       /*
> +                        * Though we deactivated the counter some cpus
> +                        * might still deliver spurious interrupts still
> +                        * in flight. Catch them:
> +                        */
> +                       if (__test_and_clear_bit(idx, cpuc->running))
> +                               handled++;
>                         continue;
> +               }
>
>                 event = cpuc->events[idx];
>                 hwc = &event->hw;
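A rough sketch of the mirrored check, purely for illustration -- this is not
the actual intel_pmu_handle_irq() code. The loop shape (walking the set bits
of the overflow status word) and the local names status, bit and handled are
assumptions on my side; only cpuc->running, cpuc->active_mask and
__test_and_clear_bit() come from the patch quoted above:

	/*
	 * Hypothetical sketch of the Intel handler's overflow loop, not a
	 * real patch. Idea: when an overflow is flagged for a counter that
	 * is no longer in active_mask, an in-flight interrupt raced with
	 * the disable; count it as handled instead of letting it surface
	 * as an unknown NMI.
	 */
	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {

		if (!test_bit(bit, cpuc->active_mask)) {
			/* counter already stopped, overflow was in flight */
			if (__test_and_clear_bit(bit, cpuc->running))
				handled++;
			continue;
		}

		handled++;
		/* normal overflow handling of cpuc->events[bit] follows here */
	}

Since the x86_pmu_start() change already sets the running bit in the generic
path, presumably only this handler-side check would need to be duplicated for
Intel.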