On Thu, 2011-01-06 at 15:31 -0800, Kevin D. Kissell wrote:
> On 01/06/11 12:23, Anoop P A wrote:
>
> I'm sure I've said this before, and it's in various comments in the SMTC
> code, but remember, one of the main problems that the SMTC kernel
> had to solve was to prevent all TCs of a VPE from "convoying" after every
> interrupt. The way this is done is that the interrupt vector code, before
> clearing EXL, masks off the Status.IM bit associated with the incoming
> interrupt. Of course, to get another interrupt from the same source
> (or collection of sources), that IM bit needs to be restored. The "correct"
> mechanism for this is by having the appropriate irq_hwmask[] value set,
> so that smtc_im_ack_irq(), which should be invoked on an irq "ack()"
> (meaning that the source has been quenched and any new occurrence
> should be considered a new interrupt), will restore the bit in Status.
> This function got moved around a bit in the various SMTC prototypes,
> but it proved least intrusive to put it into the xxx_mask_and_ack()
> functions for the interrupt controllers - see irq-msc01.c and i8259.c.
> If you haven't done the same in any equivalent code for a different
> on-chip controller, you'll definitely have problems.
>
> The Backstop scheme works OK for peripheral interrupts that didn't
> have an appropriate irq_hwmask[] value set up, but clock interrupts
> don't follow the same code paths and can't depend on the backstop.

OK, thanks very much for your detailed explanation. I hope I have found
the root cause: smtc_clockevent_init() was overriding irq_hwmask[] even
when a platform-specific get_c0_compare_int is in use. With the following
patch everything seems to be working for me.

------------------------------------------------------------------------
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 2e72d30..a25fc59 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -310,9 +310,14 @@ int __cpuinit smtc_clockevent_init(void)
 		return 0;
 
 	/*
 	 * And we need the hwmask associated with the c0_compare
-	 * vector to be initialized.
+	 * vector to be initialized. However, in case of a platform-
+	 * specific get_c0_compare_int, don't override irq_hwmask;
+	 * expect platform code to set a valid mask value.
 	 */
-	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+
+	if (!get_c0_compare_int)
+		irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+
 	if (cp0_timer_irq_installed)
 		return 0;
-----------------------------------------------------------------------

Attaching my msp_irq_cic.c. Kindly have a look if possible.

Thanks,
Anoop

>
> Regards,
>
> Kevin K.
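For context before the attachment: smtc_im_ack_irq() is what consumes
irq_hwmask[] -- it restores the recorded Status.IM bit once the source has
been quenched. In the SMTC kernels it is roughly the inline below (a sketch
from memory of the non-backstop variant in asm/irq.h; check your tree for
the exact form):

static inline void smtc_im_ack_irq(unsigned int irq)
{
	/* Restore the Status.IM bit recorded for this interrupt line. */
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}

This is why the override mattered: with the timer cascaded through the CIC,
the valid mask value is C_IRQ4 (the IM bit for the hardware interrupt line
the CIC cascades on), while (0x100 << cp0_compare_irq) names the usual CP0
compare IM bit, so the ack path kept restoring the wrong bit and the
cascade stayed masked.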
/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file defines the irq handler for MSP CIC subsystem interrupts.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>
#include <asm/system.h>

#include <msp_cic_int.h>
#include <msp_regs.h>

/*
 * External API
 */
extern void msp_per_irq_init(void);
extern void msp_per_irq_dispatch(void);

/*
 * Convenience Macro. Should be somewhere generic.
 */
#define get_current_vpe() \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

#ifdef CONFIG_SMP

#define LOCK_VPE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dmt(); \
} while (0)

#define UNLOCK_VPE(flags, mtflags) \
do { \
	emt(mtflags); \
	local_irq_restore(flags); \
} while (0)

#define LOCK_CORE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dvpe(); \
} while (0)

#define UNLOCK_CORE(flags, mtflags) \
do { \
	evpe(mtflags); \
	local_irq_restore(flags); \
} while (0)

#else

#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)

#endif

/* ensure writes to cic are completed */
static inline void cic_wmb(void)
{
	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
	volatile u32 dummy_read;

	wmb();
	/* read back to flush the posted write */
	dummy_read = __raw_readl(cic_mem);
	dummy_read++;
}

static inline void unmask_cic_irq(unsigned int irq)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long flags;

	/*
	 * Make sure we have IRQ affinity. It may have changed while
	 * we were processing the IRQ.
	 */
	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity))
		return;
#endif
	vpe = get_current_vpe();
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] |= (1 << (irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}
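/*
 * Note on locking: LOCK_VPE() is local_irq_save() plus dmt(), which
 * halts the other TCs bound to this VPE. The per-VPE mask register is
 * shared by all of a VPE's TCs, so the non-atomic read-modify-write in
 * unmask_cic_irq() above (and mask_cic_irq() below) must not be
 * interleaved with another thread's update.
 */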
static inline void mask_cic_irq(unsigned int irq)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe = get_current_vpe();
#ifdef CONFIG_SMP
	unsigned long flags, mtflags;
#endif
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] &= ~(1 << (irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}

static inline void msp_cic_irq_ack(unsigned int irq)
{
	mask_cic_irq(irq);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (irq - MSP_CIC_INTBASE));
	/* restore the Status.IM bit recorded in irq_hwmask[] */
	smtc_im_ack_irq(irq);
}

static void msp_cic_irq_end(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
		unmask_cic_irq(irq);
}

#ifdef CONFIG_SMP
static inline int msp_cic_irq_set_affinity(unsigned int irq,
					   const struct cpumask *cpumask)
{
	int cpu;
	unsigned long flags;
	unsigned int mtflags;
	unsigned long imask = (1 << (irq - MSP_CIC_INTBASE));
	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER);

	LOCK_CORE(flags, mtflags);
	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			cic_mask[cpu] |= imask;
		else
			cic_mask[cpu] &= ~imask;
	}
	UNLOCK_CORE(flags, mtflags);

	return 0;
}
#endif

static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.mask = mask_cic_irq,	/* plain mask must not ack/clear status */
	.mask_ack = msp_cic_irq_ack,
	.unmask = unmask_cic_irq,
	.ack = msp_cic_irq_ack,
	.end = msp_cic_irq_end,
#ifdef CONFIG_SMP
	.set_affinity = msp_cic_irq_set_affinity,
#endif
};

void __init msp_cic_irq_init(void)
{
	int i;

	/* Mask/clear interrupts. */
	*CIC_VPE0_MSK_REG = 0x00000000;
	*CIC_VPE1_MSK_REG = 0x00000000;
	*CIC_STS_REG = 0xFFFFFFFF;

	/*
	 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
	 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
	 * They are to be active low, level sensitive.
	 */
	*CIC_EXT_CFG_REG &= 0xFFFF8F8F;

	/* initialize all the IRQ descriptors */
	for (i = MSP_CIC_INTBASE; i < MSP_CIC_INTBASE + 32; i++) {
		set_irq_chip_and_handler(i, &msp_cic_irq_controller,
					 handle_level_irq);
#ifdef CONFIG_MIPS_MT_SMTC
		/* Status.IM bit for the CIC cascade, restored on ack */
		irq_hwmask[i] = C_IRQ4;
#endif
	}

	/* Initialize the PER interrupt sub-system */
	msp_per_irq_init();
}

/* CIC masked by CIC vector processing before dispatch called */
void msp_cic_irq_dispatch(void)
{
	volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
	u32 cic_mask;
	u32 pending;
	int cic_status = *CIC_STS_REG;

	cic_mask = cic_msk_reg[get_current_vpe()];
	pending = cic_status & cic_mask;
	if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE0_TIMER);
	} else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE1_TIMER);
	} else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
		msp_per_irq_dispatch();
	} else if (pending) {
		do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
	} else {
		spurious_interrupt();
		/* Re-enable the CIC cascaded interrupt. */
		irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
	}
}
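With the cevt-smtc.c change above, a platform that provides
get_c0_compare_int() owns both halves of the contract: return the timer's
IRQ number, and leave a valid irq_hwmask[] entry for it (which
msp_cic_irq_init() above already does for the whole CIC range, the VPE
timer inputs included). The vector half might look like the sketch below;
this is illustrative only, reusing the get_current_vpe() macro from the
file, since the real per-VPE routing is board-specific:

unsigned int get_c0_compare_int(void)
{
	/*
	 * Each VPE's CP0 compare output is cascaded through its own
	 * CIC input, so report the per-VPE timer interrupt number.
	 */
	return get_current_vpe() ? MSP_INT_VPE1_TIMER : MSP_INT_VPE0_TIMER;
}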