On Tue, Feb 20 2024 at 11:37, Anup Patel wrote:
> The RISC-V advanced interrupt architecture (AIA) specification
> defines a new MSI controller called incoming message signalled
> interrupt controller (IMSIC) which manages MSI on per-HART (or
> per-CPU) basis. It also supports IPIs as software injected MSIs.
> (For more details refer https://github.com/riscv/riscv-aia)
>
> Let us add an early irqchip driver for RISC-V IMSIC which sets
> up the IMSIC state and provide IPIs.

s/Let us add/Add/

> +#else
> +static void imsic_ipi_starting_cpu(void)
> +{
> +}
> +
> +static void imsic_ipi_dying_cpu(void)
> +{
> +}
> +
> +static int __init imsic_ipi_domain_init(void)
> +{
> +	return 0;
> +}

Please condense this into

static void imsic_ipi_starting_cpu(void) { }
static void imsic_ipi_dying_cpu(void) { }
static int __init imsic_ipi_domain_init(void) { return 0; }

No point in wasting space for these stubs.

> + * To handle an interrupt, we read the TOPEI CSR and write zero in one
> + * instruction. If TOPEI CSR is non-zero then we translate TOPEI.ID to
> + * Linux interrupt number and let Linux IRQ subsystem handle it.
> + */
> +static void imsic_handle_irq(struct irq_desc *desc)
> +{
> +	struct irq_chip *chip = irq_desc_get_chip(desc);
> +	int err, cpu = smp_processor_id();
> +	struct imsic_vector *vec;
> +	unsigned long local_id;
> +
> +	chained_irq_enter(chip, desc);
> +
> +	while ((local_id = csr_swap(CSR_TOPEI, 0))) {
> +		local_id = local_id >> TOPEI_ID_SHIFT;
> +
> +		if (local_id == IMSIC_IPI_ID) {
> +#ifdef CONFIG_SMP

	if (IS_ENABLED(CONFIG_SMP))

> +			ipi_mux_process();
> +#endif
> +			continue;
> +		}
> +

> +/* MUST be called with lpriv->lock held */

Instead of a comment which cannot be enforced just have

	lockdep_assert_held(&lpriv->lock);

right at the top of the function. That documents the requirement and
lets lockdep yell if not followed.

> +#ifdef CONFIG_SMP
> +static void imsic_local_timer_callback(struct timer_list *timer)
> +{
> +	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
> +	unsigned long flags;
> +
> +	raw_spin_lock_irqsave(&lpriv->lock, flags);
> +	__imsic_local_sync(lpriv);
> +	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
> +}
> +
> +/* MUST be called with lpriv->lock held */

Ditto

> +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)

> +void imsic_vector_mask(struct imsic_vector *vec)
> +{
> +	struct imsic_local_priv *lpriv;
> +
> +	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
> +	if (WARN_ON(&lpriv->vectors[vec->local_id] != vec))
> +		return;

WARN_ON_ONCE(), no?

> +bool imsic_vector_isenabled(struct imsic_vector *vec)
> +{
> +	struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
> +	unsigned long flags;
> +	bool ret;
> +
> +	raw_spin_lock_irqsave(&lpriv->lock, flags);
> +	ret = vec->enable;
> +	raw_spin_unlock_irqrestore(&lpriv->lock, flags);

I'm not sure what you are trying to protect here. vec->enable can
obviously change right after the lock is dropped. So that's just a
snapshot, which is not any better than using

	READ_ONCE(vec->enable);

and a corresponding WRITE_ONCE() at the update site, which obviously
needs serialization.
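Uncompiled sketch of what I mean; imsic_vector_mask() is just an
example for the write side:

bool imsic_vector_isenabled(struct imsic_vector *vec)
{
	/* Snapshot only; the value can change right after the read anyway */
	return READ_ONCE(vec->enable);
}

and at the update site, e.g. in imsic_vector_mask(), the store stays
serialized by lpriv->lock:

	raw_spin_lock(&lpriv->lock);
	WRITE_ONCE(vec->enable, false);
	raw_spin_unlock(&lpriv->lock);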
> +static void __init imsic_local_cleanup(void)
> +{
> +	int cpu;
> +	struct imsic_local_priv *lpriv;

	struct imsic_local_priv *lpriv;
	int cpu;

Please.

> +void imsic_state_offline(void)
> +{
> +#ifdef CONFIG_SMP
> +	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
> +#endif

You can move that into the #ifdef below.

> +	unsigned long flags;
> +
> +	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
> +	irq_matrix_offline(imsic->matrix);
> +	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
> +
> +#ifdef CONFIG_SMP
> +	raw_spin_lock_irqsave(&lpriv->lock, flags);
> +	WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
> +	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
> +#endif
> +}

Thanks,

        tglx