On Mon, Oct 23 2023 at 22:57, Anup Patel wrote:
> +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
> +void imsic_vector_debug_show(struct seq_file *m,
> +                             struct imsic_vector *vec, int ind)
> +{
> +        unsigned int mcpu = 0, mlocal_id = 0;
> +        struct imsic_local_priv *lpriv;
> +        bool move_in_progress = false;
> +        struct imsic_vector *mvec;
> +        bool is_enabled = false;
> +        unsigned long flags;
> +
> +        lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
> +        if (WARN_ON(&lpriv->vectors[vec->local_id] != vec))
> +                return;
> +
> +        raw_spin_lock_irqsave(&lpriv->ids_lock, flags);
> +        if (test_bit(vec->local_id, lpriv->ids_enabled_bitmap))
> +                is_enabled = true;
> +        mvec = lpriv->ids_move[vec->local_id];
> +        if (mvec) {
> +                move_in_progress = true;
> +                mcpu = mvec->cpu;
> +                mlocal_id = mvec->local_id;
> +        }
> +        raw_spin_unlock_irqrestore(&lpriv->ids_lock, flags);
> +
> +        seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu);
> +        seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
> +        seq_printf(m, "%*sis_reserved : %5u\n", ind, "",
> +                   (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
> +        seq_printf(m, "%*sis_enabled : %5u\n", ind, "",
> +                   (is_enabled) ? 1 : 0);
> +        seq_printf(m, "%*sis_move_pending : %5u\n", ind, "",
> +                   (move_in_progress) ? 1 : 0);
> +        if (move_in_progress) {
> +                seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mcpu);
> +                seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mlocal_id);
> +        }
> +}
> +
> +void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
> +{
> +        unsigned int cpu, total_avail = 0, total_used = 0;
> +        struct imsic_global_config *global = &imsic->global;
> +        struct imsic_local_priv *lpriv;
> +        unsigned long flags;
> +
> +        for_each_possible_cpu(cpu) {
> +                lpriv = per_cpu_ptr(imsic->lpriv, cpu);
> +
> +                total_avail += global->nr_ids;
> +
> +                raw_spin_lock_irqsave(&lpriv->ids_lock, flags);
> +                total_used += bitmap_weight(lpriv->ids_used_bitmap,
> +                                            global->nr_ids + 1) - 1;
> +                raw_spin_unlock_irqrestore(&lpriv->ids_lock, flags);
> +        }
> +
> +        seq_printf(m, "%*stotal : %5u\n", ind, "", total_avail);
> +        seq_printf(m, "%*sused : %5u\n", ind, "", total_used);
> +        seq_printf(m, "%*s| CPU | tot | usd | vectors\n", ind, " ");
> +
> +        cpus_read_lock();
> +        for_each_online_cpu(cpu) {
> +                lpriv = per_cpu_ptr(imsic->lpriv, cpu);
> +
> +                raw_spin_lock_irqsave(&lpriv->ids_lock, flags);
> +                total_used = bitmap_weight(lpriv->ids_used_bitmap,
> +                                           global->nr_ids + 1) - 1;
> +                seq_printf(m, "%*s %4d %4u %4u %*pbl\n", ind, " ",
> +                           cpu, global->nr_ids, total_used,
> +                           global->nr_ids + 1, lpriv->ids_used_bitmap);
> +                raw_spin_unlock_irqrestore(&lpriv->ids_lock, flags);
> +        }
> +        cpus_read_unlock();

This looks very close to the matrix allocator information, just done
differently.
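For reference, a minimal and completely untested sketch of what the
debugfs side could look like on top of an irq_matrix. imsic_matrix is a
made up name here for a matrix allocated with irq_alloc_matrix() at
init time, not something in your patch:

        #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
        {
                /*
                 * Emits the global counters plus a per CPU table of
                 * allocated vectors, with the locking and accounting
                 * in the core code. imsic_matrix is a placeholder,
                 * see above.
                 */
                irq_matrix_debug_show(m, imsic_matrix, ind);
        }
        #endif

See x86_vector_debug_show() in arch/x86/kernel/apic/vector.c for how
the x86 vector domain wires this up.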
> +static unsigned int imsic_vector_best_cpu(const struct cpumask *mask,
> +                                          unsigned int order)
> +{
> +        struct imsic_global_config *global = &imsic->global;
> +        unsigned int cpu, best_cpu, free, maxfree = 0;
> +        struct imsic_local_priv *lpriv;
> +        unsigned long flags;
> +
> +        best_cpu = UINT_MAX;
> +        for_each_cpu(cpu, mask) {
> +                if (!cpu_online(cpu))
> +                        continue;
> +
> +                lpriv = per_cpu_ptr(imsic->lpriv, cpu);
> +                raw_spin_lock_irqsave(&lpriv->ids_lock, flags);
> +                free = bitmap_weight(lpriv->ids_used_bitmap,
> +                                     global->nr_ids + 1);
> +                free = (global->nr_ids + 1) - free;
> +                raw_spin_unlock_irqrestore(&lpriv->ids_lock, flags);
> +                if (free < BIT(order) || free <= maxfree)
> +                        continue;
> +
> +                best_cpu = cpu;
> +                maxfree = free;
> +        }
> +
> +        return best_cpu;

Looks very much like what the matrix allocator provides, right? What's
the actual reason that you can't use it?
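To illustrate, an untested sketch of the allocation side, assuming
single vector allocations (order 0) and again the made up imsic_matrix
from above:

        unsigned int cpu;
        int local_id;

        /*
         * irq_matrix_alloc() picks the online CPU with the most free
         * slots in @mask and allocates an ID on it in one operation,
         * with the locking and per CPU accounting in the core code.
         * imsic_matrix is a placeholder, see above.
         */
        local_id = irq_matrix_alloc(imsic_matrix, mask, false, &cpu);
        if (local_id < 0)
                return local_id;

That would replace both the search loop above and the driver private
bitmaps, the same way the x86 vector domain uses it.

Thanks,

        tglx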