The eiointc interrupt controller supports at most 256 interrupt vectors, and the irq handler reads the interrupt status from the EIOINTC_REG_ISR register group at the base address plus a specific offset. Getting the status of all 256 interrupt vectors therefore requires four reads of the EIOINTC_REG_ISR register group. The eiointc registers, including EIOINTC_REG_ISR, are software emulated for VMs, so every eiointc register access causes a VM-exit.

Introduce a method whereby the eiointc interrupt controller routes every 64 interrupt vectors to a different CPU interrupt pin. The irq handler then knows from the interrupt pin which EIOINTC_REG_ISR register to read, so there is a single EIOINTC_REG_ISR access instead of a loop of four reads, which reduces the number of VM-exits.

Signed-off-by: Bibo Mao <maobibo@xxxxxxxxxxx>
---
 drivers/irqchip/irq-loongson-eiointc.c | 81 +++++++++++++++++++++++---
 1 file changed, 74 insertions(+), 7 deletions(-)

diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 72979f9f0e05..e504a5d3f0b1 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -46,6 +46,7 @@
 #define EIOINTC_ALL_ENABLE_VEC_MASK(vector)	(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
 #define EIOINTC_REG_ENABLE_VEC(vector)		(EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
 #define EIOINTC_USE_CPU_ENCODE			BIT(0)
+#define EIOINTC_ROUTE_MULT_IP			BIT(1)
 
 #define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)
 
@@ -59,6 +60,13 @@
 #define EIOINTC_REG_ROUTE_VEC_MASK(vector)	(0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))
 
 static int nr_pics;
+struct eiointc_priv;
+typedef struct eiointc_ip_route {
+	struct eiointc_priv	*priv;
+	/* Offset Routed destination IP */
+	int			start;
+	int			end;
+} eiointc_ip_route;
 
 struct eiointc_priv {
 	u32			node;
@@ -69,6 +77,7 @@ struct eiointc_priv {
 	struct irq_domain	*eiointc_domain;
 	int			flags;
 	irq_hw_number_t		parent_hwirq;
+	eiointc_ip_route	route_info[VEC_REG_COUNT];
 };
 
 static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
@@ -189,6 +198,7 @@ static int eiointc_router_init(unsigned int cpu)
 {
 	int i, bit, cores, index, node;
 	unsigned int data;
+	int hwirq, mask;
 
 	node = cpu_to_eio_node(cpu);
 	index = eiointc_index(node);
@@ -198,6 +208,13 @@ static int eiointc_router_init(unsigned int cpu)
 		return -EINVAL;
 	}
 
+	/* Enable cpu interrupt pin from eiointc */
+	hwirq = eiointc_priv[index]->parent_hwirq;
+	mask = BIT(hwirq);
+	if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP)
+		mask |= BIT(hwirq + 1) | BIT(hwirq + 2) | BIT(hwirq + 3);
+	set_csr_ecfg(mask);
+
 	if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
 		cores = CORES_PER_EIO_NODE;
 	else
@@ -215,10 +232,28 @@ static int eiointc_router_init(unsigned int cpu)
 		/*
 		 * Route to interrupt pin, relative offset used here
 		 * Offset 0 means routing to IP0 and so on
-		 * Every 32 vector routing to one interrupt pin
+		 *
+		 * If flags is set with EIOINTC_ROUTE_MULT_IP,
+		 * every 64 vector routes to different consecutive
+		 * IPs, otherwise all vector routes to the same IP
 		 */
-		bit = BIT(eiointc_priv[index]->parent_hwirq - INT_HWI0);
-		data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+		if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) {
+			/* The first 64 vectors route to hwirq */
+			bit = BIT(hwirq++ - INT_HWI0);
+			data = bit | (bit << 8);
+
+			/* The second 64 vectors route to hwirq + 1 */
+			bit = BIT(hwirq++ - INT_HWI0);
+			data |= (bit << 16) | (bit << 24);
+
+			/*
+			 * Route to hwirq + 2/hwirq + 3 separately
+			 * in next loop
+			 */
+		} else {
+			bit = BIT(hwirq - INT_HWI0);
+			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+		}
 		iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
 	}
 
@@ -251,11 +286,18 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
 	u64 pending;
 	bool handled = false;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
+	eiointc_ip_route *info = irq_desc_get_handler_data(desc);
 
 	chained_irq_enter(chip, desc);
 
-	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
+	/*
+	 * If EIOINTC_ROUTE_MULT_IP is set, every 64 interrupt vectors in
+	 * eiointc interrupt controller routes to different cpu interrupt pins
+	 *
+	 * Every cpu interrupt pin has its own irq handler, it is ok to
+	 * read ISR for these 64 interrupt vectors rather than all vectors
+	 */
+	for (i = info->start; i < info->end; i++) {
 		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
 
 		/* Skip handling if pending bitmap is zero */
@@ -268,7 +310,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
 			int bit = __ffs(pending);
 			int irq = bit + VEC_COUNT_PER_REG * i;
 
-			generic_handle_domain_irq(priv->eiointc_domain, irq);
+			generic_handle_domain_irq(info->priv->eiointc_domain, irq);
 			pending &= ~BIT(bit);
 			handled = true;
 		}
@@ -468,8 +510,33 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
 	}
 
 	eiointc_priv[nr_pics++] = priv;
+	/*
+	 * Only the first eiointc device on VM supports routing to
+	 * different Interrupt Pins. The later eiointc devices use
+	 * generic method if there are multiple eiointc devices in future
+	 */
+	if (cpu_has_hypervisor && (nr_pics == 1)) {
+		priv->flags |= EIOINTC_ROUTE_MULT_IP;
+		priv->parent_hwirq = INT_HWI0;
+	}
+
+	if (priv->flags & EIOINTC_ROUTE_MULT_IP) {
+		for (i = 0; i < priv->vec_count / VEC_COUNT_PER_REG; i++) {
+			priv->route_info[i].start = priv->parent_hwirq - INT_HWI0 + i;
+			priv->route_info[i].end = priv->route_info[i].start + 1;
+			priv->route_info[i].priv = priv;
+			parent_irq = get_percpu_irq(priv->parent_hwirq + i);
+			irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+							 &priv->route_info[i]);
+		}
+	} else {
+		priv->route_info[0].start = 0;
+		priv->route_info[0].end = priv->vec_count / VEC_COUNT_PER_REG;
+		priv->route_info[0].priv = priv;
+		irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+						 &priv->route_info[0]);
+	}
 	eiointc_router_init(0);
-	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
 
 	if (nr_pics == 1) {
 		register_syscore_ops(&eiointc_syscore_ops);
-- 
2.39.3
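
For illustration only, not part of the patch: a minimal, self-contained sketch of the arithmetic the change relies on. All names below (mock_isr, dispatch_one_pin, the example vector numbers) are invented; the real driver reads the ISR words with iocsr_read64() as shown in the diff above. The point is that each 64-vector ISR word is tied to one CPU interrupt pin, so a pin's handler scans only its own word instead of looping over all four.

/*
 * Standalone model of the multi-IP routing scheme (assumed names, not
 * kernel code). 256 vectors split into four 64-bit ISR words; with
 * EIOINTC_ROUTE_MULT_IP each word is routed to its own CPU interrupt pin.
 */
#include <stdint.h>
#include <stdio.h>

#define VEC_COUNT		256
#define VEC_COUNT_PER_REG	64
#define VEC_REG_COUNT		(VEC_COUNT / VEC_COUNT_PER_REG)	/* 4 ISR words */

static uint64_t mock_isr[VEC_REG_COUNT];	/* stands in for EIOINTC_REG_ISR 0..3 */

/* Handler for one interrupt pin: scan only the ISR word routed to that pin */
static void dispatch_one_pin(int isr_index)
{
	uint64_t pending = mock_isr[isr_index];

	while (pending) {
		int bit = __builtin_ctzll(pending);
		int vector = isr_index * VEC_COUNT_PER_REG + bit;

		printf("pin %d handles vector %d\n", isr_index, vector);
		pending &= pending - 1;		/* clear lowest set bit */
	}
}

int main(void)
{
	/* Pretend vectors 3 and 130 are pending */
	mock_isr[3 / VEC_COUNT_PER_REG]   |= 1ULL << (3 % VEC_COUNT_PER_REG);
	mock_isr[130 / VEC_COUNT_PER_REG] |= 1ULL << (130 % VEC_COUNT_PER_REG);

	/* Vector 130 lives in ISR word 2, so only pin 2's handler sees it */
	dispatch_one_pin(130 / VEC_COUNT_PER_REG);
	dispatch_one_pin(3 / VEC_COUNT_PER_REG);
	return 0;
}

In the patch itself the same split is carried by eiointc_ip_route: each chained handler gets a (start, end) window of ISR indexes, which is a single index when EIOINTC_ROUTE_MULT_IP is set and the full 0..3 range otherwise.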