This is a note to let you know that I've just added the patch titled

    crypto: inside-secure - irq balance

to the 5.4-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     crypto-inside-secure-irq-balance.patch
and it can be found in the queue-5.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 4e518de9135e00d5276c46fcca91da8ac5c50b46
Author: Sven Auhagen <Sven.Auhagen@xxxxxxxxxxxx>
Date:   Tue Jul 21 06:37:59 2020 +0200

    crypto: inside-secure - irq balance

    [ Upstream commit c6720415907f21b9c53efbe679b96c3cc9d06404 ]

    Balance the irqs of the inside secure driver over all
    available cpus. Currently all interrupts are handled by
    the first CPU.

    From my testing with IPSec AES-GCM 256 on my MCbin with
    4 Cores I get a 50% speed increase:

    Before the patch: 99.73 Kpps
    With the patch: 151.25 Kpps

    Signed-off-by: Sven Auhagen <sven.auhagen@xxxxxxxxxxxx>
    Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
    Stable-dep-of: ca25c00ccbc5 ("crypto: safexcel - Cleanup ring IRQ workqueues on load failure")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9534f52210af0..2d34a3832d8e6 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1090,11 +1090,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
 
 static int safexcel_request_ring_irq(void *pdev, int irqid,
 				     int is_pci_dev,
+				     int ring_id,
 				     irq_handler_t handler,
 				     irq_handler_t threaded_handler,
 				     struct safexcel_ring_irq_data *ring_irq_priv)
 {
-	int ret, irq;
+	int ret, irq, cpu;
 	struct device *dev;
 
 	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1132,6 +1133,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
 		return ret;
 	}
 
+	/* Set affinity */
+	cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+	irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
 	return irq;
 }
 
@@ -1482,6 +1487,7 @@ static int safexcel_probe_generic(void *pdev,
 		irq = safexcel_request_ring_irq(pdev,
 						EIP197_IRQ_NUMBER(i, is_pci_dev),
 						is_pci_dev,
+						i,
 						safexcel_irq_ring,
 						safexcel_irq_ring_thread,
 						ring_irq);
@@ -1490,6 +1496,7 @@ static int safexcel_probe_generic(void *pdev,
 			return irq;
 		}
 
+		priv->ring[i].irq = irq;
 		priv->ring[i].work_data.priv = priv;
 		priv->ring[i].work_data.ring = i;
 		INIT_WORK(&priv->ring[i].work_data.work,
@@ -1627,8 +1634,10 @@ static int safexcel_remove(struct platform_device *pdev)
 
 	clk_disable_unprepare(priv->clk);
 
-	for (i = 0; i < priv->config.rings; i++)
+	for (i = 0; i < priv->config.rings; i++) {
+		irq_set_affinity_hint(priv->ring[i].irq, NULL);
 		destroy_workqueue(priv->ring[i].workqueue);
+	}
 
 	return 0;
 }
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 930cc48a6f859..6a4d7f09bca96 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -640,6 +640,9 @@ struct safexcel_ring {
 	 */
 	struct crypto_async_request *req;
 	struct crypto_async_request *backlog;
+
+	/* irq of this ring */
+	int irq;
 };
 
 /* EIP integration context flags */
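
For reference, the affinity idiom the patch relies on can be shown in
isolation. The sketch below is not part of the patch; the helpers
example_spread_ring_irq() and example_release_ring_irq() are hypothetical
names used only for illustration. It mirrors what
safexcel_request_ring_irq() and safexcel_remove() do: pick a CPU per ring
with cpumask_local_spread(), publish it via irq_set_affinity_hint(), and
clear the hint before the IRQ goes away.

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/numa.h>

/* Hypothetical helper: spread ring IRQs round-robin over the CPUs
 * of the local NUMA node, so ring 0 lands on the first CPU, ring 1
 * on the second, and so on, instead of all rings on CPU 0. */
static void example_spread_ring_irq(int irq, int ring_id)
{
	/* Map the ring index to a CPU; NUMA_NO_NODE means "no
	 * preferred node", i.e. spread over all online CPUs. */
	int cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);

	/* Hand irqbalance/userspace a single-CPU affinity hint. */
	irq_set_affinity_hint(irq, get_cpu_mask(cpu));
}

/* Hypothetical teardown counterpart: the hint must be dropped with a
 * NULL mask before the IRQ is freed, as safexcel_remove() does. */
static void example_release_ring_irq(int irq)
{
	irq_set_affinity_hint(irq, NULL);
}

Note that irq_set_affinity_hint() only advertises the preferred CPU (and
sets it as the initial affinity); it is the per-ring spread from
cpumask_local_spread() that gives each ring its own CPU and yields the
throughput gain quoted in the commit message.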