[PATCH v1 3/9] crypto/ycc: Add irq support for ycc kernel rings

From: Zelin Deng <zelin.deng@xxxxxxxxxxxxxxxxx>

Each kernel ring has its own command-done irq. User rings do not enable
irqs for now.

Signed-off-by: Zelin Deng <zelin.deng@xxxxxxxxxxxxxxxxx>
---
 drivers/crypto/ycc/ycc_isr.c | 92 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 80 insertions(+), 12 deletions(-)
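
Note (illustrative, not part of the patch): each kernel ring's irq handler
is the usual minimal top half that only kicks a tasklet bottom half, with
the ring itself passed as dev_id so every irq line maps to exactly one
ring. A rough sketch of that pattern under hypothetical names (demo_ring,
demo_ring_isr, demo_ring_bh), not the driver's own structures:

/* Sketch only -- hypothetical names, not taken from this driver. */
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/pci.h>

struct demo_ring {
	struct tasklet_struct resp_handler;	/* bottom half */
	/* ... ring state ... */
};

/* Top half: do as little as possible in hard-irq context. */
static irqreturn_t demo_ring_isr(int irq, void *data)
{
	struct demo_ring *ring = data;

	tasklet_hi_schedule(&ring->resp_handler);
	return IRQ_HANDLED;
}

/* Bottom half: drain completed commands outside hard-irq context. */
static void demo_ring_bh(unsigned long data)
{
	struct demo_ring *ring = (struct demo_ring *)data;

	/* dequeue and complete responses for this ring */
	(void)ring;
}

/* One irq per ring: the ring is the dev_id handed back to the ISR. */
static int demo_ring_request_irq(struct pci_dev *pdev, struct demo_ring *ring,
				 int vec, const char *name)
{
	tasklet_init(&ring->resp_handler, demo_ring_bh, (unsigned long)ring);
	return request_irq(pci_irq_vector(pdev, vec), demo_ring_isr, 0,
			   name, ring);
}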

diff --git a/drivers/crypto/ycc/ycc_isr.c b/drivers/crypto/ycc/ycc_isr.c
index cd2a2d7..a86c8d7 100644
--- a/drivers/crypto/ycc/ycc_isr.c
+++ b/drivers/crypto/ycc/ycc_isr.c
@@ -12,6 +12,17 @@
 #include <linux/interrupt.h>
 
 #include "ycc_isr.h"
+#include "ycc_dev.h"
+#include "ycc_ring.h"
+
+
+static irqreturn_t ycc_resp_isr(int irq, void *data)
+{
+	struct ycc_ring *ring = (struct ycc_ring *)data;
+
+	tasklet_hi_schedule(&ring->resp_handler);
+	return IRQ_HANDLED;
+}
 
 /*
  * TODO: will implement when ycc ring actually work.
@@ -38,11 +49,13 @@ static irqreturn_t ycc_g_err_isr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-/*
- * TODO: will implement when ycc ring actually work.
- */
 void ycc_resp_process(uintptr_t ring_addr)
 {
+	struct ycc_ring *ring = (struct ycc_ring *)ring_addr;
+
+	ycc_dequeue(ring);
+	if (ring->ydev->is_polling)
+		tasklet_hi_schedule(&ring->resp_handler);
 }
 
 int ycc_enable_msix(struct ycc_dev *ydev)
@@ -83,34 +96,89 @@ static void ycc_cleanup_global_err_workqueue(struct ycc_dev *ydev)
 		destroy_workqueue(ydev->dev_err_q);
 }
 
-/*
- * TODO: Just request irq for global err. Irq for each ring
- * will be requested when ring actually work.
- */
 int ycc_alloc_irqs(struct ycc_dev *ydev)
 {
 	struct pci_dev *rcec_pdev = ydev->assoc_dev->pdev;
 	int num = ydev->is_vf ? 1 : YCC_RINGPAIR_NUM;
-	int ret;
+	int cpu, cpus = num_online_cpus();
+	int ret, i, j;
 
+	/* Vectors 0 - (YCC_RINGPAIR_NUM - 1) are ring irqs, the last one is the dev error irq */
 	sprintf(ydev->err_irq_name, "ycc_dev_%d_global_err", ydev->id);
 	ret = request_irq(pci_irq_vector(rcec_pdev, num),
 			  ycc_g_err_isr, 0, ydev->err_irq_name, ydev);
-	if (ret)
+	if (ret) {
 		pr_err("Failed to alloc global irq interrupt for dev: %d\n", ydev->id);
+		goto out;
+	}
+
+	if (ydev->is_polling)
+		goto out;
+
+	for (i = 0; i < num; i++) {
+		if (ydev->rings[i].type != KERN_RING)
+			continue;
+
+		ydev->msi_name[i] = kzalloc(16, GFP_KERNEL);
+		if (!ydev->msi_name[i])
+			goto free_irq;
+		snprintf(ydev->msi_name[i], 16, "ycc_ring_%d", i);
+		ret = request_irq(pci_irq_vector(rcec_pdev, i), ycc_resp_isr,
+				  0, ydev->msi_name[i], &ydev->rings[i]);
+		if (ret) {
+			kfree(ydev->msi_name[i]);
+			goto free_irq;
+		}
+		if (!ydev->is_vf)
+			cpu = (i % YCC_RINGPAIR_NUM) % cpus;
+		else
+			cpu = smp_processor_id() % cpus;
+
+		ret = irq_set_affinity_hint(pci_irq_vector(rcec_pdev, i),
+					    get_cpu_mask(cpu));
+		if (ret) {
+			free_irq(pci_irq_vector(rcec_pdev, i), &ydev->rings[i]);
+			kfree(ydev->msi_name[i]);
+			goto free_irq;
+		}
+	}
+
+	return 0;
+
+free_irq:
+	for (j = 0; j < i; j++) {
+		if (ydev->rings[j].type != KERN_RING)
+			continue;
+
+		free_irq(pci_irq_vector(rcec_pdev, j), &ydev->rings[j]);
+		kfree(ydev->msi_name[j]);
+	}
+	free_irq(pci_irq_vector(rcec_pdev, num), ydev);
+out:
 
 	return ret;
 }
 
-/*
- * TODO: Same as the allocate action.
- */
 void ycc_free_irqs(struct ycc_dev *ydev)
 {
 	struct pci_dev *rcec_pdev = ydev->assoc_dev->pdev;
 	int num = ydev->is_vf ? 1 : YCC_RINGPAIR_NUM;
+	int i;
 
+	/* Free device err irq */
 	free_irq(pci_irq_vector(rcec_pdev, num), ydev);
+
+	if (ydev->is_polling)
+		return;
+
+	for (i = 0; i < num; i++) {
+		if (ydev->rings[i].type != KERN_RING)
+			continue;
+
+		irq_set_affinity_hint(pci_irq_vector(rcec_pdev, i), NULL);
+		free_irq(pci_irq_vector(rcec_pdev, i), &ydev->rings[i]);
+		kfree(ydev->msi_name[i]);
+	}
 }
 
 int ycc_init_global_err(struct ycc_dev *ydev)
-- 
1.8.3.1
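
A side note on the MSI-X layout the patch relies on: vectors 0 through
YCC_RINGPAIR_NUM - 1 serve the rings and the last vector carries the
global error interrupt, and each kernel ring irq gets an affinity hint
spread round-robin over the online CPUs. A standalone sketch of that
vector/affinity scheme, using a hypothetical demo_setup_vectors() and a
made-up DEMO_NUM_RINGS in place of the driver's constants:

/* Sketch only -- hypothetical helper, not taken from this driver. */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>

#define DEMO_NUM_RINGS	8	/* assumption: stands in for YCC_RINGPAIR_NUM */

static int demo_setup_vectors(struct pci_dev *pdev)
{
	int nvec = DEMO_NUM_RINGS + 1;	/* ring vectors plus one error vector */
	int cpus = num_online_cpus();
	int i, ret;

	/* Rings take vectors 0..DEMO_NUM_RINGS - 1, the error irq takes the last one. */
	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	for (i = 0; i < DEMO_NUM_RINGS; i++) {
		int cpu = i % cpus;	/* round-robin across online CPUs */

		/* request_irq() for each ring vector is omitted in this sketch */
		ret = irq_set_affinity_hint(pci_irq_vector(pdev, i),
					    get_cpu_mask(cpu));
		if (ret)
			goto unwind;
	}

	return 0;

unwind:
	while (--i >= 0)
		irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
	pci_free_irq_vectors(pdev);
	return ret;
}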



