4.9-stable review patch. If anyone has any objections, please let me know.

------------------

From: Gary R Hook <gary.hook@xxxxxxx>

commit 7b537b24e76a1e8e6d7ea91483a45d5b1426809b upstream.

The CCP has the ability to perform several operations simultaneously,
but has only one interrupt. When implemented as a PCI device and using
MSI-X/MSI interrupts, use a tasklet model to service interrupts. By
disabling and enabling interrupts from the CCP, coupled with the
queuing that tasklets provide, we can ensure that all events
(occurring on the device) are recognized and serviced.

This change fixes a problem wherein two or more busy queues can cause
notification bits to change state while a (CCP) interrupt is being
serviced, but after the queue state has been evaluated. This results
in the event being 'lost' and the queue hanging, waiting to be
serviced. Since the status bits are never fully de-asserted, the CCP
never generates another interrupt (all bits zero -> one or more bits
one), and no further CCP operations will be executed.

Signed-off-by: Gary R Hook <gary.hook@xxxxxxx>
Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/crypto/ccp/ccp-dev-v3.c |  120 +++++++++++++++++++++++-----------------
 drivers/crypto/ccp/ccp-dev.h    |    3 +
 drivers/crypto/ccp/ccp-pci.c    |    2 
 3 files changed, 75 insertions(+), 50 deletions(-)
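
For review convenience, here is a minimal sketch of the deferred-servicing
pattern the commit message describes. The struct toy_dev, the TOY_* register
offsets, and the toy_* function names are hypothetical stand-ins, not the
CCP's real layout; the actual changes are in the diff below.

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device: several queues share a single interrupt line. */
struct toy_dev {
        void __iomem *regs;             /* mapped MMIO region */
        u32 qmask;                      /* interrupt bits of all live queues */
        bool use_tasklet;               /* set when MSI-X/MSI is in use */
        struct tasklet_struct bh;       /* tasklet_init(&bh, toy_irq_bh,
                                           (unsigned long)dev) at probe time */
};

#define TOY_IRQ_MASK_REG        0x00    /* made-up offsets */
#define TOY_IRQ_STATUS_REG      0x04

/*
 * Bottom half: re-read the status register so bits that asserted after
 * the hard handler ran are still observed, acknowledge what was seen,
 * then unmask the device.
 */
static void toy_irq_bh(unsigned long data)
{
        struct toy_dev *d = (struct toy_dev *)data;
        u32 status = ioread32(d->regs + TOY_IRQ_STATUS_REG);

        iowrite32(status, d->regs + TOY_IRQ_STATUS_REG);  /* acknowledge */
        /* ...wake the per-queue waiters for the bits in status here... */
        iowrite32(d->qmask, d->regs + TOY_IRQ_MASK_REG);  /* re-enable */
}

/* Hard handler: mask the device and defer all status inspection. */
static irqreturn_t toy_irq_handler(int irq, void *data)
{
        struct toy_dev *d = data;

        iowrite32(0x00, d->regs + TOY_IRQ_MASK_REG);
        if (d->use_tasklet)
                tasklet_schedule(&d->bh);
        else
                toy_irq_bh((unsigned long)d);
        return IRQ_HANDLED;
}

Masking before scheduling means a status bit that rises mid-service cannot
be consumed and lost: it is either seen by the tasklet's fresh ioread32()
or raises a new interrupt once the mask is restored.
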
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op
         return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+        iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+        struct ccp_device *ccp = (struct ccp_device *)data;
+        struct ccp_cmd_queue *cmd_q;
+        u32 q_int, status;
+        unsigned int i;
+
+        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+        for (i = 0; i < ccp->cmd_q_count; i++) {
+                cmd_q = &ccp->cmd_q[i];
+
+                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+                if (q_int) {
+                        cmd_q->int_status = status;
+                        cmd_q->q_status = ioread32(cmd_q->reg_status);
+                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+                        /* On error, only save the first error value */
+                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+                        cmd_q->int_rcvd = 1;
+
+                        /* Acknowledge the interrupt and wake the kthread */
+                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+                        wake_up_interruptible(&cmd_q->int_queue);
+                }
+        }
+        ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+        struct device *dev = data;
+        struct ccp_device *ccp = dev_get_drvdata(dev);
+
+        ccp_disable_queue_interrupts(ccp);
+        if (ccp->use_tasklet)
+                tasklet_schedule(&ccp->irq_tasklet);
+        else
+                ccp_irq_bh((unsigned long)ccp);
+
+        return IRQ_HANDLED;
+}
+
 static int ccp_init(struct ccp_device *ccp)
 {
         struct device *dev = ccp->dev;
         struct ccp_cmd_queue *cmd_q;
         struct dma_pool *dma_pool;
         char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-        unsigned int qmr, qim, i;
+        unsigned int qmr, i;
         int ret;
 
         /* Find available queues */
-        qim = 0;
+        ccp->qim = 0;
         qmr = ioread32(ccp->io_regs + Q_MASK_REG);
         for (i = 0; i < MAX_HW_QUEUES; i++) {
                 if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *c
                 init_waitqueue_head(&cmd_q->int_queue);
 
                 /* Build queue interrupt mask (two interrupts per queue) */
-                qim |= cmd_q->int_ok | cmd_q->int_err;
+                ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
 
 #ifdef CONFIG_ARM64
                 /* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *c
         dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
         /* Disable and clear interrupts until ready */
-        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+        ccp_disable_queue_interrupts(ccp);
         for (i = 0; i < ccp->cmd_q_count; i++) {
                 cmd_q = &ccp->cmd_q[i];
 
                 ioread32(cmd_q->reg_int_status);
                 ioread32(cmd_q->reg_status);
         }
-        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
         /* Request an irq */
         ret = ccp->get_irq(ccp);
@@ -408,6 +464,11 @@ static int ccp_init(struct ccp_device *c
         init_waitqueue_head(&ccp->sb_queue);
         init_waitqueue_head(&ccp->suspend_queue);
 
+        /* Initialize the ISR tasklet? */
+        if (ccp->use_tasklet)
+                tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+                             (unsigned long)ccp);
+
         dev_dbg(dev, "Starting threads...\n");
         /* Create a kthread for each queue */
         for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -430,7 +491,7 @@ static int ccp_init(struct ccp_device *c
 
         dev_dbg(dev, "Enabling interrupts...\n");
         /* Enable interrupts */
-        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+        ccp_enable_queue_interrupts(ccp);
 
         dev_dbg(dev, "Registering device...\n");
         ccp_add_device(ccp);
@@ -467,7 +528,7 @@ static void ccp_destroy(struct ccp_devic
 {
         struct ccp_cmd_queue *cmd_q;
         struct ccp_cmd *cmd;
-        unsigned int qim, i;
+        unsigned int i;
 
         /* Unregister the DMA engine */
         ccp_dmaengine_unregister(ccp);
@@ -478,22 +539,15 @@ static void ccp_destroy(struct ccp_devic
         /* Remove this device from the list of available units */
         ccp_del_device(ccp);
 
-        /* Build queue interrupt mask (two interrupt masks per queue) */
-        qim = 0;
-        for (i = 0; i < ccp->cmd_q_count; i++) {
-                cmd_q = &ccp->cmd_q[i];
-                qim |= cmd_q->int_ok | cmd_q->int_err;
-        }
-
         /* Disable and clear interrupts */
-        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+        ccp_disable_queue_interrupts(ccp);
         for (i = 0; i < ccp->cmd_q_count; i++) {
                 cmd_q = &ccp->cmd_q[i];
 
                 ioread32(cmd_q->reg_int_status);
                 ioread32(cmd_q->reg_status);
         }
-        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
         /* Stop the queue kthreads */
         for (i = 0; i < ccp->cmd_q_count; i++)
@@ -520,40 +574,6 @@ static void ccp_destroy(struct ccp_devic
         }
 }
 
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-        struct device *dev = data;
-        struct ccp_device *ccp = dev_get_drvdata(dev);
-        struct ccp_cmd_queue *cmd_q;
-        u32 q_int, status;
-        unsigned int i;
-
-        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-        for (i = 0; i < ccp->cmd_q_count; i++) {
-                cmd_q = &ccp->cmd_q[i];
-
-                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-                if (q_int) {
-                        cmd_q->int_status = status;
-                        cmd_q->q_status = ioread32(cmd_q->reg_status);
-                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-                        /* On error, only save the first error value */
-                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-                        cmd_q->int_rcvd = 1;
-
-                        /* Acknowledge the interrupt and wake the kthread */
-                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-                        wake_up_interruptible(&cmd_q->int_queue);
-                }
-        }
-
-        return IRQ_HANDLED;
-}
-
 static const struct ccp_actions ccp3_actions = {
         .aes = ccp_perform_aes,
         .xts_aes = ccp_perform_xts_aes,
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -332,7 +332,10 @@ struct ccp_device {
         void *dev_specific;
         int (*get_irq)(struct ccp_device *ccp);
         void (*free_irq)(struct ccp_device *ccp);
+        unsigned int qim;
         unsigned int irq;
+        bool use_tasklet;
+        struct tasklet_struct irq_tasklet;
 
         /* I/O area used for device communication. The register mapping
          * starts at an offset into the mapped bar.
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_
                         goto e_irq;
                 }
         }
+        ccp->use_tasklet = true;
 
         return 0;
 
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_de
                 dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
                 goto e_msi;
         }
+        ccp->use_tasklet = true;
 
         return 0;
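
To make the race concrete, the following standalone toy (plain userspace C
with made-up bit values; not driver code) replays the failure sequence from
the commit message under the stated assumption that the device only raises
an interrupt on an all-zero to non-zero status transition:

#include <stdio.h>
#include <stdint.h>

static uint32_t irq_status;     /* one completion bit per queue */

static int edge(uint32_t before, uint32_t after)
{
        return before == 0 && after != 0;   /* zero -> non-zero only */
}

int main(void)
{
        uint32_t before, sampled;

        /* Queue 0 completes: 0x0 -> 0x1, so an interrupt fires. */
        before = irq_status;
        irq_status |= 0x1;
        printf("queue 0 done, interrupt: %d\n", edge(before, irq_status));

        /* The old handler samples status once... */
        sampled = irq_status;               /* sees only bit 0x1 */

        /* ...and queue 1 completes while it is still running. */
        before = irq_status;
        irq_status |= 0x2;
        printf("queue 1 done, interrupt: %d\n", edge(before, irq_status));

        /* The handler acknowledges only the bits it sampled. */
        irq_status &= ~sampled;

        /*
         * Status is stuck at 0x2: it never returns to zero, so no further
         * zero -> non-zero edge occurs and queue 1 hangs.  The patch
         * avoids this by masking the device in the hard handler and
         * re-reading IRQ_STATUS in the tasklet, so a late bit is either
         * picked up by the fresh read or generates a new interrupt when
         * the mask is re-enabled with status bits still pending.
         */
        printf("status after ack: %#x -- queue 1 is never serviced\n",
               irq_status);
        return 0;
}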