The Nitro Enclaves PCI device exposes an MMIO space that this driver uses to
submit command requests and to receive command replies, e.g. for enclave
creation / termination or setting enclave resources.

Add logic for handling PCI device command requests based on the given
command type.

Register an MSI-X interrupt vector for command reply notifications to
handle this type of communication event.

Signed-off-by: Alexandru-Catalin Vasile <lexnv@xxxxxxxxxx>
Signed-off-by: Andra Paraschiv <andraprs@xxxxxxxxxx>

Fix issue reported in:
https://lore.kernel.org/lkml/202004231644.xTmN4Z1z%25lkp@xxxxxxxxx/

Reported-by: kbuild test robot <lkp@xxxxxxxxx>
Signed-off-by: Andra Paraschiv <andraprs@xxxxxxxxxx>
---
Changelog

v2 -> v3

* Remove the WARN_ON calls.
* Update static calls sanity checks.
* Remove "ratelimited" from the logs that are not in the ioctl call paths.

v1 -> v2

* Add log pattern for NE.
* Remove the BUG_ON calls.
* Update goto labels to match their purpose.
* Add fix for kbuild report.
---
 drivers/virt/nitro_enclaves/ne_pci_dev.c | 224 +++++++++++++++++++++++
 1 file changed, 224 insertions(+)

diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c
index 0b66166787b6..5e8bfda4bd0f 100644
--- a/drivers/virt/nitro_enclaves/ne_pci_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c
@@ -29,6 +29,209 @@ static const struct pci_device_id ne_pci_ids[] = {
 
 MODULE_DEVICE_TABLE(pci, ne_pci_ids);
 
+/**
+ * ne_submit_request - Submit command request to the PCI device based on the
+ * command type.
+ *
+ * This function gets called with the ne_pci_dev mutex held.
+ *
+ * @pdev: PCI device to send the command to.
+ * @cmd_type: command type of the request sent to the PCI device.
+ * @cmd_request: command request payload.
+ * @cmd_request_size: size of the command request payload.
+ *
+ * @returns: 0 on success, negative return value on failure.
+ */
+static int ne_submit_request(struct pci_dev *pdev,
+			     enum ne_pci_dev_cmd_type cmd_type,
+			     void *cmd_request, size_t cmd_request_size)
+{
+	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
+	memcpy_toio(ne_pci_dev->iomem_base + NE_SEND_DATA, cmd_request,
+		    cmd_request_size);
+
+	iowrite32(cmd_type, ne_pci_dev->iomem_base + NE_COMMAND);
+
+	return 0;
+}
+
+/**
+ * ne_retrieve_reply - Retrieve reply from the PCI device.
+ *
+ * This function gets called with the ne_pci_dev mutex held.
+ *
+ * @pdev: PCI device to receive the reply from.
+ * @cmd_reply: command reply payload.
+ * @cmd_reply_size: size of the command reply payload.
+ *
+ * @returns: 0 on success, negative return value on failure.
+ */
+static int ne_retrieve_reply(struct pci_dev *pdev,
+			     struct ne_pci_dev_cmd_reply *cmd_reply,
+			     size_t cmd_reply_size)
+{
+	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
+	memcpy_fromio(cmd_reply, ne_pci_dev->iomem_base + NE_RECV_DATA,
+		      cmd_reply_size);
+
+	return 0;
+}
+
+/**
+ * ne_wait_for_reply - Wait for a reply of a PCI command.
+ *
+ * This function gets called with the ne_pci_dev mutex held.
+ *
+ * @pdev: PCI device for which a reply is waited.
+ *
+ * @returns: 0 on success, negative return value on failure.
+ */
+static int ne_wait_for_reply(struct pci_dev *pdev)
+{
+	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+	int rc = -EINVAL;
+
+	/*
+	 * TODO: Update to _interruptible and handle interrupted wait event
+	 * e.g. -ERESTARTSYS, incoming signals + add / update timeout.
+	 */
+	rc = wait_event_timeout(ne_pci_dev->cmd_reply_wait_q,
+				atomic_read(&ne_pci_dev->cmd_reply_avail) != 0,
+				msecs_to_jiffies(DEFAULT_TIMEOUT_MSECS));
+	if (!rc)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
+		  void *cmd_request, size_t cmd_request_size,
+		  struct ne_pci_dev_cmd_reply *cmd_reply, size_t cmd_reply_size)
+{
+	struct ne_pci_dev *ne_pci_dev = NULL;
+	int rc = -EINVAL;
+
+	if (!pdev)
+		return -EINVAL;
+
+	ne_pci_dev = pci_get_drvdata(pdev);
+	if (!ne_pci_dev || !ne_pci_dev->iomem_base)
+		return -EINVAL;
+
+	if (cmd_type <= INVALID_CMD || cmd_type >= MAX_CMD) {
+		dev_err_ratelimited(&pdev->dev, NE "Invalid cmd type=%u\n",
+				    cmd_type);
+
+		return -EINVAL;
+	}
+
+	if (!cmd_request) {
+		dev_err_ratelimited(&pdev->dev, NE "Null cmd request\n");
+
+		return -EINVAL;
+	}
+
+	if (cmd_request_size > NE_SEND_DATA_SIZE) {
+		dev_err_ratelimited(&pdev->dev,
+				    NE "Invalid req size=%zu for cmd type=%u\n",
+				    cmd_request_size, cmd_type);
+
+		return -EINVAL;
+	}
+
+	if (!cmd_reply) {
+		dev_err_ratelimited(&pdev->dev, NE "Null cmd reply\n");
+
+		return -EINVAL;
+	}
+
+	if (cmd_reply_size > NE_RECV_DATA_SIZE) {
+		dev_err_ratelimited(&pdev->dev, NE "Invalid reply size=%zu\n",
+				    cmd_reply_size);
+
+		return -EINVAL;
+	}
+
+	/*
+	 * Use this mutex so that the PCI device handles one command request at
+	 * a time.
+	 */
+	mutex_lock(&ne_pci_dev->pci_dev_mutex);
+
+	atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
+
+	rc = ne_submit_request(pdev, cmd_type, cmd_request, cmd_request_size);
+	if (rc < 0) {
+		dev_err_ratelimited(&pdev->dev,
+				    NE "Error in submit request [rc=%d]\n",
+				    rc);
+
+		goto unlock_mutex;
+	}
+
+	rc = ne_wait_for_reply(pdev);
+	if (rc < 0) {
+		dev_err_ratelimited(&pdev->dev,
+				    NE "Error in wait for reply [rc=%d]\n",
+				    rc);
+
+		goto unlock_mutex;
+	}
+
+	rc = ne_retrieve_reply(pdev, cmd_reply, cmd_reply_size);
+	if (rc < 0) {
+		dev_err_ratelimited(&pdev->dev,
+				    NE "Error in retrieve reply [rc=%d]\n",
+				    rc);
+
+		goto unlock_mutex;
+	}
+
+	atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
+
+	if (cmd_reply->rc < 0) {
+		dev_err_ratelimited(&pdev->dev,
+				    NE "Error in cmd process logic [rc=%d]\n",
+				    cmd_reply->rc);
+
+		rc = cmd_reply->rc;
+
+		goto unlock_mutex;
+	}
+
+	mutex_unlock(&ne_pci_dev->pci_dev_mutex);
+
+	return 0;
+
+unlock_mutex:
+	mutex_unlock(&ne_pci_dev->pci_dev_mutex);
+
+	return rc;
+}
+
+/**
+ * ne_reply_handler - Interrupt handler for retrieving a reply matching
+ * a request sent to the PCI device for enclave lifetime management.
+ *
+ * @irq: received interrupt for a reply sent by the PCI device.
+ * @args: PCI device private data structure.
+ *
+ * @returns: IRQ_HANDLED on handled interrupt, IRQ_NONE otherwise.
+ */
+static irqreturn_t ne_reply_handler(int irq, void *args)
+{
+	struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
+
+	atomic_set(&ne_pci_dev->cmd_reply_avail, 1);
+
+	/* TODO: Update to _interruptible. */
+	wake_up(&ne_pci_dev->cmd_reply_wait_q);
+
+	return IRQ_HANDLED;
+}
+
 /**
  * ne_setup_msix - Setup MSI-X vectors for the PCI device.
  *
@@ -60,7 +263,26 @@ static int ne_setup_msix(struct pci_dev *pdev)
 		return rc;
 	}
 
+	/*
+	 * This IRQ gets triggered every time the PCI device responds to a
+	 * command request. The reply is then retrieved, reading from the MMIO
+	 * space of the PCI device.
+	 */
+	rc = request_irq(pci_irq_vector(pdev, NE_VEC_REPLY),
+			 ne_reply_handler, 0, "enclave_cmd", ne_pci_dev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, NE "Error in request irq reply [rc=%d]\n",
+			rc);
+
+		goto free_irq_vectors;
+	}
+
 	return 0;
+
+free_irq_vectors:
+	pci_free_irq_vectors(pdev);
+
+	return rc;
 }
 
 /**
@@ -72,6 +294,8 @@ static void ne_teardown_msix(struct pci_dev *pdev)
 {
 	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
 
+	free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
+
 	pci_free_irq_vectors(pdev);
 }
 
-- 
2.20.1 (Apple Git-117)




Amazon Development Center (Romania) S.R.L. registered office: 27A Sf. Lazar
Street, UBC5, floor 2, Iasi, Iasi County, 700045, Romania. Registered in
Romania. Registration number J22/2621/2005.
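
For reference, below is a minimal caller sketch (not part of the patch) showing
how a consumer of this PCI device driver, e.g. the enclave ioctl path, might
drive ne_do_request() once it has the PCI device handle. The request layout
(struct example_cmd_request), the helper name and the "ne_pci_dev.h" include
are assumptions for illustration only, not definitions taken from this patch:

#include <linux/pci.h>
#include <linux/types.h>

#include "ne_pci_dev.h"	/* assumed driver header declaring ne_do_request() */

/*
 * Placeholder request layout; a real caller would use the request struct
 * and the enum ne_pci_dev_cmd_type value defined for the chosen command.
 */
struct example_cmd_request {
	u64 example_field;
};

static int ne_example_send_cmd(struct pci_dev *pdev,
			       enum ne_pci_dev_cmd_type cmd_type)
{
	struct example_cmd_request cmd_request = {};
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	int rc;

	/* Fill in the request payload for the chosen command type here. */

	/*
	 * ne_do_request() serializes callers on the ne_pci_dev mutex, copies
	 * the request to the device MMIO space, sleeps until the reply IRQ
	 * fires (or the wait times out) and then copies the reply back.
	 */
	rc = ne_do_request(pdev, cmd_type,
			   &cmd_request, sizeof(cmd_request),
			   &cmd_reply, sizeof(cmd_reply));
	if (rc < 0)
		return rc;

	/* On success, cmd_reply holds the reply payload from the device. */
	return 0;
}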