static void xen_snd_drv_fini(struct xen_snd_front_info *front_info)
{
+ xen_snd_front_evtchnl_free_all(front_info);
}
static int sndback_initwait(struct xen_snd_front_info *front_info)
@@ -32,7 +34,12 @@ static int sndback_initwait(struct xen_snd_front_info *front_info)
if (ret < 0)
return ret;
- return 0;
+ /* create event channels for all streams and publish them in XenStore */
+ ret = xen_snd_front_evtchnl_create_all(front_info, num_streams);
+ if (ret < 0)
+ return ret;
+
+ return xen_snd_front_evtchnl_publish_all(front_info);
}
static int sndback_connect(struct xen_snd_front_info *front_info)
@@ -122,6 +129,7 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
return -ENOMEM;
front_info->xb_dev = xb_dev;
+ spin_lock_init(&front_info->io_lock);
dev_set_drvdata(&xb_dev->dev, front_info);
return xenbus_switch_state(xb_dev, XenbusStateInitialising);
diff --git a/sound/xen/xen_snd_front.h b/sound/xen/xen_snd_front.h
index b52226cb30bc..9c2ffbb4e4b8 100644
--- a/sound/xen/xen_snd_front.h
+++ b/sound/xen/xen_snd_front.h
@@ -13,9 +13,16 @@
#include "xen_snd_front_cfg.h"
+struct xen_snd_front_evtchnl_pair;
+
struct xen_snd_front_info {
struct xenbus_device *xb_dev;
+ /* serializer for backend IO: request/response */
+ spinlock_t io_lock;
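+ /* event channel pairs (request + in-event), one per stream */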
+ int num_evt_pairs;
+ struct xen_snd_front_evtchnl_pair *evt_pairs;
+
struct xen_front_cfg_card cfg;
};
diff --git a/sound/xen/xen_snd_front_evtchnl.c b/sound/xen/xen_snd_front_evtchnl.c
new file mode 100644
index 000000000000..9ece39f938f8
--- /dev/null
+++ b/sound/xen/xen_snd_front_evtchnl.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen para-virtual sound device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
+ */
+
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+
+#include "xen_snd_front.h"
+#include "xen_snd_front_cfg.h"
+#include "xen_snd_front_evtchnl.h"
+
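+/*
+ * Interrupt handler of the request channel: consume responses from the
+ * shared ring, latch the status (and payload, if any) and wake up the
+ * waiter blocked on the channel's completion.
+ */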
+static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
+{
+ struct xen_snd_front_evtchnl *channel = dev_id;
+ struct xen_snd_front_info *front_info = channel->front_info;
+ struct xensnd_resp *resp;
+ RING_IDX i, rp;
+ unsigned long flags;
+
+ if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+
+again:
+ rp = channel->u.req.ring.sring->rsp_prod;
+ /* ensure we see queued responses up to rp */
+ rmb();
+
+ for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
+ resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
+ if (resp->id != channel->evt_id)
+ continue;
+ switch (resp->operation) {
+ case XENSND_OP_OPEN:
+ /* fall through */
+ case XENSND_OP_CLOSE:
+ /* fall through */
+ case XENSND_OP_READ:
+ /* fall through */
+ case XENSND_OP_WRITE:
+ /* fall through */
+ case XENSND_OP_TRIGGER:
+ channel->u.req.resp_status = resp->status;
+ complete(&channel->u.req.completion);
+ break;
+ case XENSND_OP_HW_PARAM_QUERY:
+ channel->u.req.resp_status = resp->status;
+ channel->u.req.resp.hw_param =
+ resp->resp.hw_param;
+ complete(&channel->u.req.completion);
+ break;
+
+ default:
+ dev_err(&front_info->xb_dev->dev,
+ "Operation %d is not supported\n",
+ resp->operation);
+ break;
+ }
+ }
+
+ channel->u.req.ring.rsp_cons = i;
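+ /*
+ * If requests are still outstanding, check for responses that
+ * arrived while processing and rescan; otherwise re-arm rsp_event
+ * so the backend notifies us again.
+ */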
+ if (i != channel->u.req.ring.req_prod_pvt) {
+ int more_to_do;
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
+ more_to_do);
+ if (more_to_do)
+ goto again;
+ } else {
+ channel->u.req.ring.sring->rsp_event = i + 1;
+ }
+
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+ return IRQ_HANDLED;
+}
+
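+/*
+ * Interrupt handler of the in-event channel: drain the event ring
+ * written by the backend, e.g. current stream position updates.
+ */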
+static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
+{
+ struct xen_snd_front_evtchnl *channel = dev_id;
+ struct xen_snd_front_info *front_info = channel->front_info;
+ struct xensnd_event_page *page = channel->u.evt.page;
+ u32 cons, prod;
+ unsigned long flags;
+
+ if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+
+ prod = page->in_prod;
+ /* ensure we see ring contents up to prod */
+ virt_rmb();
+ if (prod == page->in_cons)
+ goto out;
+
+ for (cons = page->in_cons; cons != prod; cons++) {
+ struct xensnd_evt *event;
+
+ event = &XENSND_IN_RING_REF(page, cons);
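+ /* ignore events whose id does not match the expected sequence id */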
+ if (unlikely(event->id != channel->evt_id++))
+ continue;
+
+ switch (event->type) {
+ case XENSND_EVT_CUR_POS:
+ /* do nothing at the moment */
+ break;
+ }
+ }
+
+ page->in_cons = cons;
+ /* ensure the updated consumer index is visible to the backend */
+ virt_wmb();
+
+out:
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+ return IRQ_HANDLED;
+}
+
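+/*
+ * Publish the newly produced request on the shared ring and notify
+ * the backend via the channel interrupt if it expects a kick.
+ */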
+void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
+{
+ int notify;
+
+ channel->u.req.ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
+ if (notify)
+ notify_remote_via_irq(channel->irq);
+}
+
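+/*
+ * Tear down a single channel: wake up pending waiters with -EIO,
+ * unbind the interrupt, free the event channel and end foreign
+ * access to the shared page.
+ */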
+static void evtchnl_free(struct xen_snd_front_info *front_info,
+ struct xen_snd_front_evtchnl *channel)
+{
+ unsigned long page = 0;
+
+ if (channel->type == EVTCHNL_TYPE_REQ)
+ page = (unsigned long)channel->u.req.ring.sring;
+ else if (channel->type == EVTCHNL_TYPE_EVT)
+ page = (unsigned long)channel->u.evt.page;
+
+ if (!page)
+ return;
+
+ channel->state = EVTCHNL_STATE_DISCONNECTED;
+ if (channel->type == EVTCHNL_TYPE_REQ) {
+ /* release everyone who still waits for a response, if any */
+ channel->u.req.resp_status = -EIO;
+ complete_all(&channel->u.req.completion);
+ }
+
+ if (channel->irq)
+ unbind_from_irqhandler(channel->irq, channel);
+
+ if (channel->port)
+ xenbus_free_evtchn(front_info->xb_dev, channel->port);
+
+ /* end access and free the page */
+ if (channel->gref != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(channel->gref, 0, page);