[RFCv2 12/12] caif_virtio: Introduce caif over virtio

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Vikram ARV <vikram.arv@xxxxxxxxxxxxxx>

caif_virtio is using the remoteproc and virtio framework
for communicating with the modem. The CAIF link layer
device is registered as a network device.

CAIF over virtio uses the virtio rings in both "directions",
and requests a reversed virtio queue in the RX direction.

Signed-off-by: Vikram ARV <vikram.arv@xxxxxxxxxxxxxx>
Signed-off-by: Sjur Brændeland <sjur.brandeland@xxxxxxxxxxxxxx>
---
 drivers/net/caif/Kconfig        |    8 +
 drivers/net/caif/Makefile       |    3 +
 drivers/net/caif/caif_virtio.c  |  482 +++++++++++++++++++++++++++++++++++++++
 include/linux/virtio_caif.h     |   24 ++
 include/uapi/linux/virtio_ids.h |    1 +
 5 files changed, 518 insertions(+), 0 deletions(-)
 create mode 100644 drivers/net/caif/caif_virtio.c
 create mode 100644 include/linux/virtio_caif.h

diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index abf4d7a..a1279d5 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -47,3 +47,11 @@ config CAIF_HSI
        The caif low level driver for CAIF over HSI.
        Be aware that if you enable this then you also need to
        enable a low-level HSI driver.
+
+config CAIF_VIRTIO
+	tristate "CAIF virtio transport driver"
+	depends on CAIF
+	depends on VIRTIO
+	default m
+	---help---
+	The caif driver for CAIF over Virtio.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff86..d9ee26a 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -13,3 +13,6 @@ obj-$(CONFIG_CAIF_SHM) += caif_shm.o
 
 # HSI interface
 obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
+
+# Virtio interface
+obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
new file mode 100644
index 0000000..94efd21
--- /dev/null
+++ b/drivers/net/caif/caif_virtio.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2012
+ * Contact: Sjur Brendeland / sjur.brandeland@xxxxxxxxxxxxxx
+ * Authors: Vicram Arv / vikram.arv@xxxxxxxxxxxxxx,
+ *	    Dmitry Tarnyagin / dmitry.tarnyagin@xxxxxxxxxxxxxx
+ *	    Sjur Brendeland / sjur.brandeland@xxxxxxxxxxxxxx
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s/%d"  fmt,  __func__, __LINE__
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/spinlock.h>
+#include <linux/virtio_caif.h>
+#include <linux/virtio_ring.h>
+#include <net/caif/caif_dev.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vicram Arv <vikram.arv@xxxxxxxxxxxxxx>");
+MODULE_DESCRIPTION("Virtio CAIF Driver");
+
+/**
+ * struct cfv_info - Caif Virtio control structure
+ * @cfdev:	caif common header
+ * @vdev:	Associated virtio device
+ * @vq_rx:	rx/downlink virtqueue
+ * @vq_tx:	tx/uplink virtqueue
+ * @ndev:	associated netdevice
+ * @queued_tx:	number of buffers queued in the tx virtqueue
+ * @watermark_tx: indicates number of buffers the tx queue
+ *		should shrink to in order to unblock datapath
+ * @tx_lock:	protects vq_tx, @queued_tx and @watermark_tx to allow
+ *		concurrent senders
+ * @tx_hr:	transmit headroom
+ * @rx_hr:	receive headroom
+ * @tx_tr:	transmit tailroom
+ * @rx_tr:	receive tailroom
+ * @mtu:	transmit max size
+ * @mru:	receive max size
+ */
+struct cfv_info {
+	struct caif_dev_common cfdev;
+	struct virtio_device *vdev;
+	struct virtqueue *vq_rx;
+	struct virtqueue *vq_tx;
+	struct net_device *ndev;
+	unsigned int queued_tx;
+	unsigned int watermark_tx;
+	/* Protect access to vq_tx */
+	spinlock_t tx_lock;
+	/* Copied from Virtio config space */
+	u16 tx_hr;
+	u16 rx_hr;
+	u16 tx_tr;
+	u16 rx_tr;
+	u32 mtu;
+	u32 mru;
+};
+
+/**
+ * struct token_info - maintains Transmit buffer data handle
+ * @size:	size of transmit buffer
+ * @vaddr:	virtual address mapping to allocated memory area
+ * @dma_handle: handle to allocated dma device memory area
+ *
+ * One token is allocated per transmitted packet and freed again in
+ * cfv_release_used_buf() when the remote side has consumed the buffer.
+ */
+struct token_info {
+	size_t size;
+	u8 *vaddr;
+	dma_addr_t dma_handle;
+};
+
+/* Default if virtio config space is unavailable */
+#define CFV_DEF_MTU_SIZE 4096
+#define CFV_DEF_HEADROOM 0
+#define CFV_DEF_TAILROOM 0
+
+/* Require IP header to be 4-byte aligned. */
+#define IP_HDR_ALIGN 4
+
+/*
+ * virtqueue_next_desc - get next available or linked descriptor
+ * @_vq: the struct virtqueue we're talking about
+ * @desc: "current" descriptor.
+ * @head: on return it is filled by the descriptor index in case of
+ *	available descriptor was returned, or -1 in case of linked
+ *	descriptor.
+ *
+ * The function is to be used as an iterator through received descriptors.
+ *
+ * NOTE(review): virtqueue_next_linked_desc(), virtqueue_add_buf_to_used(),
+ * virtqueue_next_avail_desc() and the vq "reversed" flag belong to the
+ * reversed-vring API introduced elsewhere in this patch series -- their
+ * exact contracts are not visible here.
+ */
+static struct vring_desc *virtqueue_next_desc(struct virtqueue *_vq,
+					      struct vring_desc *desc,
+					      int *head)
+{
+	struct vring_desc *next = virtqueue_next_linked_desc(_vq, desc);
+	/* Only meaningful on a reversed queue (remote owns the avail ring) */
+	BUG_ON(!_vq->reversed);
+	if (next == NULL) {
+		/* Chain exhausted: return the whole chain via the used ring */
+		virtqueue_add_buf_to_used(_vq, *head, 0);
+		/* tell the remote processor to recycle buffer */
+		virtqueue_kick(_vq);
+		next = virtqueue_next_avail_desc(_vq, head);
+	}
+	return next;
+}
+
+/*
+ * This is invoked whenever the remote processor completed processing
+ * a TX msg we just sent it, and the buffer is put back to the used ring.
+ *
+ * Reclaims every completed TX buffer: frees the DMA memory and its token,
+ * and re-opens the netdev tx queues once the number of queued buffers has
+ * drained below the flow-off watermark.
+ *
+ * Fix: the original used a goto/label exit plus an unreachable "return;"
+ * after the infinite loop; restructured with a plain break, same behavior.
+ */
+static void cfv_release_used_buf(struct virtqueue *vq_tx)
+{
+	struct cfv_info *cfv = vq_tx->vdev->priv;
+
+	BUG_ON(vq_tx->reversed);
+	BUG_ON(vq_tx != cfv->vq_tx);
+
+	for (;;) {
+		unsigned int len;
+		struct token_info *buf_info;
+
+		/* Get used buffer from used ring to recycle used descriptors */
+		spin_lock_bh(&cfv->tx_lock);
+		buf_info = virtqueue_get_buf(vq_tx, &len);
+
+		if (!buf_info) {
+			/* No more used buffers pending */
+			spin_unlock_bh(&cfv->tx_lock);
+			break;
+		}
+
+		BUG_ON(!cfv->queued_tx);
+		/* Wake the queues once enough buffers were reclaimed */
+		if (--cfv->queued_tx < cfv->watermark_tx) {
+			cfv->watermark_tx = 0;
+			netif_tx_wake_all_queues(cfv->ndev);
+		}
+		spin_unlock_bh(&cfv->tx_lock);
+
+		/* Matches the dma_alloc_coherent() done on transmit */
+		dma_free_coherent(vq_tx->vdev->dev.parent->parent,
+				  buf_info->size, buf_info->vaddr,
+				  buf_info->dma_handle);
+		kfree(buf_info);
+	}
+}
+
+/*
+ * cfv_read_desc - extract buffer address and length from a descriptor
+ * @d:		descriptor to decode
+ * @buf:	filled with the buffer's virtual address
+ * @size:	filled with the buffer length
+ *
+ * Returns 0 on success, -EINVAL for indirect descriptors (unsupported).
+ */
+static int cfv_read_desc(struct vring_desc *d,
+			 void **buf, size_t *size)
+{
+	if (!(d->flags & VRING_DESC_F_INDIRECT)) {
+		*buf = phys_to_virt(d->addr);
+		*size = d->len;
+		return 0;
+	}
+
+	pr_warn("Indirect descriptor not supported by CAIF\n");
+	return -EINVAL;
+}
+
+/*
+ * cfv_alloc_and_copy_skb - allocate an skb and copy a received frame into it
+ * @cfv:	device state
+ * @frm:	frame as delivered by the remote side, including rx head-
+ *		and tailroom
+ * @frm_len:	total length of @frm
+ *
+ * Returns the skb carrying the CAIF packet, or NULL on invalid length or
+ * allocation failure.
+ */
+static struct sk_buff *cfv_alloc_and_copy_skb(struct cfv_info *cfv,
+					      u8 *frm, u32 frm_len)
+{
+	struct sk_buff *skb;
+	u32 cfpkt_len, pad_len;
+
+	/* Verify that the frame fits within the MRU and actually carries
+	 * payload beyond the down-link head- and tailroom.
+	 * Fix: the error message labelled the printed value "mtu" although
+	 * it prints cfv->mru.
+	 */
+	if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
+		netdev_err(cfv->ndev,
+			   "Invalid frmlen:%u  mru:%u hr:%d tr:%d\n",
+			   frm_len, cfv->mru,  cfv->rx_hr,
+			   cfv->rx_tr);
+		return NULL;
+	}
+
+	/* Payload length excluding head- and tailroom */
+	cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
+
+	/* Extra pad so the IP header ends up IP_HDR_ALIGN-aligned */
+	pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);
+
+	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
+	if (!skb)
+		return NULL;
+
+	/* Reserve space for headers. */
+	skb_reserve(skb, cfv->rx_hr + pad_len);
+
+	memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len);
+	return skb;
+}
+
+/*
+ * This is invoked whenever the remote processor has sent down-link data
+ * on the Rx VQ avail ring and it's time to digest a message.
+ *
+ * CAIF virtio passes a complete CAIF frame including head/tail room
+ * in each linked descriptor. So iterate over all available buffers
+ * in available-ring and the associated linked descriptors.
+ */
+static void cfv_recv(struct virtqueue *vq_rx)
+{
+	struct cfv_info *cfv = vq_rx->vdev->priv;
+	struct vring_desc *desc;
+	struct sk_buff *skb;
+	int head = -1;
+	void *buf;
+	size_t len;
+	BUG_ON(!vq_rx->reversed);
+	/* virtqueue_next_desc() recycles the previous chain back to the
+	 * used ring before fetching the next available descriptor.
+	 */
+	for (desc = virtqueue_next_avail_desc(vq_rx, &head);
+	     desc != NULL && !cfv_read_desc(desc, &buf, &len);
+	     desc = virtqueue_next_desc(vq_rx, desc, &head)) {
+
+		skb = cfv_alloc_and_copy_skb(cfv, buf, len);
+		if (!skb)
+			goto err;
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfv->ndev;
+		/* Push received packet up the stack.
+		 * netif_receive_skb() consumes the skb in either case.
+		 */
+		if (netif_receive_skb(skb))
+			goto err;
+		++cfv->ndev->stats.rx_packets;
+		cfv->ndev->stats.rx_bytes += skb->len;
+	}
+	return;
+err:
+	++cfv->ndev->stats.rx_dropped;
+	/* NOTE(review): bailing out here leaves the current descriptor
+	 * un-recycled (never added back to the used ring) -- confirm the
+	 * remote side can recover that buffer.
+	 */
+	return;
+}
+
+/* ndo_open: only signals carrier up; no resources are acquired here */
+static int cfv_netdev_open(struct net_device *netdev)
+{
+	netif_carrier_on(netdev);
+	return 0;
+}
+
+/* ndo_stop: only signals carrier down; mirrors cfv_netdev_open() */
+static int cfv_netdev_close(struct net_device *netdev)
+{
+	netif_carrier_off(netdev);
+	return 0;
+}
+
+/*
+ * cfv_alloc_and_copy_to_dmabuf - allocate a coherent DMA buffer and copy
+ * the skb payload into it
+ * @cfv:	device state
+ * @skb:	packet to transmit
+ * @sg:		single-entry scatterlist filled in to describe the buffer
+ *
+ * Returns the token tracking the allocation (released again in
+ * cfv_release_used_buf()), or NULL on oversized packet or allocation
+ * failure.
+ */
+static struct token_info *cfv_alloc_and_copy_to_dmabuf(struct cfv_info *cfv,
+						       struct sk_buff *skb,
+						       struct scatterlist *sg)
+{
+	struct caif_payload_info *info = (void *)&skb->cb;
+	struct token_info *buf_info = NULL;
+	u8 pad_len, hdr_ofs;
+
+	if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
+		netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
+			    cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
+		goto err;
+	}
+
+	buf_info = kmalloc(sizeof(struct token_info), GFP_ATOMIC);
+	if (unlikely(!buf_info))
+		goto err;
+
+	/* Make the IP header aligned in the buffer */
+	hdr_ofs = cfv->tx_hr + info->hdr_len;
+	pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
+	buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
+
+	if (WARN_ON_ONCE(!cfv->vdev->dev.parent))
+		goto err;
+
+	/* allocate coherent memory for the buffers */
+	buf_info->vaddr =
+		dma_alloc_coherent(cfv->vdev->dev.parent->parent,
+				   buf_info->size, &buf_info->dma_handle,
+				   GFP_ATOMIC);
+	if (unlikely(!buf_info->vaddr)) {
+		netdev_warn(cfv->ndev,
+			    "Out of DMA memory (alloc %zu bytes)\n",
+			    buf_info->size);
+		goto err;
+	}
+
+	/* copy skbuf contents to send buffer */
+	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
+	/* Fix: the sg length must cover transmit head- and tailroom; the
+	 * original mistakenly added the receive headroom (rx_hr) here.
+	 */
+	sg_init_one(sg, buf_info->vaddr + pad_len,
+		    skb->len + cfv->tx_hr + cfv->tx_tr);
+	return buf_info;
+err:
+	kfree(buf_info);
+	return NULL;
+}
+
+/*
+ * This is invoked whenever the host processor application has sent
+ * up-link data. Send it in the TX VQ avail ring.
+ *
+ * CAIF Virtio does not use linked descriptors in the tx direction.
+ * Always returns NETDEV_TX_OK; the skb is consumed on every path.
+ */
+static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct cfv_info *cfv = netdev_priv(netdev);
+	struct token_info *buf_info;
+	struct scatterlist sg;
+	bool flow_off = false;
+
+	/* Reclaim completed buffers first to make room in the ring */
+	cfv_release_used_buf(cfv->vq_tx);
+
+	buf_info = cfv_alloc_and_copy_to_dmabuf(cfv, skb, &sg);
+	if (!buf_info)
+		goto err;
+
+	spin_lock_bh(&cfv->tx_lock);
+	/*
+	 * Add buffer to avail ring.
+	 * Note: in spite of a space check at beginning of the function,
+	 * the add_buff call might fail in case of concurrent access on smp
+	 * systems.
+	 */
+	if (WARN_ON(virtqueue_add_buf(cfv->vq_tx, &sg, 0, 1,
+				      buf_info, GFP_ATOMIC) < 0)) {
+		/* It should not happen */
+		flow_off = true;
+		goto err_unlock;
+	} else {
+		/* update netdev statistics */
+		cfv->queued_tx++;
+		cfv->ndev->stats.tx_packets++;
+		cfv->ndev->stats.tx_bytes += skb->len;
+	}
+
+	/*
+	 * Flow-off check takes into account number of cpus to make sure
+	 * virtqueue will not be overfilled in any possible smp conditions.
+	 */
+	flow_off = cfv->queued_tx + num_present_cpus() >=
+		virtqueue_get_vring_size(cfv->vq_tx);
+
+	/* tell the remote processor it has a pending message to read */
+	virtqueue_kick(cfv->vq_tx);
+	if (flow_off) {
+		cfv->watermark_tx = cfv->queued_tx >> 1;
+		netif_tx_stop_all_queues(netdev);
+	}
+
+	spin_unlock_bh(&cfv->tx_lock);
+
+	dev_kfree_skb(skb);
+
+	/* Try to speculatively free used buffers */
+	if (flow_off)
+		cfv_release_used_buf(cfv->vq_tx);
+	return NETDEV_TX_OK;
+
+err_unlock:
+	/* Fix: the lock is already held here; the original code re-took it
+	 * with spin_lock_bh(), which would deadlock.
+	 */
+	spin_unlock_bh(&cfv->tx_lock);
+	/* Fix: release the DMA buffer that was never handed to the ring */
+	dma_free_coherent(cfv->vdev->dev.parent->parent,
+			  buf_info->size, buf_info->vaddr,
+			  buf_info->dma_handle);
+	kfree(buf_info);
+err:
+	++cfv->ndev->stats.tx_dropped;
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* netdev callbacks; the transmit path is cfv_netdev_tx() */
+static const struct net_device_ops cfv_netdev_ops = {
+	.ndo_open = cfv_netdev_open,
+	.ndo_stop = cfv_netdev_close,
+	.ndo_start_xmit = cfv_netdev_tx,
+};
+
+/* alloc_netdev() setup callback: configure a CAIF point-to-point link */
+static void cfv_netdev_setup(struct net_device *netdev)
+{
+	netdev->netdev_ops = &cfv_netdev_ops;
+	netdev->type = ARPHRD_CAIF;
+	netdev->tx_queue_len = 100;
+	netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	netdev->mtu = CFV_DEF_MTU_SIZE;
+	/* core calls free_netdev for us on unregister */
+	netdev->destructor = free_netdev;
+}
+
+/* Read one struct virtio_caif_transf_config field from virtio config space.
+ * NOTE(review): the offset is computed within virtio_caif_transf_config
+ * only, so the tx (uplink) and rx (downlink) reads in cfv_probe() fetch
+ * the same bytes -- confirm against struct virtio_caif_config, which has
+ * distinct uplink and downlink sub-structures.
+ */
+#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
+	((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
+			   &_var, \
+			   FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
+
+/*
+ * cfv_probe - bind to a CAIF virtio device
+ *
+ * Allocates the netdev, creates the rx (reversed) and tx virtqueues,
+ * reads link-layer parameters from virtio config space (falling back to
+ * defaults), registers the netdev and kicks the rx queue so the remote
+ * processor may start sending.
+ */
+static int __devinit cfv_probe(struct virtio_device *vdev)
+{
+	vq_callback_t *vq_cbs[] = { cfv_recv, cfv_release_used_buf };
+	const char *names[] = { "input", "output" };
+	const char *cfv_netdev_name = "cfvrt";
+	struct net_device *netdev;
+	struct virtqueue *vqs[2];
+	bool reversed[2];
+	struct cfv_info *cfv;
+	int err = 0;
+
+	netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
+			      cfv_netdev_setup);
+	if (!netdev)
+		return -ENOMEM;
+
+	cfv = netdev_priv(netdev);
+	cfv->vdev = vdev;
+	cfv->ndev = netdev;
+	/* Fix: publish driver state before the virtqueues exist -- both vq
+	 * callbacks dereference vdev->priv and may fire as soon as
+	 * find_vqs() enables the rings.
+	 */
+	vdev->priv = cfv;
+
+	spin_lock_init(&cfv->tx_lock);
+	/* RX is "reversed": the remote side owns its avail ring */
+	reversed[0] = true;
+	reversed[1] = false;
+	/* Get two virtqueues, for tx/ul and rx/dl */
+	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names, reversed);
+	if (err)
+		goto free_cfv;
+
+	cfv->vq_rx = vqs[0];
+	cfv->vq_tx = vqs[1];
+
+	BUG_ON(!cfv->vq_rx->reversed);
+	BUG_ON(cfv->vq_tx->reversed);
+
+	if (vdev->config->get) {
+		GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
+		GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
+		GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
+		GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
+		GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
+		GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
+	} else {
+		/* No config space: use compiled-in defaults */
+		cfv->tx_hr = CFV_DEF_HEADROOM;
+		cfv->rx_hr = CFV_DEF_HEADROOM;
+		cfv->tx_tr = CFV_DEF_TAILROOM;
+		cfv->rx_tr = CFV_DEF_TAILROOM;
+		cfv->mtu = CFV_DEF_MTU_SIZE;
+		cfv->mru = CFV_DEF_MTU_SIZE;
+	}
+
+	netif_carrier_off(netdev);
+
+	/* register Netdev */
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
+		goto vqs_del;
+	}
+
+	/* tell the remote processor it can start sending messages */
+	virtqueue_kick(cfv->vq_rx);
+	return 0;
+
+vqs_del:
+	vdev->config->del_vqs(cfv->vdev);
+free_cfv:
+	free_netdev(netdev);
+	return err;
+}
+
+/*
+ * cfv_remove - unbind from the CAIF virtio device
+ *
+ * Fix: tear down in reverse order of probe. The original unregistered the
+ * netdev last, deleting the virtqueues while the tx/rx paths could still
+ * reference them; unregister the netdev first, then quiesce the device
+ * and delete the queues.
+ */
+static void __devexit cfv_remove(struct virtio_device *vdev)
+{
+	struct cfv_info *cfv = vdev->priv;
+	unregister_netdev(cfv->ndev);
+	vdev->config->reset(vdev);
+	vdev->config->del_vqs(cfv->vdev);
+}
+
+/* Devices this driver binds to: any device with the CAIF virtio ID */
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+/* No optional virtio feature bits are negotiated */
+static unsigned int features[] = {
+};
+
+static struct virtio_driver caif_virtio_driver = {
+	.feature_table		= features,
+	.feature_table_size	= ARRAY_SIZE(features),
+	.driver.name		= KBUILD_MODNAME,
+	.driver.owner		= THIS_MODULE,
+	.id_table		= id_table,
+	.probe			= cfv_probe,
+	.remove			= cfv_remove,
+};
+
+/* Boilerplate module registration via register_virtio_driver() */
+module_driver(caif_virtio_driver, register_virtio_driver,
+	      unregister_virtio_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h
new file mode 100644
index 0000000..5d2d312
--- /dev/null
+++ b/include/linux/virtio_caif.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2012
+ * Author: Sjur Brændeland <sjur.brandeland@xxxxxxxxxxxxxx>
+ *
+ * This header is BSD licensed so
+ * anyone can use the definitions to implement compatible remote processors
+ */
+
+#ifndef VIRTIO_CAIF_H
+#define VIRTIO_CAIF_H
+
+#include <linux/types.h>
+/* Per-direction link-layer parameters as exposed in virtio config space.
+ * NOTE(review): fields are native-endian (u16/u32); confirm whether a
+ * device-visible layout should use explicit __le16/__le32 types instead.
+ */
+struct virtio_caif_transf_config {
+	u16 headroom;
+	u16 tailroom;
+	u32 mtu;
+	u8 reserved[4];
+};
+
+/* Full CAIF config space: uplink parameters followed by downlink ones */
+struct virtio_caif_config {
+	struct virtio_caif_transf_config uplink, downlink;
+	u8 reserved[8];
+};
+#endif
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 270fb22..c2dbedd 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -37,5 +37,6 @@
 #define VIRTIO_ID_RPMSG		7 /* virtio remote processor messaging */
 #define VIRTIO_ID_SCSI		8 /* virtio scsi */
 #define VIRTIO_ID_9P		9 /* 9p virtio console */
+#define VIRTIO_ID_CAIF	       12 /* Virtio caif */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
-- 
1.7.5.4

_______________________________________________
Virtualization mailing list
Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/virtualization



[Index of Archives]     [KVM Development]     [Libvirt Development]     [Libvirt Users]     [CentOS Virtualization]     [Netdev]     [Ethernet Bridging]     [Linux Wireless]     [Kernel Newbies]     [Security]     [Linux for Hams]     [Netfilter]     [Bugtraq]     [Yosemite Forum]     [MIPS Linux]     [ARM Linux]     [Linux RAID]     [Linux Admin]     [Samba]

  Powered by Linux