[PATCH v1 15/15] io_uring: Notify the application as the fillq is drained.

Userspace maintains a free count of the space available in the fillq,
and only returns as many entries as that free count allows.  As the
kernel removes these entries, it needs to notify the application so
that more buffers can be queued.

At most one notifier is outstanding per queue, and it carries the
count of entries removed from the queue since the last notification.

A notifier with a count of zero is also posted when the NIC is unable
to obtain any buffers; when this happens, the NIC may drop packets or
stall.
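
As an illustration (not part of the uAPI defined by this patch), a
userspace handler for these CQEs could look roughly like the sketch
below.  The res layout (bgid in the high 16 bits, drained count in the
low 16 bits) and the IORING_CQE_F_BUFFER|IORING_CQE_F_NOTIF flags match
io_zctap_notify() in this patch; fillq_free_space[] and
fillq_post_buffers() are hypothetical application code, and the
user_data value is still a placeholder in this version:

  #include <liburing.h>

  /* hypothetical application state: free entries per fill queue */
  extern unsigned int fillq_free_space[];
  /* hypothetical helper: posts up to 'n' buffers to fillq 'bgid' and
   * returns how many were actually posted */
  extern unsigned int fillq_post_buffers(unsigned int bgid, unsigned int n);

  static void handle_fillq_notify(struct io_uring_cqe *cqe)
  {
          unsigned int bgid  = (unsigned int)cqe->res >> 16;
          unsigned int count = cqe->res & 0xffff;
          unsigned int posted;

          if (!(cqe->flags & IORING_CQE_F_NOTIF))
                  return;                 /* not a fillq notification */

          /* count == 0 means the NIC could not obtain any buffers */
          fillq_free_space[bgid] += count;

          /* return as many buffers as the free space allows */
          posted = fillq_post_buffers(bgid, fillq_free_space[bgid]);
          fillq_free_space[bgid] -= posted;
  }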

Signed-off-by: Jonathan Lemon <jonathan.lemon@xxxxxxxxx>
---
 io_uring/zctap.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)
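
Not part of the patch, but for reference, the kind of completion loop
that would dispatch these notifications (plain liburing helpers;
handle_fillq_notify() is the sketch from the commit message above,
handle_other_cqe() is hypothetical):

  static void reap_completions(struct io_uring *ring)
  {
          struct io_uring_cqe *cqe;
          unsigned int head, seen = 0;

          io_uring_for_each_cqe(ring, head, cqe) {
                  if (cqe->flags & IORING_CQE_F_NOTIF)
                          handle_fillq_notify(cqe);
                  else
                          handle_other_cqe(cqe);
                  seen++;
          }
          io_uring_cq_advance(ring, seen);
  }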

diff --git a/io_uring/zctap.c b/io_uring/zctap.c
index c7897fe2ccf6..e6c7ed85d4ee 100644
--- a/io_uring/zctap.c
+++ b/io_uring/zctap.c
@@ -15,6 +15,7 @@
 #include "zctap.h"
 #include "rsrc.h"
 #include "kbuf.h"
+#include "refs.h"
 
 #define NR_ZCTAP_IFQS	1
 
@@ -26,7 +27,9 @@ struct ifq_region {
 	int			cache_count;
 	int			free_count;
 	int			nr_pages;
+	int			taken;
 	u16			id;
+	bool			empty;
 
 	spinlock_t		freelist_lock;
 	struct delayed_work	release_work;
@@ -44,8 +47,14 @@ struct ifq_region {
 struct io_zctap_ifq_priv {
 	struct io_zctap_ifq	ifq;
 	struct ubuf_info	uarg;
+	struct io_kiocb		req;
 };
 
+static struct io_kiocb *io_zctap_ifq_notifier(struct io_zctap_ifq *ifq)
+{
+	return &((struct io_zctap_ifq_priv *)ifq)->req;
+}
+
 static void io_zctap_ifq_put(struct io_zctap_ifq *ifq);
 
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
@@ -131,6 +140,34 @@ static void io_zctap_recycle_buf(struct ifq_region *ifr,
 	}
 }
 
+struct io_zctap_notif {
+	struct file *file;
+	u64 udata;
+	int res;
+	int cflags;
+};
+
+static void io_zctap_post_notify(struct io_kiocb *req, bool *locked)
+{
+	struct io_zctap_notif *n = io_kiocb_to_cmd(req, struct io_zctap_notif);
+
+	io_post_aux_cqe(req->ctx, n->udata, n->res, n->cflags, true);
+	io_req_task_complete(req, locked);
+}
+
+static void io_zctap_notify(struct io_kiocb *req, int bgid, int count)
+{
+	struct io_zctap_notif *n = io_kiocb_to_cmd(req, struct io_zctap_notif);
+
+	n->udata = 0xface0000;		/* XXX */
+	n->res = (bgid << 16) | count;
+	n->cflags = IORING_CQE_F_BUFFER|IORING_CQE_F_NOTIF;
+
+	req_ref_get(req);
+	req->io_task_work.func = io_zctap_post_notify;
+	io_req_task_work_add(req);
+}
+
 /* gets a user-supplied buffer from the fill queue
  *   note: may drain N entries, but still have no usable buffers
  *   XXX add retry limit?
@@ -159,6 +196,7 @@ static struct io_zctap_buf *io_zctap_get_buffer(struct io_zctap_ifq *ifq,
 	if (!addr)
 		return NULL;
 
+	ifr->taken++;
 	pgid = addr & 0xffff;
 	region_id = (addr >> 16) & 0xffff;
 	if (region_id) {
@@ -196,6 +234,7 @@ struct io_zctap_buf *io_zctap_get_buf(struct io_zctap_ifq *ifq, int refc)
 {
 	struct io_zctap_buf *buf;
 	struct ifq_region *ifr;
+	struct io_kiocb *req;
 	int count;
 	u16 pgid;
 
@@ -218,6 +257,12 @@ struct io_zctap_buf *io_zctap_get_buf(struct io_zctap_ifq *ifq, int refc)
 	count = io_zctap_get_buffers(ifq, ifr->cache, REGION_REFILL_COUNT);
 	ifr->cache_count += count;
 
+	req = io_zctap_ifq_notifier(ifq);
+	if (ifr->taken && atomic_read(&req->refs) == 1) {
+		io_zctap_notify(req, ifq->fill_bgid, ifr->taken);
+		ifr->taken = 0;
+	}
+
 	if (ifr->cache_count)
 		goto out;
 
@@ -234,11 +279,17 @@ struct io_zctap_buf *io_zctap_get_buf(struct io_zctap_ifq *ifq, int refc)
 	if (ifr->cache_count)
 		goto out;
 
+	if (!ifr->empty && atomic_read(&req->refs) == 1) {
+		io_zctap_notify(req, ifq->fill_bgid, 0);
+		ifr->empty = true;
+	}
+
 	return NULL;
 
 out:
 	pgid = ifr->cache[--ifr->cache_count];
 	buf = &ifr->buf[pgid];
+	ifr->empty = false;
 
 	WARN_RATELIMIT(atomic_read(&buf->refcount),
 		       "pgid:%d refc:%d cache_count:%d\n",
@@ -445,6 +496,8 @@ int io_provide_ifq_region(struct io_zctap_ifq *ifq, u16 id)
 	ifr->id = id;
 	ifr->ifq = ifq;
 	ifr->delay_end = 0;
+	ifr->taken = 0;
+	ifr->empty = false;
 	ifr->cache_count = 0;
 
 	err = io_zctap_map_region(ifr, imu);
@@ -533,6 +586,11 @@ static struct io_zctap_ifq *io_zctap_ifq_alloc(struct io_ring_ctx *ctx)
 	priv->ifq.ctx = ctx;
 	priv->ifq.queue_id = -1;
 	priv->ifq.uarg = &priv->uarg;
+
+	priv->req.ctx = ctx;
+	priv->req.task = current;
+	io_req_set_refcount(&priv->req);
+
 	return &priv->ifq;
 }
 
-- 
2.30.2



