[RFC PATCH v2 13/13] io_uring: Make remove_ifq_region a delayed work call

Very much a WIP!

The page backing store should not be removed until all outstanding
packets are returned.  The packets may be in flight, owned by the
driver, or sitting in a socket buffer.

This shows how the cleanup routine should check that there are no
packets still in flight before cleaning up the buffers.
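
For illustration only (not part of the patch): the general pattern used
here is a delayed work item that re-arms itself until every buffer's
reference count has dropped, and only then frees the region.  The names
in this sketch (example_region, example_buf, etc.) are made up and do
not appear in the patch:

    /* Illustrative sketch of a self re-arming release work item. */
    struct example_buf {
    	atomic_t		refcount;
    };

    struct example_region {
    	struct delayed_work	release_work;
    	int			nr_bufs;
    	struct example_buf	*bufs;
    };

    static void example_release_work(struct work_struct *work)
    {
    	struct example_region *r = container_of(to_delayed_work(work),
    				struct example_region, release_work);
    	int i;

    	/* If any buffer is still referenced, check again in a second. */
    	for (i = 0; i < r->nr_bufs; i++) {
    		if (atomic_read(&r->bufs[i].refcount)) {
    			schedule_delayed_work(&r->release_work, HZ);
    			return;
    		}
    	}

    	/* All references gone; safe to free the backing store. */
    	kvfree(r->bufs);
    	kvfree(r);
    }

    static void example_remove_region(struct example_region *r)
    {
    	INIT_DELAYED_WORK(&r->release_work, example_release_work);
    	schedule_delayed_work(&r->release_work, 0);
    }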

Signed-off-by: Jonathan Lemon <jonathan.lemon@xxxxxxxxx>
---
 io_uring/zctap.c | 33 +++++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/io_uring/zctap.c b/io_uring/zctap.c
index 4a551349b600..a1525a0b0245 100644
--- a/io_uring/zctap.c
+++ b/io_uring/zctap.c
@@ -25,6 +25,7 @@ struct ifq_region {
 	u16			id;
 
 	spinlock_t		freelist_lock;
+	struct delayed_work	release_work;
 
 	struct io_zctap_buf	*buf;
 	struct io_zctap_buf	*freelist[];
@@ -199,24 +200,38 @@ static void io_zctap_put_page(struct io_zctap_ifq *ifq, struct page *page)
        io_zctap_put_buf(ifq, &ifr->buf[pgid]);
 }
 
-static void io_remove_ifq_region(struct ifq_region *ifr)
+static void io_remove_ifq_region_work(struct work_struct *work)
 {
-	struct io_mapped_ubuf *imu;
-	struct page *page;
-	int i;
+	struct ifq_region *ifr = container_of(
+		to_delayed_work(work), struct ifq_region, release_work);
+	struct io_zctap_buf *buf;
+	int i, refs;
 
-	imu = ifr->imu;
 	for (i = 0; i < ifr->nr_pages; i++) {
-		page = imu->bvec[i].bv_page;
+		buf = &ifr->buf[i];
+		refs = atomic_read(&buf->refcount) & IO_ZCTAP_KREF_MASK;
+		if (refs) {
+			schedule_delayed_work(&ifr->release_work, HZ);
+			return;
+		}
+	}
 
-		ClearPagePrivate(page);
-		set_page_private(page, 0);
+	for (i = 0; i < ifr->nr_pages; i++) {
+		buf = &ifr->buf[i];
+		set_page_private(buf->page, 0);
+		ClearPagePrivate(buf->page);
 	}
 
 	kvfree(ifr->buf);
 	kvfree(ifr);
 }
 
+static void io_remove_ifq_region(struct ifq_region *ifr)
+{
+	INIT_DELAYED_WORK(&ifr->release_work, io_remove_ifq_region_work);
+	schedule_delayed_work(&ifr->release_work, 0);
+}
+
 static inline struct device *
 netdev2device(struct net_device *dev)
 {
@@ -403,6 +418,8 @@ static struct io_zctap_ifq *io_zctap_ifq_alloc(void)
 	return &priv->ifq;
 }
 
+/* XXX this seems to be called too late - MM is already torn down? */
+/* need to tear down sockets, then io_uring, then MM */
 static void io_zctap_ifq_free(struct io_zctap_ifq *ifq)
 {
 	if (ifq->queue_id != -1)
-- 
2.30.2



