The remap of fill and completion rings was frowned upon as they control
the usage of UMEM which does not support concurrent use. At the same
time this would disallow the remap of these rings into another process.

A possible use case is that the user wants to transfer the socket/UMEM
ownership to another process (via SYS_pidfd_getfd) and so would need to
also remap these rings.

This will have no impact on current usages and just relaxes the remap
limitation.

Signed-off-by: Nuno Gonçalves <nunog@xxxxxxxx>
---
 net/xdp/xsk.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 2ac58b282b5eb..2af4ff64b22bd 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1300,10 +1300,11 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 {
 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
 	unsigned long size = vma->vm_end - vma->vm_start;
 	struct xdp_sock *xs = xdp_sk(sock->sk);
+	int state = READ_ONCE(xs->state);
 	struct xsk_queue *q = NULL;
 
-	if (READ_ONCE(xs->state) != XSK_READY)
+	if (state != XSK_READY && state != XSK_BOUND)
 		return -EBUSY;
 
 	if (offset == XDP_PGOFF_RX_RING) {
@@ -1314,9 +1315,11 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 		/* Matches the smp_wmb() in XDP_UMEM_REG */
 		smp_rmb();
 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
-			q = READ_ONCE(xs->fq_tmp);
+			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
+						 READ_ONCE(xs->pool->fq);
 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
-			q = READ_ONCE(xs->cq_tmp);
+			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
+						 READ_ONCE(xs->pool->cq);
 	}
 
 	if (!q)
-- 
2.40.0