Add poll_mask and pre_poll methods to struct proto_ops and implement the
->get_poll_head and ->poll_mask file operations for sockets on top of
them, with sock_poll() falling back to ->pre_poll/->poll_mask for
protocols that no longer provide ->poll.  The socket file operations
still implement ->poll until all protocols are switched over.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 include/linux/net.h |  3 +++
 net/socket.c        | 61 +++++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/include/linux/net.h b/include/linux/net.h
index c2d468cb9821..94d65de30cb7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -147,6 +147,9 @@ struct proto_ops {
 	int		(*getname)   (struct socket *sock,
 				      struct sockaddr *addr,
 				      int *sockaddr_len, int peer);
+	void		(*pre_poll)  (const struct sock *sk);
+	__poll_t	(*poll_mask) (struct file *file, struct socket *sock,
+				      __poll_t events);
 	__poll_t	(*poll)	     (struct file *file, struct socket *sock,
 				      struct poll_table_struct *wait);
 	int		(*ioctl)     (struct socket *sock, unsigned int cmd,
diff --git a/net/socket.c b/net/socket.c
index 092baa464afc..69b2a5444558 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -118,8 +118,10 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
-static __poll_t sock_poll(struct file *file,
-			  struct poll_table_struct *wait);
+static struct wait_queue_head *sock_get_poll_head(struct file *file,
+		__poll_t events);
+static __poll_t sock_poll_mask(struct file *file, __poll_t);
+static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait);
 static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long compat_sock_ioctl(struct file *file,
@@ -142,6 +144,8 @@ static const struct file_operations socket_file_ops = {
 	.llseek =	no_llseek,
 	.read_iter =	sock_read_iter,
 	.write_iter =	sock_write_iter,
+	.get_poll_head = sock_get_poll_head,
+	.poll_mask =	sock_poll_mask,
 	.poll =		sock_poll,
 	.unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -1094,10 +1098,48 @@ int sock_create_lite(int family, int type, int protocol, struct socket **res)
 }
 EXPORT_SYMBOL(sock_create_lite);
 
+static struct wait_queue_head *sock_get_poll_head(struct file *file,
+		__poll_t events)
+{
+	struct socket *sock = file->private_data;
+
+	if (!sock->ops->poll_mask)
+		return NULL;
+
+	/* once, only if requested by syscall */
+	if (sk_can_busy_loop(sock->sk) &&
+	    (events && (events & POLL_BUSY_LOOP)))
+		sk_busy_loop(sock->sk, 1);
+
+	if (sock->ops->pre_poll)
+		sock->ops->pre_poll(sock->sk);
+
+	return sk_sleep(sock->sk);
+}
+
+static __poll_t sock_poll_mask(struct file *file, __poll_t events)
+{
+	struct socket *sock = file->private_data;
+	__poll_t busy_flag = 0;
+
+	/*
+	 * We need to be sure we are in sync with the socket flags modification.
+	 *
+	 * This memory barrier is paired in the wq_has_sleeper.
+	 */
+	smp_mb();
+
+	/* this socket can poll_ll so tell the system call */
+	if (sk_can_busy_loop(sock->sk))
+		busy_flag = POLL_BUSY_LOOP;
+
+	return busy_flag | sock->ops->poll_mask(file, sock, events);
+}
+
 /* No kernel lock held - perfect */
 static __poll_t sock_poll(struct file *file, poll_table *wait)
 {
-	__poll_t busy_flag = 0;
+	__poll_t busy_flag = 0, mask = 0;
 	struct socket *sock;
 
 	/*
@@ -1114,7 +1156,18 @@ static __poll_t sock_poll(struct file *file, poll_table *wait)
 		sk_busy_loop(sock->sk, 1);
 	}
 
-	return busy_flag | sock->ops->poll(file, sock, wait);
+	if (sock->ops->poll) {
+		mask = sock->ops->poll(file, sock, wait);
+	} else if (sock->ops->poll_mask) {
+		__poll_t events = poll_requested_events(wait);
+
+		if (sock->ops->pre_poll)
+			sock->ops->pre_poll(sock->sk);
+		sock_poll_wait(file, sk_sleep(sock->sk), wait);
+		mask = sock->ops->poll_mask(file, sock, events);
+	}
+
+	return busy_flag | mask;
 }
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)
-- 
2.14.2
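
For protocol conversions, the idea is that a protocol drops its ->poll
instance and provides a ->poll_mask that only computes the event mask;
wait queue registration moves out of the protocol, since the caller
obtains the queue up front through ->get_poll_head (sk_sleep() of the
socket).  A minimal, hypothetical sketch for a datagram-style protocol
-- the foo_* names are invented and the checks are loosely modeled on
datagram_poll, not taken from this patch:

#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>

static __poll_t foo_poll_mask(struct file *file, struct socket *sock,
			      __poll_t events)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	/*
	 * No sock_poll_wait() here: sock_get_poll_head() has already
	 * handed sk_sleep(sk) to the caller before this runs.
	 */

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

static const struct proto_ops foo_dgram_ops = {
	.family		= PF_UNSPEC,	/* placeholder */
	/* ... remaining methods unchanged ... */
	.poll_mask	= foo_poll_mask,
	/* no .poll: sock_poll() falls back to ->poll_mask above */
};

With ->poll_mask in place, sock_get_poll_head() returns sk_sleep(sk)
instead of NULL and the new path can be used; for protocols that only
provide ->poll, it returns NULL and callers fall back to the old
sock_poll() path.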