->bit every epitem has an element inside the user item array; this bit is actually the index position within that array and also a bit inside ep->items_bm ->ready_events events received in the period when the descriptor can't be polled from userspace and ep->rdllist is used for keeping the list of ready items ->work work for offloading polling from task context if epfd is polled from userspace but the driver does not provide pollflags on wakeup Signed-off-by: Roman Penyaev <rpenyaev@xxxxxxx> Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> Cc: Davidlohr Bueso <dbueso@xxxxxxx> Cc: Jason Baron <jbaron@xxxxxxxxxx> Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx> Cc: "Paul E. McKenney" <paulmck@xxxxxxxxxxxxxxxxxx> Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx> Cc: Andrea Parri <andrea.parri@xxxxxxxxxxxxxxxxxxxx> Cc: linux-fsdevel@xxxxxxxxxxxxxxx Cc: linux-kernel@xxxxxxxxxxxxxxx --- fs/eventpoll.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ae288f62aa4c..637b463587c1 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -9,6 +9,8 @@ * * Davide Libenzi <davidel@xxxxxxxxxxxxxxx> * + * Polling from userspace support by Roman Penyaev <rpenyaev@xxxxxxx> + * (C) Copyright 2019 SUSE, All Rights Reserved */ #include <linux/init.h> @@ -42,6 +44,7 @@ #include <linux/seq_file.h> #include <linux/compat.h> #include <linux/rculist.h> +#include <linux/workqueue.h> #include <net/busy_poll.h> /* @@ -176,6 +179,18 @@ struct epitem { /* The structure that describe the interested events and the source fd */ struct epoll_event event; + + /* Bit in user bitmap for user polling */ + unsigned int bit; + + /* + * Collect ready events for the period when descriptor is polled by user + * but events are routed to klists. 
+ */ + __poll_t ready_events; + + /* Work for offloading event callback */ + struct work_struct work; }; #define EPOLL_USER_HEADER_SIZE 128 @@ -2557,12 +2572,6 @@ static int __init eventpoll_init(void) ep_nested_calls_init(&poll_safewake_ncalls); #endif - /* - * We can have many thousands of epitems, so prevent this from - * using an extra cache line on 64-bit (and smaller) CPUs - */ - BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128); - /* Allocates slab cache used to allocate "struct epitem" items */ epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); -- 2.19.1