From: Davidlohr Bueso <dave@xxxxxxxxxxxx>
Subject: fs/eventpoll.c: simplify CONFIG_NET_RX_BUSY_POLL ifdefery

Use a single #ifdef CONFIG_NET_RX_BUSY_POLL block with empty static
inline stubs in the #else branch, instead of sprinkling the ifdefs
inside each of the busy-poll helpers ... 'tis easier on the eye.

[akpm@xxxxxxxxxxxxxxxxxxxx: use inlines rather than macros]
Link: http://lkml.kernel.org/r/20180725185620.11020-1-dave@xxxxxxxxxxxx
Signed-off-by: Davidlohr Bueso <dbueso@xxxxxxx>
Cc: Jason Baron <jbaron@xxxxxxxxxx>
Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

diff -puN fs/eventpoll.c~fs-epoll-simply-config_net_rx_busy_poll-ifdefery fs/eventpoll.c
--- a/fs/eventpoll.c~fs-epoll-simply-config_net_rx_busy_poll-ifdefery
+++ a/fs/eventpoll.c
@@ -391,7 +391,6 @@ static bool ep_busy_loop_end(void *p, un
 
 	return ep_events_available(ep) || busy_loop_timeout(start_time);
 }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /*
  * Busy poll if globally on and supporting sockets found && no events,
@@ -401,20 +400,16 @@ static bool ep_busy_loop_end(void *p, un
  */
 static void ep_busy_loop(struct eventpoll *ep, int nonblock)
 {
-#ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int napi_id = READ_ONCE(ep->napi_id);
 
 	if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on())
 		napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep);
-#endif
 }
 
 static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
 {
-#ifdef CONFIG_NET_RX_BUSY_POLL
 	if (ep->napi_id)
 		ep->napi_id = 0;
-#endif
 }
 
 /*
@@ -422,7 +417,6 @@ static inline void ep_reset_busy_poll_na
  */
 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
 {
-#ifdef CONFIG_NET_RX_BUSY_POLL
 	struct eventpoll *ep;
 	unsigned int napi_id;
 	struct socket *sock;
@@ -452,9 +446,24 @@ static inline void ep_set_busy_poll_napi
 
 	/* record NAPI ID for use in next busy poll */
 	ep->napi_id = napi_id;
-#endif
 }
 
+#else
+
+static inline void ep_busy_loop(struct eventpoll *ep, int nonblock)
+{
+}
+
+static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
+{
+}
+
+static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
+{
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * ep_call_nested - Perform a bound (possibly) nested call, by checking
  *                  that the recursion limit is not exceeded, and that _
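
For readers unfamiliar with the idiom, here is a minimal, self-contained
sketch of the pattern the patch applies (CONFIG_FOO, struct foo_ctx and
the foo_*() names are hypothetical, not kernel interfaces): the real
definitions live under a single #ifdef, and the #else branch supplies
empty static inline stubs, so no call site needs conditional compilation.

/*
 * Standalone sketch of the "single ifdef + inline stubs" pattern.
 * CONFIG_FOO and the foo_*() helpers are made-up names for illustration.
 */
#include <stdio.h>

struct foo_ctx {
	unsigned int id;
};

#ifdef CONFIG_FOO

static inline void foo_poll(struct foo_ctx *ctx)
{
	/* Real work happens only when the feature is compiled in. */
	printf("busy polling id %u\n", ctx->id);
}

static inline void foo_reset(struct foo_ctx *ctx)
{
	ctx->id = 0;
}

#else

static inline void foo_poll(struct foo_ctx *ctx)
{
}

static inline void foo_reset(struct foo_ctx *ctx)
{
}

#endif /* CONFIG_FOO */

int main(void)
{
	struct foo_ctx ctx = { .id = 42 };

	/* No conditional compilation needed at the call sites. */
	foo_poll(&ctx);
	foo_reset(&ctx);
	return 0;
}

Built with -DCONFIG_FOO the calls do real work; built without it the
empty inlines compile away entirely, which is why the stubs cost nothing
at run time.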