The following commit has been merged into the locking/core branch of tip: Commit-ID: 335c73e7c8f7deb23537afbbbe4f8ab48bd5de52 Gitweb: https://git.kernel.org/tip/335c73e7c8f7deb23537afbbbe4f8ab48bd5de52 Author: Arnd Bergmann <arnd@xxxxxxxx> AuthorDate: Mon, 22 Mar 2021 22:42:24 +01:00 Committer: Ingo Molnar <mingo@xxxxxxxxxx> CommitterDate: Tue, 23 Mar 2021 00:08:53 +01:00 static_call: Fix function type mismatch The __static_call_return0() function is declared to return a 'long', while it aliases a couple of functions that all return 'int'. When building with 'make W=1', gcc warns about this: kernel/sched/core.c:5420:37: error: cast between incompatible function types from 'long int (*)(void)' to 'int (*)(void)' [-Werror=cast-function-type] 5420 | static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); Change all these functions to return 'long' as well, but remove the cast to ensure we get a warning if any of the types ever change. Signed-off-by: Arnd Bergmann <arnd@xxxxxxxx> Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx> Link: https://lore.kernel.org/r/20210322214309.730556-1-arnd@xxxxxxxxxx --- include/linux/kernel.h | 4 ++-- include/linux/sched.h | 14 +++++++------- kernel/sched/core.c | 8 ++++---- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5b7ed6d..db24f8c 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -82,12 +82,12 @@ struct user; #ifdef CONFIG_PREEMPT_VOLUNTARY -extern int __cond_resched(void); +extern long __cond_resched(void); # define might_resched() __cond_resched() #elif defined(CONFIG_PREEMPT_DYNAMIC) -extern int __cond_resched(void); +extern long __cond_resched(void); DECLARE_STATIC_CALL(might_resched, __cond_resched); diff --git a/include/linux/sched.h b/include/linux/sched.h index ef00bb2..b08080d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1875,20 +1875,20 @@ static inline int test_tsk_need_resched(struct 
task_struct *tsk) * cond_resched_lock() will drop the spinlock before scheduling, */ #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) -extern int __cond_resched(void); +extern long __cond_resched(void); #ifdef CONFIG_PREEMPT_DYNAMIC DECLARE_STATIC_CALL(cond_resched, __cond_resched); -static __always_inline int _cond_resched(void) +static __always_inline long _cond_resched(void) { return static_call_mod(cond_resched)(); } #else -static inline int _cond_resched(void) +static inline long _cond_resched(void) { return __cond_resched(); } @@ -1897,7 +1897,7 @@ static inline int _cond_resched(void) #else -static inline int _cond_resched(void) { return 0; } +static inline long _cond_resched(void) { return 0; } #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */ @@ -1906,9 +1906,9 @@ static inline int _cond_resched(void) { return 0; } _cond_resched(); \ }) -extern int __cond_resched_lock(spinlock_t *lock); -extern int __cond_resched_rwlock_read(rwlock_t *lock); -extern int __cond_resched_rwlock_write(rwlock_t *lock); +extern long __cond_resched_lock(spinlock_t *lock); +extern long __cond_resched_rwlock_read(rwlock_t *lock); +extern long __cond_resched_rwlock_write(rwlock_t *lock); #define cond_resched_lock(lock) ({ \ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9819121..927fd82 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6976,7 +6976,7 @@ SYSCALL_DEFINE0(sched_yield) } #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) -int __sched __cond_resched(void) +long __sched __cond_resched(void) { if (should_resched(0)) { preempt_schedule_common(); @@ -7006,7 +7006,7 @@ EXPORT_STATIC_CALL_TRAMP(might_resched); * operations here to prevent schedule() from being called twice (once via * spin_unlock(), once by hand). 
*/ -int __cond_resched_lock(spinlock_t *lock) +long __cond_resched_lock(spinlock_t *lock) { int resched = should_resched(PREEMPT_LOCK_OFFSET); int ret = 0; @@ -7026,7 +7026,7 @@ int __cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(__cond_resched_lock); -int __cond_resched_rwlock_read(rwlock_t *lock) +long __cond_resched_rwlock_read(rwlock_t *lock) { int resched = should_resched(PREEMPT_LOCK_OFFSET); int ret = 0; @@ -7046,7 +7046,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock) } EXPORT_SYMBOL(__cond_resched_rwlock_read); -int __cond_resched_rwlock_write(rwlock_t *lock) +long __cond_resched_rwlock_write(rwlock_t *lock) { int resched = should_resched(PREEMPT_LOCK_OFFSET); int ret = 0;