The "goto again" retry loop can and does induce livelock in -rt. Remove it.

In -rt kernels, spin_unlock_wait(lock) takes and releases the lock in
question, so all it takes to create a self-perpetuating loop is for one
task to start the ball rolling by taking the array lock; other tasks see
this, and react by taking, releasing, and retrying endlessly.

Signed-off-by: Mike Galbraith <bitbucket@xxxxxxxxx>
---
 ipc/sem.c |   56 ++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 38 insertions(+), 18 deletions(-)

Index: linux-2.6/ipc/sem.c
===================================================================
--- linux-2.6.orig/ipc/sem.c
+++ linux-2.6/ipc/sem.c
@@ -208,22 +208,11 @@ void __init sem_init (void)
 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 			      int nsops)
 {
+	struct sem *sem;
 	int locknum;
- again:
-	if (nsops == 1 && !sma->complex_count) {
-		struct sem *sem = sma->sem_base + sops->sem_num;
-
-		/* Lock just the semaphore we are interested in. */
-		spin_lock(&sem->lock);
-
-		/*
-		 * If sma->complex_count was set while we were spinning,
-		 * we may need to look at things we did not lock here.
-		 */
-		if (unlikely(sma->complex_count)) {
-			spin_unlock(&sem->lock);
-			goto lock_array;
-		}
+
+	if (nsops == 1 && !sma->complex_count) {
+		sem = sma->sem_base + sops->sem_num;
 
 		/*
 		 * Another process is holding the global lock on the
@@ -231,9 +220,29 @@ static inline int sem_lock(struct sem_ar
 		 * but have to wait for the global lock to be released.
 		 */
 		if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
-			spin_unlock(&sem->lock);
-			spin_unlock_wait(&sma->sem_perm.lock);
-			goto again;
+			spin_lock(&sma->sem_perm.lock);
+			if (sma->complex_count)
+				goto wait_array;
+
+			/*
+			 * Acquiring our sem->lock under the global lock
+			 * forces new complex operations to wait for us
+			 * to exit our critical section.
+			 */
+			spin_lock(&sem->lock);
+			spin_unlock(&sma->sem_perm.lock);
+		} else {
+			/* Lock just the semaphore we are interested in. */
+			spin_lock(&sem->lock);
+
+			/*
+			 * If sma->complex_count was set prior to acquisition,
+			 * we must fall back to the global array lock.
+			 */
+			if (unlikely(sma->complex_count)) {
+				spin_unlock(&sem->lock);
+				goto lock_array;
+			}
 		}
 
 		locknum = sops->sem_num;
@@ -247,11 +256,22 @@ static inline int sem_lock(struct sem_ar
 		 */
  lock_array:
 		spin_lock(&sma->sem_perm.lock);
+ wait_array:
 		for (i = 0; i < sma->sem_nsems; i++) {
-			struct sem *sem = sma->sem_base + i;
+			sem = sma->sem_base + i;
+#ifdef CONFIG_PREEMPT_RT_BASE
+			if (spin_is_locked(&sem->lock))
+#endif
 			spin_unlock_wait(&sem->lock);
 		}
 		locknum = -1;
+
+		if (nsops == 1 && !sma->complex_count) {
+			sem = sma->sem_base + sops->sem_num;
+			spin_lock(&sem->lock);
+			spin_unlock(&sma->sem_perm.lock);
+			locknum = sops->sem_num;
+		}
 	}
 	return locknum;
 }
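
For anyone who wants to see the failure mode in isolation: below is a
minimal userspace sketch of the removed retry loop, modeling -rt's
spin_unlock_wait() as a lock/unlock pair per the description above.
It is NOT kernel code; all names (array_lock, simple_op, etc.) are
illustrative, and the livelock is probabilistic rather than guaranteed
on any given run.

/*
 * Userspace model of the "goto again" livelock under -rt semantics.
 * Build with: cc -pthread livelock.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t array_lock = PTHREAD_MUTEX_INITIALIZER;

/* -rt's spin_unlock_wait(): takes and releases the lock in question. */
static void rt_spin_unlock_wait(pthread_mutex_t *l)
{
	pthread_mutex_lock(l);
	pthread_mutex_unlock(l);
}

/* Stand-in for spin_is_locked(&sma->sem_perm.lock). */
static int array_is_locked(void)
{
	if (pthread_mutex_trylock(&array_lock))
		return 1;
	pthread_mutex_unlock(&array_lock);
	return 0;
}

/* Each simple-op task runs the removed retry loop. */
static void *simple_op(void *arg)
{
again:
	if (array_is_locked()) {
		/*
		 * The "wait" itself takes/releases array_lock, so
		 * concurrent retriers keep observing the lock as held
		 * and loop back to again: long after the task that
		 * started the ball rolling has dropped the lock.
		 */
		rt_spin_unlock_wait(&array_lock);
		goto again;
	}
	return arg;
}

int main(void)
{
	pthread_t t[4];
	int i;

	/* Start the ball rolling: hold the array lock briefly... */
	pthread_mutex_lock(&array_lock);
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, simple_op, NULL);
	/* ...then release it; the retriers can now feed each other. */
	pthread_mutex_unlock(&array_lock);

	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);	/* may never return */
	puts("no livelock this run");
	return 0;
}

The patch above sidesteps this by blocking on sma->sem_perm.lock
directly, queueing behind the holder, instead of polling it with
spin_is_locked()/spin_unlock_wait() and retrying.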