Separate the read and write lock paths to simplify handling of initial
acquire failure.

Derived from Michel Lespinasse's write lock stealing work on rwsem.

Cc: Michel Lespinasse <walken@xxxxxxxxxx>
Signed-off-by: Peter Hurley <peter@xxxxxxxxxxxxxxxxxx>
---
 drivers/tty/tty_ldsem.c | 70 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 57 insertions(+), 13 deletions(-)

diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index a60d7e3..d849fb85 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -220,12 +220,14 @@ static void ldsem_wake(struct ld_semaphore *sem)
 }
 
 /*
- * wait for a lock to be granted
+ * wait for the read lock to be granted
  */
 static struct ld_semaphore __sched *
-down_failed(struct ld_semaphore *sem, unsigned flags, long adjust, long timeout)
+down_read_failed(struct ld_semaphore *sem, long timeout)
 {
 	struct ldsem_waiter waiter;
+	long flags = LDSEM_READ_WAIT;
+	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;
 
 	/* set up my own style of waitqueue */
 	raw_spin_lock_irq(&sem->wait_lock);
@@ -279,22 +281,64 @@ down_failed(struct ld_semaphore *sem, unsigned flags, long adjust, long timeout)
 }
 
 /*
- * wait for the read lock to be granted
- */
-static struct ld_semaphore __sched *
-down_read_failed(struct ld_semaphore *sem, long timeout)
-{
-	return down_failed(sem, LDSEM_READ_WAIT,
-			   -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS, timeout);
-}
-
-/*
  * wait for the write lock to be granted
  */
 static struct ld_semaphore __sched *
 down_write_failed(struct ld_semaphore *sem, long timeout)
 {
-	return down_failed(sem, LDSEM_WRITE_WAIT, -LDSEM_ACTIVE_BIAS, timeout);
+	struct ldsem_waiter waiter;
+	long flags = LDSEM_WRITE_WAIT;
+	long adjust = -LDSEM_ACTIVE_BIAS;
+
+	/* set up my own style of waitqueue */
+	raw_spin_lock_irq(&sem->wait_lock);
+
+	if (flags & LDSEM_READ_WAIT)
+		list_add_tail(&waiter.list, &sem->read_wait);
+	else
+		list_add_tail(&waiter.list, &sem->write_wait);
+
+	waiter.task = current;
+	waiter.flags = flags;
+	get_task_struct(current);
+
+	/* change the lock attempt to a wait --
+	 * if there are no active locks, wake the new lock owner(s)
+	 */
+	if ((ldsem_atomic_update(adjust, sem) & LDSEM_ACTIVE_MASK) == 0)
+		__ldsem_wake(sem, LDSEM_WAKE_NO_CHECK);
+
+	raw_spin_unlock_irq(&sem->wait_lock);
+
+	/* wait to be given the lock */
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		if (!waiter.task)
+			break;
+		if (!timeout)
+			break;
+		timeout = schedule_timeout(timeout);
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	if (!timeout) {
+		/* lock timed out but check if this task was just
+		 * granted lock ownership - if so, pretend there
+		 * was no timeout; otherwise, cleanup lock wait */
+		raw_spin_lock_irq(&sem->wait_lock);
+		if (waiter.task) {
+			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+			list_del(&waiter.list);
+			put_task_struct(waiter.task);
+			raw_spin_unlock_irq(&sem->wait_lock);
+			return NULL;
+		}
+		raw_spin_unlock_irq(&sem->wait_lock);
+	}
+
+	return sem;
 }
-- 
1.8.1.2
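
For context only (not part of the change): the sketch below shows roughly
how the two split slow paths would be reached from the fast-path acquires.
LDSEM_READ_BIAS, LDSEM_WRITE_BIAS and the exact fast-path checks are
assumptions made for illustration; they are not taken from the diff above.

/* Illustrative sketch, not from this patch: the fast path attempts an
 * atomic count update; on contention it falls back to the per-mode slow
 * path and reports failure when that slow path times out.
 */
static int __sched example_ldsem_down_read(struct ld_semaphore *sem,
					   long timeout)
{
	/* optimistic fast path: account one active reader (assumed bias) */
	if (ldsem_atomic_update(LDSEM_READ_BIAS, sem) <= 0) {
		/* contended: block on the read-specific slow path */
		if (!down_read_failed(sem, timeout))
			return 0;	/* timed out */
	}
	return 1;			/* read lock acquired */
}

static int __sched example_ldsem_down_write(struct ld_semaphore *sem,
					    long timeout)
{
	/* optimistic fast path: claim exclusive ownership (assumed bias);
	 * any other active holder means contention
	 */
	long count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);

	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		/* contended: block on the write-specific slow path */
		if (!down_write_failed(sem, timeout))
			return 0;	/* timed out */
	}
	return 1;			/* write lock acquired */
}

With the paths split, each slow path carries its own wait flag and count
adjustment locally, which is what makes the handling of a failed initial
acquire easier to specialize per path.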