
rwsem: skip initial trylock in rwsem_down_write_failed

We can skip the initial trylock in rwsem_down_write_failed() if there
are known active lockers already, thus saving one likely-to-fail
cmpxchg.
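
For illustration only (not part of the patch): below is a minimal user-space sketch
of the same idea using C11 atomics. The constant names and the polling loop are
simplified, hypothetical stand-ins for the rwsem bias values and the schedule()-based
blocking, so this shows the shape of the optimization rather than the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ACTIVE_MASK	0x0000ffffL	/* stand-in: low bits count active lockers */
#define WAITING_BIAS	0x00010000L	/* stand-in: a waiter is queued */
#define WRITE_ACTIVE	0x00000001L	/* stand-in: one active writer */

static _Atomic long sem_count = WAITING_BIAS;	/* queued waiter, no active lockers */

static bool write_lock_slowpath(void)
{
	long count = atomic_load(&sem_count);

	for (int tries = 0; tries < 1000; tries++) {
		/*
		 * Only attempt the trylock when the last observed count
		 * shows no active lockers; otherwise the compare-and-swap
		 * below is guaranteed to fail and would be wasted work.
		 */
		if (!(count & ACTIVE_MASK)) {
			long expected = WAITING_BIAS;
			if (atomic_compare_exchange_strong(&sem_count, &expected,
							   WAITING_BIAS + WRITE_ACTIVE))
				return true;	/* write "lock" acquired */
		}
		/* Wait for active lockers to drain, keeping the reloaded count. */
		do {
			/* the kernel sleeps via schedule(); this sketch just re-polls */
		} while ((count = atomic_load(&sem_count)) & ACTIVE_MASK);
	}
	return false;
}

int main(void)
{
	printf("acquired: %d\n", write_lock_slowpath());
	return 0;
}

The reload in the while condition is what makes the gating possible: the waiter
re-reads the count as it wakes, so on the next iteration it already knows whether
the trylock has any chance of succeeding.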

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Michel Lespinasse 2013-05-07 06:45:57 -07:00 committed by Linus Torvalds
parent a7d2c573ae
commit 9b0fc9c09f
1 changed file with 9 additions and 8 deletions


@@ -216,14 +216,15 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 	/* wait until we successfully acquire the lock */
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	while (true) {
-		/* Try acquiring the write lock. */
-		count = RWSEM_ACTIVE_WRITE_BIAS;
-		if (!list_is_singular(&sem->wait_list))
-			count += RWSEM_WAITING_BIAS;
-		if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+		if (!(count & RWSEM_ACTIVE_MASK)) {
+			/* Try acquiring the write lock. */
+			count = RWSEM_ACTIVE_WRITE_BIAS;
+			if (!list_is_singular(&sem->wait_list))
+				count += RWSEM_WAITING_BIAS;
+			if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
 							RWSEM_WAITING_BIAS)
-			break;
+				break;
+		}
 
 		raw_spin_unlock_irq(&sem->wait_lock);
 
@@ -231,7 +232,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 		do {
 			schedule();
 			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		} while (sem->count & RWSEM_ACTIVE_MASK);
+		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
 		raw_spin_lock_irq(&sem->wait_lock);
 	}