1
0
Fork 0

locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling

The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
combination is repetitive.

In fact, ww_ctx should not be used at all if !use_ww_ctx.  Simplify
ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
clear ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx &&
ww_ctx) by just (ww_ctx).

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Link: https://lore.kernel.org/r/20210316153119.13802-2-longman@redhat.com
rM2-mainline
Waiman Long 2021-03-16 11:31:16 -04:00 committed by Ingo Molnar
parent 1df27313f5
commit 5de2055d31
1 changed file with 14 additions and 11 deletions

View File

@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
*/ */
static __always_inline bool static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
const bool use_ww_ctx, struct mutex_waiter *waiter) struct mutex_waiter *waiter)
{ {
if (!waiter) { if (!waiter) {
/* /*
@ -702,7 +702,7 @@ fail:
#else #else
static __always_inline bool static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
const bool use_ww_ctx, struct mutex_waiter *waiter) struct mutex_waiter *waiter)
{ {
return false; return false;
} }
@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct ww_mutex *ww; struct ww_mutex *ww;
int ret; int ret;
if (!use_ww_ctx)
ww_ctx = NULL;
might_sleep(); might_sleep();
#ifdef CONFIG_DEBUG_MUTEXES #ifdef CONFIG_DEBUG_MUTEXES
@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
#endif #endif
ww = container_of(lock, struct ww_mutex, base); ww = container_of(lock, struct ww_mutex, base);
if (use_ww_ctx && ww_ctx) { if (ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY; return -EALREADY;
@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
if (__mutex_trylock(lock) || if (__mutex_trylock(lock) ||
mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) { mutex_optimistic_spin(lock, ww_ctx, NULL)) {
/* got the lock, yay! */ /* got the lock, yay! */
lock_acquired(&lock->dep_map, ip); lock_acquired(&lock->dep_map, ip);
if (use_ww_ctx && ww_ctx) if (ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx); ww_mutex_set_context_fastpath(ww, ww_ctx);
preempt_enable(); preempt_enable();
return 0; return 0;
@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* After waiting to acquire the wait_lock, try again. * After waiting to acquire the wait_lock, try again.
*/ */
if (__mutex_trylock(lock)) { if (__mutex_trylock(lock)) {
if (use_ww_ctx && ww_ctx) if (ww_ctx)
__ww_mutex_check_waiters(lock, ww_ctx); __ww_mutex_check_waiters(lock, ww_ctx);
goto skip_wait; goto skip_wait;
@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err; goto err;
} }
if (use_ww_ctx && ww_ctx) { if (ww_ctx) {
ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
if (ret) if (ret)
goto err; goto err;
@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* ww_mutex needs to always recheck its position since its waiter * ww_mutex needs to always recheck its position since its waiter
* list is not FIFO ordered. * list is not FIFO ordered.
*/ */
if ((use_ww_ctx && ww_ctx) || !first) { if (ww_ctx || !first) {
first = __mutex_waiter_is_first(lock, &waiter); first = __mutex_waiter_is_first(lock, &waiter);
if (first) if (first)
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* or we must see its unlock and acquire. * or we must see its unlock and acquire.
*/ */
if (__mutex_trylock(lock) || if (__mutex_trylock(lock) ||
(first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter))) (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
break; break;
spin_lock(&lock->wait_lock); spin_lock(&lock->wait_lock);
@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
acquired: acquired:
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
if (use_ww_ctx && ww_ctx) { if (ww_ctx) {
/* /*
* Wound-Wait; we stole the lock (!first_waiter), check the * Wound-Wait; we stole the lock (!first_waiter), check the
* waiters as anyone might want to wound us. * waiters as anyone might want to wound us.
@ -1068,7 +1071,7 @@ skip_wait:
/* got the lock - cleanup and rejoice! */ /* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip); lock_acquired(&lock->dep_map, ip);
if (use_ww_ctx && ww_ctx) if (ww_ctx)
ww_mutex_lock_acquired(ww, ww_ctx); ww_mutex_lock_acquired(ww, ww_ctx);
spin_unlock(&lock->wait_lock); spin_unlock(&lock->wait_lock);