From ce52a18db45842f5b992851a552bd7f6acb2241b Mon Sep 17 00:00:00 2001
From: Waiman Long
Date: Tue, 2 Oct 2018 16:19:18 -0400
Subject: [PATCH] locking/lockdep: Add a faster path in __lock_release()

When __lock_release() is called, the most likely unlock scenario is on
the innermost lock in the chain. In this case, we can skip some of the
checks and provide a faster path to completion.

Signed-off-by: Waiman Long
Acked-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Will Deacon
Link: http://lkml.kernel.org/r/1538511560-10090-4-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a5d7db558928..511d30f88bce 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3626,6 +3626,13 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
 
+	/*
+	 * The most likely case is when the unlock is on the innermost
+	 * lock. In this case, we are done!
+	 */
+	if (i == depth-1)
+		return 1;
+
 	if (reacquire_held_locks(curr, depth, i + 1))
 		return 0;
 
@@ -3633,10 +3640,14 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	 * We had N bottles of beer on the wall, we drank one, but now
 	 * there's not N-1 bottles of beer left on the wall...
 	 */
-	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
-		return 0;
+	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
 
-	return 1;
+	/*
+	 * Since reacquire_held_locks() would have called check_chain_key()
+	 * indirectly via __lock_acquire(), we don't need to do it again
+	 * on return.
+	 */
+	return 0;
 }
 
 static int __lock_is_held(const struct lockdep_map *lock, int read)
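
For readers not familiar with the lockdep internals touched above, the sketch below is a minimal, stand-alone user-space model of the release logic the patch optimizes. It is not kernel code: every identifier in it (model_lock_release(), find_held_lock_model(), the held_locks[] array, a plain-int lockdep_depth) is a hypothetical simplification of the real structures. It only illustrates why releasing the innermost held lock (i == depth - 1) needs no re-acquire pass, which is the fast path the patch adds.

/*
 * Not kernel code: a stand-alone model of the held-lock stack.
 * All names here are hypothetical stand-ins for lockdep's internals.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 48

struct held_lock_model { int lock_id; };

static struct held_lock_model held_locks[MAX_DEPTH];
static int lockdep_depth;	/* number of currently held locks */

/* Search from the innermost (last) entry outward. */
static int find_held_lock_model(int lock_id)
{
	int i;

	for (i = lockdep_depth - 1; i >= 0; i--)
		if (held_locks[i].lock_id == lock_id)
			return i;
	return -1;
}

static bool model_lock_release(int lock_id)
{
	int depth = lockdep_depth;
	int i = find_held_lock_model(lock_id);
	int j;

	if (i < 0)
		return false;		/* lock is not held */

	lockdep_depth = i;		/* drop this entry */

	/*
	 * Fast path from the patch: the released lock is the innermost
	 * one, so nothing held above it needs to be put back.
	 */
	if (i == depth - 1)
		return true;

	/* Slow path: re-add the locks that were held above it. */
	for (j = i + 1; j < depth; j++)
		held_locks[lockdep_depth++] = held_locks[j];

	/* Sanity check, analogous to the "bottles of beer" warning. */
	return lockdep_depth == depth - 1;
}

int main(void)
{
	held_locks[lockdep_depth++].lock_id = 1;
	held_locks[lockdep_depth++].lock_id = 2;

	model_lock_release(2);		/* innermost: takes the fast path */
	model_lock_release(1);		/* innermost again: fast path     */
	printf("depth after releases: %d\n", lockdep_depth);
	return 0;
}

Built with any C compiler, the model prints a final depth of 0; releasing an outer lock instead falls through to the copy-back loop, which corresponds to the reacquire_held_locks() slow path in the real function.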