locking/lockdep: Refactor lookup_chain_cache()

Currently, lookup_chain_cache() provides both 'lookup' and 'add'
functionality in a single function. However, each is useful on its
own. So this patch makes lookup_chain_cache() do only the 'lookup'
part and introduces add_chain_cache() to do only the 'add' part,
which also makes the code more readable than before.
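
For readers outside the kernel tree, here is a minimal user-space
sketch of the same lookup/add split. Every name in it is hypothetical:
a pthread mutex stands in for graph_lock, and a plain singly-linked
hash list stands in for the RCU-protected hlist that the real code
walks with hlist_for_each_entry_rcu().

#include <pthread.h>
#include <stdlib.h>

struct entry {
	unsigned long long key;
	struct entry *next;
};

#define NR_BUCKETS 64

static struct entry *buckets[NR_BUCKETS];
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* 'lookup' only: walk the bucket and return the entry, or NULL. */
static struct entry *cache_lookup(unsigned long long key)
{
	struct entry *e;

	for (e = buckets[key % NR_BUCKETS]; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

/*
 * 'add' only: the caller must hold cache_lock.
 * Entries are only ever prepended; a truly lock-free reader would
 * still need RCU or acquire/release ordering, omitted here.
 */
static struct entry *cache_add(unsigned long long key)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;
	e->key = key;
	e->next = buckets[key % NR_BUCKETS];
	buckets[key % NR_BUCKETS] = e;
	return e;
}

/* Composition: lock-free fast path, then a locked re-check + add. */
static struct entry *cache_lookup_add(unsigned long long key)
{
	struct entry *e = cache_lookup(key);

	if (e)
		return e;

	pthread_mutex_lock(&cache_lock);
	/* Walk again under the lock to avoid inserting a duplicate. */
	e = cache_lookup(key);
	if (!e)
		e = cache_add(key);
	pthread_mutex_unlock(&cache_lock);
	return e;
}

The second walk under the lock is the same duplicate-avoidance step
the patch keeps in lookup_chain_cache_add() ("We have to walk the
chain again locked").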

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-2-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Byungchul Park 2017-08-07 16:12:48 +09:00 committed by Ingo Molnar
parent ae813308f4
commit 545c23f2e9
1 changed file with 93 additions and 48 deletions

@@ -2151,20 +2151,26 @@ static int check_no_collision(struct task_struct *curr,
}
/*
* Look up a dependency chain. If the key is not present yet then
* add it and return 1 - in this case the new dependency chain is
* validated. If the key is already hashed, return 0.
* (On return with 1 graph_lock is held.)
* Adds a dependency chain into the chain hashtable. Must be called
* with graph_lock held.
*
* Return 0 on failure, with graph_lock released.
* Return 1 on success, with graph_lock held.
*/
static inline int lookup_chain_cache(struct task_struct *curr,
struct held_lock *hlock,
u64 chain_key)
static inline int add_chain_cache(struct task_struct *curr,
struct held_lock *hlock,
u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
int i, j;
/*
* Allocate a new chain entry from the static array, and add
* it to the hash:
*/
/*
* We might need to take the graph lock, ensure we've got IRQs
* disabled to make this an IRQ-safe lock.. for recursion reasons
@@ -2172,43 +2178,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
/*
* We can walk it lock-free, because entries only get added
* to the hash:
*/
hlist_for_each_entry_rcu(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
cache_hit:
debug_atomic_inc(chain_lookup_hits);
if (!check_no_collision(curr, hlock, chain))
return 0;
if (very_verbose(class))
printk("\nhash chain already cached, key: "
"%016Lx tail class: [%p] %s\n",
(unsigned long long)chain_key,
class->key, class->name);
return 0;
}
}
if (very_verbose(class))
printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
(unsigned long long)chain_key, class->key, class->name);
/*
* Allocate a new chain entry from the static array, and add
* it to the hash:
*/
if (!graph_lock())
return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
hlist_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
graph_unlock();
goto cache_hit;
}
}
if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
if (!debug_locks_off_graph_unlock())
return 0;
@@ -2260,6 +2230,78 @@ cache_hit:
return 1;
}
/*
* Look up a dependency chain.
*/
static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
{
struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
/*
* We can walk it lock-free, because entries only get added
* to the hash:
*/
hlist_for_each_entry_rcu(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
debug_atomic_inc(chain_lookup_hits);
return chain;
}
}
return NULL;
}
/*
* If the key is not present yet in the dependency chain cache then
* add it and return 1 - in this case the new dependency chain is
* validated. If the key is already hashed, return 0.
* (On return with 1 graph_lock is held.)
*/
static inline int lookup_chain_cache_add(struct task_struct *curr,
struct held_lock *hlock,
u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
struct lock_chain *chain = lookup_chain_cache(chain_key);
if (chain) {
cache_hit:
if (!check_no_collision(curr, hlock, chain))
return 0;
if (very_verbose(class)) {
printk("\nhash chain already cached, key: "
"%016Lx tail class: [%p] %s\n",
(unsigned long long)chain_key,
class->key, class->name);
}
return 0;
}
if (very_verbose(class)) {
printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
(unsigned long long)chain_key, class->key, class->name);
}
if (!graph_lock())
return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
chain = lookup_chain_cache(chain_key);
if (chain) {
graph_unlock();
goto cache_hit;
}
if (!add_chain_cache(curr, hlock, chain_key))
return 0;
return 1;
}
static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
struct held_lock *hlock, int chain_head, u64 chain_key)
{
@@ -2270,11 +2312,11 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
*
* We look up the chain_key and do the O(N^2) check and update of
* the dependencies only if this is a new dependency chain.
* (If lookup_chain_cache() returns with 1 it acquires
* (If lookup_chain_cache_add() returns with 1 it acquires
* graph_lock for us)
*/
if (!hlock->trylock && hlock->check &&
lookup_chain_cache(curr, hlock, chain_key)) {
lookup_chain_cache_add(curr, hlock, chain_key)) {
/*
* Check whether last held lock:
*
@@ -2302,14 +2344,17 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
* Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock:
*/
if (!chain_head && ret != 2)
if (!chain_head && ret != 2) {
if (!check_prevs_add(curr, hlock))
return 0;
}
graph_unlock();
} else
/* after lookup_chain_cache(): */
} else {
/* after lookup_chain_cache_add(): */
if (unlikely(!debug_locks))
return 0;
}
return 1;
}