
locking/lockdep: Split lockdep_free_key_range() and lockdep_reset_lock()

This patch does not change the behavior of these functions, but it makes
the later patch in this series, which frees unused lock classes, easier
to read.
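
In outline, each function is split along the same seam: the work that
must run under the graph lock moves into a static __-prefixed helper,
and the public entry point keeps only the IRQ and graph-lock bracketing
around a call to it. A minimal sketch of the resulting shape (condensed
from the diff below; the hash-table walk is elided):

    /* Caller must hold the graph lock. */
    static void __lockdep_free_key_range(void *start, unsigned long size)
    {
            /*
             * ... walk classhash_table and zap_class() every class whose
             * key or name lies within [start, start + size) ...
             */
    }

    void lockdep_free_key_range(void *start, unsigned long size)
    {
            unsigned long flags;
            int locked;

            init_data_structures_once();

            raw_local_irq_save(flags);
            locked = graph_lock();
            __lockdep_free_key_range(start, size);
            if (locked)
                    graph_unlock();
            raw_local_irq_restore(flags);
    }

lockdep_reset_lock() is split the same way into __lockdep_reset_lock()
plus a locking wrapper.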

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-9-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Bart Van Assche 2019-02-14 15:00:43 -08:00 committed by Ingo Molnar
parent feb0a3865e
commit 956f3563a8
1 changed file with 36 additions and 36 deletions


@@ -4160,6 +4160,24 @@ static inline int within(const void *addr, void *start, unsigned long size)
 	return addr >= start && addr < start + size;
 }
 
+static void __lockdep_free_key_range(void *start, unsigned long size)
+{
+	struct lock_class *class;
+	struct hlist_head *head;
+	int i;
+
+	/* Unhash all classes that were created by a module. */
+	for (i = 0; i < CLASSHASH_SIZE; i++) {
+		head = classhash_table + i;
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
+			if (!within(class->key, start, size) &&
+			    !within(class->name, start, size))
+				continue;
+			zap_class(class);
+		}
+	}
+}
+
 /*
  * Used in module.c to remove lock classes from memory that is going to be
  * freed; and possibly re-used by other modules.
@@ -4170,30 +4188,14 @@ static inline int within(const void *addr, void *start, unsigned long size)
  */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-	struct lock_class *class;
-	struct hlist_head *head;
 	unsigned long flags;
-	int i;
 	int locked;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();
-
-	/*
-	 * Unhash all classes that were created by this module:
-	 */
-	for (i = 0; i < CLASSHASH_SIZE; i++) {
-		head = classhash_table + i;
-		hlist_for_each_entry_rcu(class, head, hash_entry) {
-			if (within(class->key, start, size))
-				zap_class(class);
-			else if (within(class->name, start, size))
-				zap_class(class);
-		}
-	}
-
+	__lockdep_free_key_range(start, size);
 	if (locked)
 		graph_unlock();
 	raw_local_irq_restore(flags);
@@ -4235,16 +4237,11 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock)
 	return false;
 }
 
-void lockdep_reset_lock(struct lockdep_map *lock)
+/* The caller must hold the graph lock. Does not sleep. */
+static void __lockdep_reset_lock(struct lockdep_map *lock)
 {
 	struct lock_class *class;
-	unsigned long flags;
-	int j, locked;
-
-	init_data_structures_once();
-
-	raw_local_irq_save(flags);
-	locked = graph_lock();
+	int j;
 
 	/*
 	 * Remove all classes this lock might have:
@@ -4261,19 +4258,22 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	 * Debug check: in the end all mapped classes should
 	 * be gone.
 	 */
-	if (unlikely(lock_class_cache_is_registered(lock))) {
-		if (debug_locks_off_graph_unlock()) {
-			/*
-			 * We all just reset everything, how did it match?
-			 */
-			WARN_ON(1);
-		}
-		goto out_restore;
-	}
+	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
+		debug_locks_off();
+}
 
+void lockdep_reset_lock(struct lockdep_map *lock)
+{
+	unsigned long flags;
+	int locked;
+
+	init_data_structures_once();
+
+	raw_local_irq_save(flags);
+	locked = graph_lock();
+	__lockdep_reset_lock(lock);
 	if (locked)
 		graph_unlock();
-
-out_restore:
 	raw_local_irq_restore(flags);
 }
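
For context, the module.c caller mentioned in the comment above is the
module unload path. A usage sketch (the core_layout field names are
taken from kernels of this era and are shown for illustration, not
quoted from this patch):

    /* On module unload, drop every lock class whose key or name lived
     * in the module's soon-to-be-freed memory range: */
    lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);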