From c120bce78065cbea460a58b1572c215db9c148ba Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 25 Apr 2019 11:45:12 +0200
Subject: [PATCH] lockdep: Simplify stack trace handling

Replace the indirection through struct stack_trace by using the storage
array based interfaces and storing the information in a small lockdep
specific data structure.

Signed-off-by: Thomas Gleixner
Reviewed-by: Josh Poimboeuf
Acked-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Steven Rostedt
Cc: Alexander Potapenko
Cc: Alexey Dobriyan
Cc: Andrew Morton
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: linux-mm@kvack.org
Cc: David Rientjes
Cc: Catalin Marinas
Cc: Dmitry Vyukov
Cc: Andrey Ryabinin
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport
Cc: Akinobu Mita
Cc: Christoph Hellwig
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy
Cc: Marek Szyprowski
Cc: Johannes Thumshirn
Cc: David Sterba
Cc: Chris Mason
Cc: Josef Bacik
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer
Cc: Alasdair Kergon
Cc: Daniel Vetter
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen
Cc: Maarten Lankhorst
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie
Cc: Jani Nikula
Cc: Rodrigo Vivi
Cc: Tom Zanussi
Cc: Miroslav Benes
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094802.891724020@linutronix.de
---
 include/linux/lockdep.h  |  9 +++++--
 kernel/locking/lockdep.c | 55 ++++++++++++++++++++--------------------
 2 files changed, 35 insertions(+), 29 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 79c3873d58ac..6f165d625320 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,6 +66,11 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
+struct lock_trace {
+	unsigned int		nr_entries;
+	unsigned int		offset;
+};
+
 #define LOCKSTAT_POINTS		4
 
 /*
@@ -100,7 +105,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
+	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct lock_class		*links_to;
-	struct stack_trace		trace;
+	struct lock_trace		trace;
 	int				distance;
 
 	/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3603893d5bbd..45bcaf2e4cb6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,18 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-	trace->nr_entries = 0;
-	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	trace->entries = stack_trace + nr_stack_trace_entries;
-
-	trace->skip = 3;
-
-	save_stack_trace(trace);
-
-	trace->max_entries = trace->nr_entries;
+	unsigned long *entries = stack_trace + nr_stack_trace_entries;
+	unsigned int max_entries;
 
+	trace->offset = nr_stack_trace_entries;
+	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
 	nr_stack_trace_entries += trace->nr_entries;
 
 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@ -1196,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *this,
 			    struct lock_class *links_to, struct list_head *head,
 			    unsigned long ip, int distance,
-			    struct stack_trace *trace)
+			    struct lock_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -1415,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+	unsigned long *entries = stack_trace + trace->offset;
+
+	stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
@@ -1427,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
-	print_stack_trace(&target->trace, 6);
-
+	print_lock_trace(&target->trace, 6);
 	return 0;
 }
 
@@ -1740,7 +1742,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 			len += printk("%*s   %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
@@ -1765,7 +1767,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
+		print_lock_trace(&entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1878,14 +1880,14 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2158,7 +2160,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct stack_trace *trace)
+	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
 	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
@@ -2196,7 +2198,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	this.parent = NULL;
 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
 	if (unlikely(!ret)) {
-		if (!trace->entries) {
+		if (!trace->nr_entries) {
 			/*
 			 * If save_trace fails here, the printing might
 			 * trigger a WARN but because of the !nr_entries it
@@ -2252,7 +2254,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return print_bfs_bug(ret);
 
-	if (!trace->entries && !save_trace(trace))
+	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
 	/*
@@ -2284,14 +2286,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_trace trace = { .nr_entries = 0 };
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.max_entries = 0,
-		.entries = NULL,
-		.skip = 0,
-	};
 
 	/*
 	 * Debugging checks.
@@ -2719,6 +2716,10 @@ static inline int validate_chain(struct task_struct *curr,
 {
 	return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2815,7 +2816,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
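
For reference, below is a standalone userspace sketch (not part of the patch) of the storage scheme the patch switches lockdep to: all traces live back to back in one flat array, and each struct lock_trace records only the offset and length of its slice. The stack_trace_save() here is a mock that fills in fake addresses; the real kernel interface of the same name captures the actual call chain, skipping the first skipnr frames, and stack_trace_print() is likewise modeled by a plain printf loop.

/*
 * Standalone userspace sketch -- NOT kernel code. It models the
 * storage scheme of this patch: one flat array holds all trace
 * entries back to back, and each struct lock_trace records only
 * the offset and length of its slice.
 */
#include <stdio.h>

#define MAX_STACK_TRACE_ENTRIES 128

struct lock_trace {
	unsigned int nr_entries;
	unsigned int offset;
};

static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static unsigned int nr_stack_trace_entries;

/*
 * Mock of the kernel's stack_trace_save(): writes fake "return
 * addresses" into store[] and returns how many were written.
 */
static unsigned int stack_trace_save(unsigned long *store,
				     unsigned int size, unsigned int skipnr)
{
	unsigned int i, n = size < 4 ? size : 4;

	(void)skipnr;
	for (i = 0; i < n; i++)
		store[i] = 0x400000UL + i;
	return n;
}

/*
 * Mirrors the patched save_trace(): carve the next slice out of
 * the shared array and remember only offset + length.
 */
static int save_trace(struct lock_trace *trace)
{
	unsigned long *entries = stack_trace + nr_stack_trace_entries;
	unsigned int max_entries;

	trace->offset = nr_stack_trace_entries;
	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
	nr_stack_trace_entries += trace->nr_entries;

	/* 0 signals that the shared array is (nearly) exhausted. */
	return nr_stack_trace_entries < MAX_STACK_TRACE_ENTRIES - 1;
}

/*
 * Mirrors the new print_lock_trace(): resolve the slice from the
 * stored offset and print it, indented by 'spaces'.
 */
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
	unsigned long *entries = stack_trace + trace->offset;
	unsigned int i;

	for (i = 0; i < trace->nr_entries; i++)
		printf("%*s%#lx\n", (int)spaces, "", entries[i]);
}

int main(void)
{
	struct lock_trace trace = { .nr_entries = 0 };

	if (save_trace(&trace))
		print_lock_trace(&trace, 2);
	return 0;
}

Compared to struct stack_trace, each stored trace drops the entries pointer and the max_entries/skip bookkeeping, keeping just two unsigned ints per trace; that is the simplification the commit message refers to.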