
lockdep: Refactor IRQ trace events fields into struct

Refactor the IRQ trace events fields, used for printing information
about the IRQ trace events, into a separate struct 'irqtrace_events'.

This improves readability by separating out the information used only in
reporting, and also enables (simplified) storing/restoring of
irqtrace_events snapshots.

No functional change intended.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200729110916.3920464-1-elver@google.com
Marco Elver 2020-07-29 13:09:15 +02:00 committed by Ingo Molnar
parent 859247d39f
commit 0584df9c12
4 changed files with 50 additions and 48 deletions
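
For illustration: grouping these fields into one struct means a task's IRQ trace
state can be snapshotted and restored with a single struct copy instead of nine
per-field assignments. A minimal sketch of such usage (hypothetical, not part of
this commit; the helper name is made up):

	#include <linux/sched.h>	/* current, task_struct */
	#include <linux/irqflags.h>	/* struct irqtrace_events */

	#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Hypothetical helper: save and restore the current task's IRQ trace
	 * events around code that may clobber them. With the fields grouped in
	 * struct irqtrace_events, the snapshot is one struct assignment.
	 */
	static void example_irqtrace_save_restore(void)
	{
		struct irqtrace_events snapshot = current->irqtrace;	/* save */

		/* ... code that may modify current->irqtrace ... */

		current->irqtrace = snapshot;				/* restore */
	}
	#endif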

include/linux/irqflags.h

@@ -33,6 +33,19 @@
 #ifdef CONFIG_TRACE_IRQFLAGS

+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+	unsigned int	irq_events;
+	unsigned long	hardirq_enable_ip;
+	unsigned long	hardirq_disable_ip;
+	unsigned int	hardirq_enable_event;
+	unsigned int	hardirq_disable_event;
+	unsigned long	softirq_disable_ip;
+	unsigned long	softirq_enable_ip;
+	unsigned int	softirq_disable_event;
+	unsigned int	softirq_enable_event;
+};
+
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);

include/linux/sched.h

@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/plist.h>
 #include <linux/hrtimer.h>
+#include <linux/irqflags.h>
 #include <linux/seccomp.h>
 #include <linux/nodemask.h>
 #include <linux/rcupdate.h>
@@ -980,17 +981,9 @@ struct task_struct {
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
-	unsigned int		irq_events;
+	struct irqtrace_events	irqtrace;
 	unsigned int		hardirq_threaded;
-	unsigned long		hardirq_enable_ip;
-	unsigned long		hardirq_disable_ip;
-	unsigned int		hardirq_enable_event;
-	unsigned int		hardirq_disable_event;
 	u64			hardirq_chain_key;
-	unsigned long		softirq_disable_ip;
-	unsigned long		softirq_enable_ip;
-	unsigned int		softirq_disable_event;
-	unsigned int		softirq_enable_event;
 	int			softirqs_enabled;
 	int			softirq_context;
 	int			irq_config;

kernel/fork.c

@@ -2035,17 +2035,11 @@ static __latent_entropy struct task_struct *copy_process(
 	seqcount_init(&p->mems_allowed_seq);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
-	p->irq_events = 0;
-	p->hardirq_enable_ip = 0;
-	p->hardirq_enable_event = 0;
-	p->hardirq_disable_ip = _THIS_IP_;
-	p->hardirq_disable_event = 0;
-	p->softirqs_enabled = 1;
-	p->softirq_enable_ip = _THIS_IP_;
-	p->softirq_enable_event = 0;
-	p->softirq_disable_ip = 0;
-	p->softirq_disable_event = 0;
-	p->softirq_context = 0;
+	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
+	p->irqtrace.hardirq_disable_ip = _THIS_IP_;
+	p->irqtrace.softirq_enable_ip = _THIS_IP_;
+	p->softirqs_enabled = 1;
+	p->softirq_context = 0;
 #endif
 	p->pagefault_disabled = 0;

kernel/locking/lockdep.c

@@ -3484,19 +3484,21 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 void print_irqtrace_events(struct task_struct *curr)
 {
-	printk("irq event stamp: %u\n", curr->irq_events);
+	const struct irqtrace_events *trace = &curr->irqtrace;
+
+	printk("irq event stamp: %u\n", trace->irq_events);
 	printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
-		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
-		(void *)curr->hardirq_enable_ip);
+		trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
+		(void *)trace->hardirq_enable_ip);
 	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
-		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
-		(void *)curr->hardirq_disable_ip);
+		trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
+		(void *)trace->hardirq_disable_ip);
 	printk("softirqs last enabled at (%u): [<%px>] %pS\n",
-		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
-		(void *)curr->softirq_enable_ip);
+		trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
+		(void *)trace->softirq_enable_ip);
 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
-		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
-		(void *)curr->softirq_disable_ip);
+		trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
+		(void *)trace->softirq_disable_ip);
 }
 
 static int HARDIRQ_verbose(struct lock_class *class)
@@ -3699,7 +3701,7 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
 
 void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
-	struct task_struct *curr = current;
+	struct irqtrace_events *trace = &current->irqtrace;
 
 	if (unlikely(!debug_locks))
 		return;
@@ -3752,8 +3754,8 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 skip_checks:
 	/* we'll do an OFF -> ON transition: */
 	this_cpu_write(hardirqs_enabled, 1);
-	curr->hardirq_enable_ip = ip;
-	curr->hardirq_enable_event = ++curr->irq_events;
+	trace->hardirq_enable_ip = ip;
+	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
@@ -3763,8 +3765,6 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
  */
 void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
-	struct task_struct *curr = current;
-
 	if (unlikely(!debug_locks))
 		return;
@@ -3784,12 +3784,14 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 		return;
 
 	if (lockdep_hardirqs_enabled()) {
+		struct irqtrace_events *trace = &current->irqtrace;
+
 		/*
 		 * We have done an ON -> OFF transition:
 		 */
 		this_cpu_write(hardirqs_enabled, 0);
-		curr->hardirq_disable_ip = ip;
-		curr->hardirq_disable_event = ++curr->irq_events;
+		trace->hardirq_disable_ip = ip;
+		trace->hardirq_disable_event = ++trace->irq_events;
 		debug_atomic_inc(hardirqs_off_events);
 	} else {
 		debug_atomic_inc(redundant_hardirqs_off);
@@ -3802,7 +3804,7 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
  */
 void lockdep_softirqs_on(unsigned long ip)
 {
-	struct task_struct *curr = current;
+	struct irqtrace_events *trace = &current->irqtrace;
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -3814,7 +3816,7 @@ void lockdep_softirqs_on(unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
-	if (curr->softirqs_enabled) {
+	if (current->softirqs_enabled) {
 		debug_atomic_inc(redundant_softirqs_on);
 		return;
 	}
@@ -3823,9 +3825,9 @@ void lockdep_softirqs_on(unsigned long ip)
 	/*
 	 * We'll do an OFF -> ON transition:
 	 */
-	curr->softirqs_enabled = 1;
-	curr->softirq_enable_ip = ip;
-	curr->softirq_enable_event = ++curr->irq_events;
+	current->softirqs_enabled = 1;
+	trace->softirq_enable_ip = ip;
+	trace->softirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(softirqs_on_events);
 	/*
 	 * We are going to turn softirqs on, so set the
@@ -3833,7 +3835,7 @@ void lockdep_softirqs_on(unsigned long ip)
 	 * enabled too:
 	 */
 	if (lockdep_hardirqs_enabled())
-		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
+		mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
 
 	lockdep_recursion_finish();
 }
@@ -3842,8 +3844,6 @@ void lockdep_softirqs_on(unsigned long ip)
  */
 void lockdep_softirqs_off(unsigned long ip)
 {
-	struct task_struct *curr = current;
-
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -3853,13 +3853,15 @@ void lockdep_softirqs_off(unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
-	if (curr->softirqs_enabled) {
+	if (current->softirqs_enabled) {
+		struct irqtrace_events *trace = &current->irqtrace;
+
 		/*
 		 * We have done an ON -> OFF transition:
 		 */
-		curr->softirqs_enabled = 0;
-		curr->softirq_disable_ip = ip;
-		curr->softirq_disable_event = ++curr->irq_events;
+		current->softirqs_enabled = 0;
+		trace->softirq_disable_ip = ip;
+		trace->softirq_disable_event = ++trace->irq_events;
 		debug_atomic_inc(softirqs_off_events);
 		/*
 		 * Whoops, we wanted softirqs off, so why aren't they?