
ftrace: trace irq disabled critical timings

This patch adds latency tracing for critical timings
(how long interrupts are disabled for).

 "irqsoff" is added to /debugfs/tracing/available_tracers

Note:
  tracing_max_latency
    also holds the max latency for irqsoff (in usecs).
    (defaults to a large number, so latency tracing must be
    explicitly started)

  tracing_thresh
    threshold (in usecs): a trace is always printed when an
    irqs-off section is detected to be longer than this value.
    If tracing_thresh is non-zero, then tracing_max_latency
    is ignored.
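
For illustration, a minimal usage sketch of these controls, assuming
debugfs is mounted at /debugfs as in the paths above (current_tracer
and latency_trace are assumed control files of this era's ftrace
interface; adjust to whatever is present under /debugfs/tracing):

    echo irqsoff > /debugfs/tracing/current_tracer  # select the tracer
    echo 0 > /debugfs/tracing/tracing_max_latency   # arm the maximum search
    # ... run the workload ...
    cat /debugfs/tracing/tracing_max_latency        # worst irqs-off time (usecs)
    cat /debugfs/tracing/latency_trace              # a trace like the examples below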

Here's an example of a trace with ftrace_enabled = 0

=======
preemption latency trace v1.1.5 on 2.6.24-rc7
--------------------------------------------------------------------
 latency: 100 us, #3/3, CPU#1 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
    -----------------
    | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
    -----------------
 => started at: _spin_lock_irqsave+0x2a/0xb7
 => ended at:   _spin_unlock_irqrestore+0x32/0x5f

                 _------=> CPU#
                / _-----=> irqs-off
               | / _----=> need-resched
               || / _---=> hardirq/softirq
               ||| / _--=> preempt-depth
               |||| /
               |||||     delay
   cmd     pid ||||| time  |   caller
      \   /    |||||   \   |   /
 swapper-0     1d.s3    0us+: _spin_lock_irqsave+0x2a/0xb7 (e1000_update_stats+0x47/0x64c [e1000])
 swapper-0     1d.s3  100us : _spin_unlock_irqrestore+0x32/0x5f (e1000_update_stats+0x641/0x64c [e1000])
 swapper-0     1d.s3  100us : trace_hardirqs_on_caller+0x75/0x89 (_spin_unlock_irqrestore+0x32/0x5f)

vim:ft=help
=======

And this is a trace with ftrace_enabled == 1

=======
preemption latency trace v1.1.5 on 2.6.24-rc7
--------------------------------------------------------------------
 latency: 102 us, #12/12, CPU#1 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
    -----------------
    | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
    -----------------
 => started at: _spin_lock_irqsave+0x2a/0xb7
 => ended at:   _spin_unlock_irqrestore+0x32/0x5f

                 _------=> CPU#
                / _-----=> irqs-off
               | / _----=> need-resched
               || / _---=> hardirq/softirq
               ||| / _--=> preempt-depth
               |||| /
               |||||     delay
   cmd     pid ||||| time  |   caller
      \   /    |||||   \   |   /
 swapper-0     1dNs3    0us+: _spin_lock_irqsave+0x2a/0xb7 (e1000_update_stats+0x47/0x64c [e1000])
 swapper-0     1dNs3   46us : e1000_read_phy_reg+0x16/0x225 [e1000] (e1000_update_stats+0x5e2/0x64c [e1000])
 swapper-0     1dNs3   46us : e1000_swfw_sync_acquire+0x10/0x99 [e1000] (e1000_read_phy_reg+0x49/0x225 [e1000])
 swapper-0     1dNs3   46us : e1000_get_hw_eeprom_semaphore+0x12/0xa6 [e1000] (e1000_swfw_sync_acquire+0x36/0x99 [e1000])
 swapper-0     1dNs3   47us : __const_udelay+0x9/0x47 (e1000_read_phy_reg+0x116/0x225 [e1000])
 swapper-0     1dNs3   47us+: __delay+0x9/0x50 (__const_udelay+0x45/0x47)
 swapper-0     1dNs3   97us : preempt_schedule+0xc/0x84 (__delay+0x4e/0x50)
 swapper-0     1dNs3   98us : e1000_swfw_sync_release+0xc/0x55 [e1000] (e1000_read_phy_reg+0x211/0x225 [e1000])
 swapper-0     1dNs3   99us+: e1000_put_hw_eeprom_semaphore+0x9/0x35 [e1000] (e1000_swfw_sync_release+0x50/0x55 [e1000])
 swapper-0     1dNs3  101us : _spin_unlock_irqrestore+0xe/0x5f (e1000_update_stats+0x641/0x64c [e1000])
 swapper-0     1dNs3  102us : _spin_unlock_irqrestore+0x32/0x5f (e1000_update_stats+0x641/0x64c [e1000])
 swapper-0     1dNs3  102us : trace_hardirqs_on_caller+0x75/0x89 (_spin_unlock_irqrestore+0x32/0x5f)

vim:ft=help
=======

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Steven Rostedt 2008-05-12 21:20:42 +02:00 committed by Thomas Gleixner
parent 352ad25aa4
commit 81d68a96a3
13 changed files with 531 additions and 31 deletions

--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c

@@ -165,7 +165,10 @@ void cpu_idle(void)
                         */
                        local_irq_disable();
                        enter_idle();
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
                        idle();
+                       start_critical_timings();
                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */

--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile

@@ -5,6 +5,7 @@
 obj-$(CONFIG_SMP) := msr-on-cpu.o

 lib-y := delay_$(BITS).o
+lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
 lib-y += memcpy_$(BITS).o

--- /dev/null
+++ b/arch/x86/lib/thunk_32.S (new file)

@@ -0,0 +1,47 @@
/*
 * Trampoline to trace irqs off. (otherwise CALLER_ADDR1 might crash)
 * Copyright 2008 by Steven Rostedt, Red Hat, Inc
 *  (inspired by Andi Kleen's thunk_64.S)
 * Subject to the GNU public license, v.2. No warranty of any kind.
 */

#include <linux/linkage.h>

#define ARCH_TRACE_IRQS_ON              \
        pushl %eax;                     \
        pushl %ecx;                     \
        pushl %edx;                     \
        call trace_hardirqs_on;         \
        popl %edx;                      \
        popl %ecx;                      \
        popl %eax;

#define ARCH_TRACE_IRQS_OFF             \
        pushl %eax;                     \
        pushl %ecx;                     \
        pushl %edx;                     \
        call trace_hardirqs_off;        \
        popl %edx;                      \
        popl %ecx;                      \
        popl %eax;

#ifdef CONFIG_TRACE_IRQFLAGS
        /* put return address in eax (arg1) */
        .macro thunk_ra name,func
        .globl \name
\name:
        pushl %eax
        pushl %ecx
        pushl %edx
        /* Place EIP in the arg1 */
        movl 3*4(%esp), %eax
        call \func
        popl %edx
        popl %ecx
        popl %eax
        ret
        .endm

        thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
        thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller
#endif

--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S

@@ -2,6 +2,7 @@
  * Save registers before calling assembly functions. This avoids
  * disturbance of register allocation in some inline assembly constructs.
  * Copyright 2001,2002 by Andi Kleen, SuSE Labs.
+ * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
  * Subject to the GNU public license, v.2. No warranty of any kind.
  */
@@ -42,8 +43,22 @@
 #endif

 #ifdef CONFIG_TRACE_IRQFLAGS
-        thunk trace_hardirqs_on_thunk,trace_hardirqs_on
-        thunk trace_hardirqs_off_thunk,trace_hardirqs_off
+        /* put return address in rdi (arg1) */
+        .macro thunk_ra name,func
+        .globl \name
+\name:
+        CFI_STARTPROC
+        SAVE_ARGS
+        /* SAVE_ARGS pushes 9 elements */
+        /* the next element would be the rip */
+        movq 9*8(%rsp), %rdi
+        call \func
+        jmp  restore
+        CFI_ENDPROC
+        .endm
+
+        thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
+        thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller
 #endif

 #ifdef CONFIG_DEBUG_LOCK_ALLOC

--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h

@@ -179,8 +179,6 @@ static inline void trace_hardirqs_fixup(void)
  * have a reliable stack. x86_64 only.
  */
 #define SWAPGS_UNSAFE_STACK     swapgs
-#define ARCH_TRACE_IRQS_ON      call trace_hardirqs_on_thunk
-#define ARCH_TRACE_IRQS_OFF     call trace_hardirqs_off_thunk
 #define ARCH_LOCKDEP_SYS_EXIT   call lockdep_sys_exit_thunk
 #define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
         TRACE_IRQS_ON; \
@@ -192,24 +190,6 @@ static inline void trace_hardirqs_fixup(void)
         TRACE_IRQS_OFF;
 #else
-#define ARCH_TRACE_IRQS_ON              \
-        pushl %eax;                     \
-        pushl %ecx;                     \
-        pushl %edx;                     \
-        call trace_hardirqs_on;         \
-        popl %edx;                      \
-        popl %ecx;                      \
-        popl %eax;
-
-#define ARCH_TRACE_IRQS_OFF             \
-        pushl %eax;                     \
-        pushl %ecx;                     \
-        pushl %edx;                     \
-        call trace_hardirqs_off;        \
-        popl %edx;                      \
-        popl %ecx;                      \
-        popl %eax;
-
 #define ARCH_LOCKDEP_SYS_EXIT \
         pushl %eax; \
         pushl %ecx; \
@@ -223,8 +203,8 @@ static inline void trace_hardirqs_fixup(void)
 #endif

 #ifdef CONFIG_TRACE_IRQFLAGS
-# define TRACE_IRQS_ON          ARCH_TRACE_IRQS_ON
-# define TRACE_IRQS_OFF         ARCH_TRACE_IRQS_OFF
+# define TRACE_IRQS_ON          call trace_hardirqs_on_thunk;
+# define TRACE_IRQS_OFF         call trace_hardirqs_off_thunk;
 #else
 # define TRACE_IRQS_ON
 # define TRACE_IRQS_OFF

--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h

@@ -50,4 +50,12 @@ extern void mcount(void);
 # define CALLER_ADDR5 0UL
 #endif

+#ifdef CONFIG_IRQSOFF_TRACER
+  extern void notrace time_hardirqs_on(unsigned long a0, unsigned long a1);
+  extern void notrace time_hardirqs_off(unsigned long a0, unsigned long a1);
+#else
+# define time_hardirqs_on(a0, a1)       do { } while (0)
+# define time_hardirqs_off(a0, a1)      do { } while (0)
+#endif
+
 #endif /* _LINUX_FTRACE_H */

--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h

@@ -12,10 +12,10 @@
 #define _LINUX_TRACE_IRQFLAGS_H

 #ifdef CONFIG_TRACE_IRQFLAGS
-  extern void trace_hardirqs_on(void);
-  extern void trace_hardirqs_off(void);
   extern void trace_softirqs_on(unsigned long ip);
   extern void trace_softirqs_off(unsigned long ip);
+  extern void trace_hardirqs_on(void);
+  extern void trace_hardirqs_off(void);
 # define trace_hardirq_context(p)       ((p)->hardirq_context)
 # define trace_softirq_context(p)       ((p)->softirq_context)
 # define trace_hardirqs_enabled(p)      ((p)->hardirqs_enabled)
@@ -41,6 +41,14 @@
 # define INIT_TRACE_IRQFLAGS
 #endif

+#ifdef CONFIG_IRQSOFF_TRACER
+ extern void stop_critical_timings(void);
+ extern void start_critical_timings(void);
+#else
+# define stop_critical_timings() do { } while (0)
+# define start_critical_timings() do { } while (0)
+#endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 #include <asm/irqflags.h>

--- a/kernel/fork.c
+++ b/kernel/fork.c

@@ -909,7 +909,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,

        rt_mutex_init_task(p);

-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_LOCKDEP)
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif

--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c

@@ -39,6 +39,7 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
+#include <linux/ftrace.h>

 #include <asm/sections.h>
@@ -982,7 +983,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
        return 1;
 }

-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1680,7 +1681,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit new_bit);

-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)

 /*
  * print irq inversion bug:
@@ -2013,11 +2014,13 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on(void)
+void notrace trace_hardirqs_on_caller(unsigned long a0)
 {
        struct task_struct *curr = current;
        unsigned long ip;

+       time_hardirqs_on(CALLER_ADDR0, a0);
+
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -2055,16 +2058,23 @@ void trace_hardirqs_on(void)
        curr->hardirq_enable_event = ++curr->irq_events;
        debug_atomic_inc(&hardirqs_on_events);
 }
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+void notrace trace_hardirqs_on(void)
+{
+       trace_hardirqs_on_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_on);

 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off(void)
+void notrace trace_hardirqs_off_caller(unsigned long a0)
 {
        struct task_struct *curr = current;

+       time_hardirqs_off(CALLER_ADDR0, a0);
+
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -2082,7 +2092,12 @@ void trace_hardirqs_off(void)
        } else
                debug_atomic_inc(&redundant_hardirqs_off);
 }
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+void notrace trace_hardirqs_off(void)
+{
+       trace_hardirqs_off_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_off);

 /*
/* /*

--- a/kernel/printk.c
+++ b/kernel/printk.c

@@ -1041,7 +1041,9 @@ void release_console_sem(void)
                _log_end = log_end;
                con_start = log_end;            /* Flush */
                spin_unlock(&logbuf_lock);
+               stop_critical_timings();        /* don't trace print latency */
                call_console_drivers(_con_start, _log_end);
+               start_critical_timings();
                local_irq_restore(flags);
        }
        console_locked = 0;

--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig

@@ -26,6 +26,24 @@ config FTRACE
          (the bootup default), then the overhead of the instructions is very
          small and not measurable even in micro-benchmarks.

+config IRQSOFF_TRACER
+       bool "Interrupts-off Latency Tracer"
+       default n
+       depends on TRACE_IRQFLAGS_SUPPORT
+       depends on GENERIC_TIME
+       select TRACE_IRQFLAGS
+       select TRACING
+       select TRACER_MAX_TRACE
+       help
+         This option measures the time spent in irqs-off critical
+         sections, with microsecond accuracy.
+
+         The default measurement method is a maximum search, which is
+         disabled by default and can be runtime (re-)started
+         via:
+
+             echo 0 > /debugfs/tracing/tracing_max_latency
+
 config SCHED_TRACER
        bool "Scheduling Latency Tracer"
        depends on DEBUG_KERNEL

--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile

@@ -3,6 +3,7 @@ obj-$(CONFIG_FTRACE) += libftrace.o
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o

 libftrace-y := ftrace.o

--- /dev/null
+++ b/kernel/trace/trace_irqsoff.c (new file)

@@ -0,0 +1,402 @@
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array       *irqsoff_trace __read_mostly;
static int                      tracer_enabled __read_mostly;

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (likely(!tracer_enabled))
                return;

        local_save_flags(flags);

        if (!irqs_disabled_flags(flags))
                return;

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                ftrace(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

static void notrace
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        unsigned long latency, t0, t1;
        cycle_t T0, T1, T2, delta;
        unsigned long flags;

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        if (!report_latency(delta))
                goto out;

        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
        /*
         * Update the timestamp, because the trace entry above
         * might change it (it can only get larger so the latency
         * is fair to be reported):
         */
        T2 = now(cpu);
        delta = T2-T0;

        latency = nsecs_to_usecs(delta);

        if (data->critical_sequence != max_sequence)
                goto out;

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        data->critical_end = parent_ip;

        update_max_tr_single(tr, current, cpu);

        if (tracing_thresh)
                printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
                       "violates %lu us threshold.\n"
                       " => started at timestamp %lu: ",
                                current->comm, current->pid,
                                raw_smp_processor_id(),
                                latency, nsecs_to_usecs(tracing_thresh), t0);
        else
                printk(KERN_INFO "(%16s-%-5d|#%d):"
                       " new %lu us maximum-latency "
                       "critical section.\n => started at timestamp %lu: ",
                                current->comm, current->pid,
                                raw_smp_processor_id(),
                                latency, t0);

        print_symbol(KERN_CONT "<%s>\n", data->critical_start);
        printk(KERN_CONT " => ended at timestamp %lu: ", t1);
        print_symbol(KERN_CONT "<%s>\n", data->critical_end);
        dump_stack();
        t1 = nsecs_to_usecs(now(cpu));
        printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);

        max_sequence++;

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = now(cpu);
        tracing_reset(data);
        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
}

static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (unlikely(!data) || unlikely(!data->trace) ||
            data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = now(cpu);
        data->critical_start = parent_ip;
        tracing_reset(data);

        local_save_flags(flags);
        ftrace(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
}

static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (unlikely(!data) || unlikely(!data->trace) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);
        local_save_flags(flags);
        ftrace(tr, data, ip, parent_ip, flags);
        check_critical_timing(tr, data, parent_ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

void notrace start_critical_timings(void)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

#ifdef CONFIG_PROVE_LOCKING
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
        unsigned long flags;

        local_save_flags(flags);

        if (irqs_disabled_flags(flags))
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */

static void start_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 1;
        register_ftrace_function(&trace_ops);
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        unregister_ftrace_function(&trace_ops);
        tracer_enabled = 0;
}

static void irqsoff_tracer_init(struct trace_array *tr)
{
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        if (tr->ctrl)
                start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
        if (tr->ctrl)
                start_irqsoff_tracer(tr);
        else
                stop_irqsoff_tracer(tr);
}

static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
        /* stop the trace while dumping */
        if (iter->tr->ctrl)
                stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
        if (iter->tr->ctrl)
                start_irqsoff_tracer(iter->tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
        .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
};

__init static int init_irqsoff_tracer(void)
{
        register_tracer(&irqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);