arch/parisc/kernel/ftrace.c
Josh Poimboeuf 9a7c348ba6 ftrace: Add return address pointer to ftrace_ret_stack
Storing this value will help prevent unwinders from getting out of sync
with the function graph tracer ret_stack.  Now instead of needing a
stateful iterator, they can compare the return address pointer to find
the right ret_stack entry.

Note that an array of 50 ftrace_ret_stack structs is allocated for every
task.  So when an arch implements this, it will add either 200 or 400
bytes of memory usage per task (depending on whether it's a 32-bit or
64-bit platform).
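
For illustration (not something this patch adds), an unwinder that knows
where a return address lives on the stack can undo the graph tracer's
return-trampoline redirection by matching that stack location against the
saved entries.  A minimal sketch follows, relying on the retp field this
patch adds to struct ftrace_ret_stack; the helper name and bounds handling
are illustrative, not taken from the series:

/*
 * Hypothetical sketch: resolve the original return address for a
 * return address slot at *retp.  Names other than ret_stack/retp/
 * curr_ret_stack are assumptions for illustration.
 */
static unsigned long sketch_graph_ret_addr(struct task_struct *task,
					   unsigned long ret,
					   unsigned long *retp)
{
	extern void return_to_handler(void);
	int i;

	/* not redirected by the graph tracer: nothing to undo */
	if (ret != (unsigned long)return_to_handler)
		return ret;

	/* find the entry whose recorded stack slot matches retp */
	for (i = task->curr_ret_stack; i >= 0; i--)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}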

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/a95cfcc39e8f26b89a430c56926af0bb217bc0a1.1471607358.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-08-24 12:15:14 +02:00

/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_DYNAMIC_FTRACE
 *	- add CONFIG_STACK_TRACER
 */
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>

#define __hot __attribute__ ((__section__ (".text.hot")))

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	struct ftrace_graph_ent trace;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return;

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     0, NULL) == -EBUSY)
		return;

	/* activate parisc_return_to_handler() as return point */
	*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
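
/*
 * Common C entry point reached from the arch's assembly tracing stub:
 * parent is the caller's return address, self_addr the traced function,
 * and org_sp_gr3 the original stack pointer, from which the caller's
 * %rp slot can be located.  Dispatches to the plain function tracer
 * first, then to the function graph tracer.
 */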
void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3)
{
	extern ftrace_func_t ftrace_trace_function;  /* depends on CONFIG_DYNAMIC_FTRACE */
	extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

	if (ftrace_trace_function != ftrace_stub) {
		/* struct ftrace_ops *op, struct pt_regs *regs); */
		ftrace_trace_function(parent, self_addr, NULL, NULL);
		return;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
	    ftrace_graph_entry != ftrace_graph_entry_stub) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);

		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}