
ftrace/x86: Add save_regs for i386 function calls

Add saving full regs for function tracing on i386.
The saving of regs was influenced by patches sent out by
Masami Hiramatsu.

Link: http://lkml.kernel.org/r/20120711195745.379060003@goodmis.org

Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Steven Rostedt 2012-06-05 20:00:11 -04:00 committed by Steven Rostedt
parent 08f6fba503
commit 4de72395ff
3 changed files with 68 additions and 6 deletions
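
For context, here is a minimal, hedged sketch (not part of this commit) of a module that consumes the new capability: with ARCH_SUPPORTS_FTRACE_SAVE_REGS now available on i386, an ftrace_ops registered with the FTRACE_OPS_FL_SAVE_REGS flag from this patch series receives a populated pt_regs in its callback. The callback and filter choices below are illustrative assumptions.

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>

/* Hypothetical callback: regs is filled in because SAVE_REGS is requested */
static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *regs)
{
	if (regs)
		trace_printk("traced %pS flags=%lx\n", (void *)ip, regs->flags);
}

static struct ftrace_ops my_ops = {
	.func	= my_regs_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

static int __init my_tracer_init(void)
{
	/* Limit tracing to one function to keep the sketch contained */
	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");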


@@ -40,10 +40,8 @@
#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifdef CONFIG_X86_64
#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
#endif
#endif
#ifndef __ASSEMBLY__
extern void mcount(void);
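
With the CONFIG_X86_64 guard gone, ARCH_SUPPORTS_FTRACE_SAVE_REGS is defined for i386 as well whenever CONFIG_DYNAMIC_FTRACE is enabled. A hedged sketch of how generic code can key off that define (the helper name is hypothetical; the real check lives in the core ftrace registration path):

/* Hypothetical helper: refuse regs-saving ops on arches without support */
static int check_save_regs_support(struct ftrace_ops *ops)
{
#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		return -EINVAL;	/* arch cannot hand pt_regs to callbacks */
#endif
	return 0;
}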


@@ -1123,6 +1123,7 @@ ftrace_call:
popl %edx
popl %ecx
popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -1134,6 +1135,73 @@ ftrace_stub:
ret
END(ftrace_caller)
ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/*
* i386 does not save SS and ESP when coming from kernel.
* Instead, to get sp, &regs->sp is used (see ptrace.h).
* Unfortunately, that means eflags must be at the same location
* as the current return ip is. We move the return ip into the
* ip location, and move flags into the return ip location.
*/
pushl 4(%esp) /* save return ip into ip slot */
subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */
pushl $0 /* Load 0 into orig_ax */
pushl %gs
pushl %fs
pushl %es
pushl %ds
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
movl 13*4(%esp), %eax /* Get the saved flags */
movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
/* clobbering return ip */
movl $__KERNEL_CS,13*4(%esp)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
lea (%esp), %ecx
pushl %ecx /* Save pt_regs as 4th parameter */
leal function_trace_op, %ecx /* Save ftrace_ops in 3rd parameter */
GLOBAL(ftrace_regs_call)
call ftrace_stub
addl $4, %esp /* Skip pt_regs */
movl 14*4(%esp), %eax /* Move flags back into cs */
movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
movl 12*4(%esp), %eax /* Get return ip from regs->ip */
addl $MCOUNT_INSN_SIZE, %eax
movl %eax, 14*4(%esp) /* Put return ip back for ret */
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
popl %ds
popl %es
popl %fs
popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
jmp ftrace_ret
ftrace_restore_flags:
popf
jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
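
To make the 12*4/13*4/14*4 displacements above easier to follow, here is an illustrative mapping (not part of the patch) of the frame built by ftrace_regs_caller onto struct pt_regs as laid out for i386 in arch/x86/include/asm/ptrace.h: pushing gs down to bx fills the structure from its highest saved field to its lowest, word 12 is regs->ip, word 13 (regs->cs) initially holds the flags saved by pushf, and word 14 (regs->flags) initially holds the original return address until the shuffle moves flags there.

#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

/* Illustrative compile-time check of the i386 offsets used by the assembly above */
static inline void ftrace_regs_frame_layout(void)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_ax) != 11 * 4); /* pushl $0          */
	BUILD_BUG_ON(offsetof(struct pt_regs, ip)      != 12 * 4); /* return ip copy    */
	BUILD_BUG_ON(offsetof(struct pt_regs, cs)      != 13 * 4); /* gets __KERNEL_CS  */
	BUILD_BUG_ON(offsetof(struct pt_regs, flags)   != 14 * 4); /* gets saved flags  */
}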


@@ -206,7 +206,6 @@ static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
unsigned const char *new_code);
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/*
* Should never be called:
* As it is only called by __ftrace_replace_code() which is called by
@@ -221,7 +220,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
WARN_ON(1);
return -EINVAL;
}
#endif
int ftrace_update_ftrace_func(ftrace_func_t func)
{
@@ -237,7 +235,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(ip, old, new);
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
@@ -245,7 +242,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
new = ftrace_call_replace(ip, (unsigned long)func);
ret = ftrace_modify_code(ip, old, new);
}
#endif
atomic_dec(&modifying_ftrace_code);
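
The hunks above extend ftrace_update_ftrace_func() so it also re-points the call emitted at ftrace_regs_call, going through the same ftrace_call_replace()/ftrace_modify_code() path. As a rough illustration only (hypothetical helper, not the kernel's actual implementation), the replacement instruction is a 5-byte x86 relative call aimed at the new handler:

#include <linux/string.h>

#define REL_CALL_SIZE 5	/* hypothetical name: opcode byte + rel32 */

/* Illustrative only: build "call rel32" from patch site ip to the new target */
static void build_rel_call(unsigned char insn[REL_CALL_SIZE],
			   unsigned long ip, unsigned long target)
{
	int rel32 = (int)(target - (ip + REL_CALL_SIZE));	/* relative to next insn */

	insn[0] = 0xe8;						/* near call opcode */
	memcpy(&insn[1], &rel32, sizeof(rel32));
}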