sh: dynamic ftrace support.

First cut at dynamic ftrace support.

Signed-off-by: Matt Fleming <mjf@gentoo.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Matt Fleming <mjf@gentoo.org>, 2008-11-12 20:11:47 +09:00, committed by Paul Mundt
commit fad57feba7, parent ef6aff6884
8 changed files with 240 additions and 44 deletions
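A note on the approach before the diffs: SH has no instruction that can call an arbitrary 32-bit address directly, so an mcount call site loads its target out of a nearby literal pool (the "memory table" referred to throughout this patch) and jumps through a register. Enabling or disabling a tracepoint therefore means rewriting a 32-bit data word, not instruction bytes. A minimal sketch of that idea in plain C (hypothetical helper, not part of the patch):

#include <stdint.h>
#include <string.h>

/*
 * Patch the 32-bit literal a call site loads its target from,
 * verifying the old value first: the same check-then-write pattern
 * that ftrace_modify_code() below implements with probe_kernel_read()
 * and probe_kernel_write().
 */
static int patch_call_slot(uint32_t *slot, uint32_t expected, uint32_t target)
{
	uint32_t cur;

	memcpy(&cur, slot, sizeof(cur));	/* read what is there now */
	if (cur != expected)
		return -1;			/* text changed underneath us */
	memcpy(slot, &target, sizeof(target));	/* install the new target */
	/* the kernel version also flushes the icache over this range */
	return 0;
}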

arch/sh/Kconfig

@@ -25,6 +25,8 @@ config SUPERH32
	select HAVE_KRETPROBES
	select HAVE_ARCH_TRACEHOOK
	select HAVE_FUNCTION_TRACER
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_DYNAMIC_FTRACE

config SUPERH64
	def_bool y if CPU_SH5

arch/sh/include/asm/ftrace.h

@@ -1,8 +1,29 @@
#ifndef __ASM_SH_FTRACE_H
#define __ASM_SH_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

#ifndef __ASSEMBLY__
extern void mcount(void);
#define MCOUNT_ADDR		((long)(mcount))

#ifdef CONFIG_DYNAMIC_FTRACE
#define CALLER_ADDR		((long)(ftrace_caller))
#define STUB_ADDR		((long)(ftrace_stub))

#define MCOUNT_INSN_OFFSET	((STUB_ADDR - CALLER_ADDR) >> 1)
#endif

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* 'addr' is the memory table address. */
	return addr;
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

#endif /* __ASM_SH_FTRACE_H */
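CALLER_ADDR, STUB_ADDR, and therefore MCOUNT_INSN_OFFSET are all link-time constants: the offset is half the byte distance between ftrace_caller and ftrace_stub in arch/sh/lib/mcount.S (see the layout NOTE in that file), and ftrace_update_ftrace_func() uses it to locate the literal word it patches. A rough standalone C restatement of the macro, assuming only that the two labels are visible as symbols:

extern void ftrace_caller(void);
extern void ftrace_stub(void);

/*
 * Same arithmetic as MCOUNT_INSN_OFFSET: both symbols are fixed at
 * link time, so the result folds to a constant.
 */
static inline long mcount_insn_offset(void)
{
	return ((long)ftrace_stub - (long)ftrace_caller) >> 1;
}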

arch/sh/kernel/Makefile

@@ -4,6 +4,11 @@
extra-y	:= head_32.o init_task.o vmlinux.lds

ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_ftrace.o = -pg
endif

obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
	   ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \
	   syscalls_32.o time_32.o topology.o traps.o traps_32.o

@@ -24,5 +29,6 @@ obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
obj-$(CONFIG_KPROBES)		+= kprobes.o
obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o

EXTRA_CFLAGS += -Werror

arch/sh/kernel/entry-common.S (the old mcount implementation is removed here; it moves to arch/sh/lib/mcount.S below)

@@ -371,47 +371,3 @@ syscall_exit:
#endif
7:	.long	do_syscall_trace_enter
8:	.long	do_syscall_trace_leave

#ifdef CONFIG_FUNCTION_TRACER
	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	mov.l	r4, @-r15
	mov.l	r5, @-r15
	mov.l	r6, @-r15
	mov.l	r7, @-r15
	sts.l	pr, @-r15

	mov.l	@(20,r15),r4
	sts	pr, r5

	mov.l	1f, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	@r6, r6
	jsr	@r6
	 nop

skip_trace:
	lds.l	@r15+, pr
	mov.l	@r15+, r7
	mov.l	@r15+, r6
	mov.l	@r15+, r5
	rts
	 mov.l	@r15+, r4

	.align 2
1:	.long	ftrace_trace_function

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#endif /* CONFIG_FUNCTION_TRACER */

arch/sh/kernel/ftrace.c (new file, 109 lines)

@@ -0,0 +1,109 @@
/*
 * Copyright (C) 2008 Matt Fleming <mjf@gentoo.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>

static unsigned char ftrace_nop[] = {
	0x09, 0x00,		/* nop */
	0x09, 0x00,		/* nop */
};

static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

/* 0 if the bytes at ip are the two-nop sequence above */
static int is_sh_nop(unsigned char *ip)
{
	return strncmp(ip, ftrace_nop, sizeof(ftrace_nop));
}

unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	if (addr == CALLER_ADDR)
		__raw_writel(addr + MCOUNT_INSN_OFFSET, ftrace_replaced_code);
	else
		__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/*
	 * If we're trying to nop out a call to a function, we instead
	 * place a call to the address after the memory table.
	 */
	if (is_sh_nop(new_code) == 0)
		__raw_writel(ip + MCOUNT_INSN_SIZE, (unsigned long)new_code);

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)(ip + MCOUNT_INSN_OFFSET), MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip + MCOUNT_INSN_OFFSET, old, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	__raw_writel(0, (unsigned long)data);

	return 0;
}
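For reference, the generic dynamic ftrace core of this era (kernel/trace/ftrace.c, circa 2.6.28) drives the three hooks above under kstop_machine. A simplified sketch of how one call site gets enabled; this is not code from the patch, and sh_enable_site() is a hypothetical name:

#include <linux/ftrace.h>
#include <asm/ftrace.h>

/*
 * Enable tracing of one mcount call site recorded in __mcount_loc.
 * The old/new images come from ftrace_nop_replace() and
 * ftrace_call_replace(); ftrace_modify_code() refuses to patch if the
 * text no longer matches the expected old image.
 */
static int sh_enable_site(struct dyn_ftrace *rec)
{
	unsigned char *old = ftrace_nop_replace();
	unsigned char *new = ftrace_call_replace(rec->ip, MCOUNT_ADDR);

	return ftrace_modify_code(rec->ip, old, new);
}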

arch/sh/lib/Makefile

@@ -11,6 +11,7 @@ memcpy-y := memcpy.o
memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o

lib-$(CONFIG_MMU)		+= copy_page.o clear_page.o
lib-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
lib-y				+= $(memcpy-y)

EXTRA_CFLAGS += -Werror

arch/sh/lib/mcount.S (new file, 90 lines)

@@ -0,0 +1,90 @@
/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008 Paul Mundt
 * Copyright (C) 2008 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>

#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	ftrace_caller
ftrace_caller:
	MCOUNT_ENTER()

	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

	MCOUNT_LEAVE()
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
	.align 2
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop
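The NOTE above is the linchpin of the scheme: ftrace_update_ftrace_func() in arch/sh/kernel/ftrace.c finds the word to patch purely by adding MCOUNT_INSN_OFFSET to the address of ftrace_call, which is intended to land on the .Lftrace_stub literal that "mov.l .Lftrace_stub, r6" reads. A minimal sketch of that address calculation, assuming only the fixed layout above (the helper name is hypothetical):

#include <stdint.h>

/*
 * The literal-pool slot rewritten when the trace callback changes;
 * patching this word retargets the jsr without touching any
 * instruction bytes.
 */
static inline uint32_t *ftrace_call_slot(uintptr_t ftrace_call_addr,
					 long mcount_insn_offset)
{
	return (uint32_t *)(ftrace_call_addr + mcount_insn_offset);
}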

scripts/recordmcount.pl

@@ -167,6 +167,17 @@ if ($arch eq "x86_64") {
    $objcopy .= " -O elf32-i386";
    $cc .= " -m32";

} elsif ($arch eq "sh") {
    $section_regex = "Disassembly of section\\s+(\\S+):";
    $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
    $type = ".long";

    # force flags for this arch
    $ld .= " -m shlelf_linux";
    $objcopy .= " -O elf32-sh-linux";
    $cc .= " -m32";

} else {
    die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
}