
arm64: stacktrace: Convert to ARCH_STACKWALK

Historically, architectures have had duplicated code in their stack trace
implementations for filtering what gets traced. To avoid this duplication,
generic code has been provided behind a new interface, arch_stack_walk(),
enabled by selecting ARCH_STACKWALK in Kconfig, which factors all of this
filtering out into the generic stack trace code. Convert arm64 to use this
common infrastructure.
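
For context, the contract behind ARCH_STACKWALK is small: the architecture
walks its own frames and reports each return address to a callback supplied
by the generic code, which takes care of filtering and storage. The
prototypes below match the generic interface in include/linux/stacktrace.h
at the time of this patch; the comments are an editor's summary rather than
part of this commit.

	/* Called for every entry found; returning false stops the walk. */
	typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);

	/*
	 * Provided by each architecture that selects ARCH_STACKWALK; the
	 * generic stack_trace_save*() helpers are built on top of it.
	 */
	void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
			     struct task_struct *task, struct pt_regs *regs);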

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lore.kernel.org/r/20200914153409.25097-4-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Mark Brown 2020-09-14 16:34:09 +01:00 committed by Will Deacon
parent baa2cd4170
commit 5fc57df2f6
2 changed files with 11 additions and 69 deletions

arch/arm64/Kconfig

@@ -29,6 +29,7 @@ config ARM64
 	select ARCH_HAS_SETUP_DMA_OPS
 	select ARCH_HAS_SET_DIRECT_MAP
 	select ARCH_HAS_SET_MEMORY
+	select ARCH_STACKWALK
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_STRICT_MODULE_RWX
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE

arch/arm64/kernel/stacktrace.c

@@ -133,82 +133,23 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 NOKPROBE_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 
-struct stack_trace_data {
-	struct stack_trace *trace;
-	unsigned int no_sched_functions;
-	unsigned int skip;
-};
-
-static bool save_trace(void *d, unsigned long addr)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+		     struct task_struct *task, struct pt_regs *regs)
 {
-	struct stack_trace_data *data = d;
-	struct stack_trace *trace = data->trace;
-
-	if (data->no_sched_functions && in_sched_functions(addr))
-		return false;
-	if (data->skip) {
-		data->skip--;
-		return false;
-	}
-
-	trace->entries[trace->nr_entries++] = addr;
-
-	return trace->nr_entries >= trace->max_entries;
-}
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-	struct stack_trace_data data;
-	struct stackframe frame;
-
-	data.trace = trace;
-	data.skip = trace->skip;
-	data.no_sched_functions = 0;
-
-	start_backtrace(&frame, regs->regs[29], regs->pc);
-	walk_stackframe(current, &frame, save_trace, &data);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_regs);
-
-static noinline void __save_stack_trace(struct task_struct *tsk,
-	struct stack_trace *trace, unsigned int nosched)
-{
-	struct stack_trace_data data;
 	struct stackframe frame;
 
-	if (!try_get_task_stack(tsk))
-		return;
-
-	data.trace = trace;
-	data.skip = trace->skip;
-	data.no_sched_functions = nosched;
-
-	if (tsk != current) {
-		start_backtrace(&frame, thread_saved_fp(tsk),
-				thread_saved_pc(tsk));
-	} else {
-		/* We don't want this function nor the caller */
-		data.skip += 2;
+	if (regs)
+		start_backtrace(&frame, regs->regs[29], regs->pc);
+	else if (task == current)
 		start_backtrace(&frame,
 				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)__save_stack_trace);
-	}
+				(unsigned long)arch_stack_walk);
+	else
+		start_backtrace(&frame, thread_saved_fp(task),
+				thread_saved_pc(task));
 
-	walk_stackframe(tsk, &frame, save_trace, &data);
-	put_task_stack(tsk);
+	walk_stackframe(task, &frame, consume_entry, cookie);
 }
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
-	__save_stack_trace(tsk, trace, 1);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-void save_stack_trace(struct stack_trace *trace)
-{
-	__save_stack_trace(current, trace, 0);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
 #endif
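
As a usage illustration only (the cookie structure and callback below are
hypothetical, not part of this patch or of the generic code), a caller of the
new hook collects entries like this, with the skip and sched-function
filtering that used to live in save_trace() now handled on the generic side:

	/* Hypothetical example consumer of arch_stack_walk(). */
	struct example_cookie {
		unsigned long *entries;
		unsigned int max, nr;
	};

	static bool example_consume(void *cookie, unsigned long addr)
	{
		struct example_cookie *c = cookie;

		if (c->nr >= c->max)
			return false;	/* buffer full: stop unwinding */
		c->entries[c->nr++] = addr;
		return true;		/* keep walking the stack */
	}

	/*
	 * Trace the current task from the current frame:
	 *
	 *	struct example_cookie c = { .entries = buf, .max = 16 };
	 *	arch_stack_walk(example_consume, &c, current, NULL);
	 *
	 * Passing a non-NULL pt_regs instead starts the walk from that
	 * exception frame, as the if (regs) branch above shows.
	 */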