
ia64/kprobes: Remove jprobe implementation

Remove the arch-dependent setjmp/longjmp functions and the unused
jprobe fields in kprobe_ctlblk from arch/ia64.

Note that since the ia64 jprobes code is a bit different from that of
other architectures, this keeps __IA64_BREAK_JPROBE for checking the
->break_handler(). It will be removed along with the ->break_handler()
calls afterwards.
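
For context, on pre-4.18 kernels the generic kprobes core wired these arch
hooks up roughly as follows (a simplified sketch of register_jprobe() with
locking and error handling omitted):

/*
 * Simplified sketch: generic jprobe registration installed the two
 * arch-provided handlers that are removed per architecture in this series.
 */
int register_jprobe(struct jprobe *jp)
{
	/* runs at the breakpoint placed on the probed function's entry */
	jp->kp.pre_handler = setjmp_pre_handler;
	/* runs when the jprobe handler finishes via jprobe_return() */
	jp->kp.break_handler = longjmp_break_handler;
	return register_kprobe(&jp->kp);
}

On ia64, setjmp_pre_handler() additionally saves the RSE-stacked argument
registers before redirecting execution to the jprobe entry (see the
kprobes.c hunk below).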

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Link: https://lore.kernel.org/lkml/152942448152.15209.2026051332977587306.stgit@devbox
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Masami Hiramatsu 2018-06-20 01:08:01 +09:00 committed by Ingo Molnar
parent c530e2f02e
commit 0aeaf6b3a3
4 changed files with 1 addition and 163 deletions

arch/ia64/include/asm/kprobes.h

@@ -82,8 +82,6 @@ struct prev_kprobe {
#define ARCH_PREV_KPROBE_SZ 2
struct kprobe_ctlblk {
unsigned long kprobe_status;
struct pt_regs jprobe_saved_regs;
unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
unsigned long *bsp;
unsigned long cfm;
atomic_t prev_kprobe_index;

arch/ia64/kernel/Makefile

@@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o

arch/ia64/kernel/jprobes.S

@@ -1,90 +0,0 @@
/*
* Jprobe specific operations
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) Intel Corporation, 2005
*
* 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
* <anil.s.keshavamurthy@intel.com> initial implementation
*
* Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a
* probe to be inserted into the beginning of a function call. The fundamental
* difference between a jprobe and a kprobe is the jprobe handler is executed
* in the same context as the target function, while the kprobe handlers
* are executed in interrupt context.
*
* For jprobes we initially gain control by placing a break point in the
* first instruction of the targeted function. When we catch that specific
* break, we:
* * set the return address to our jprobe_inst_return() function
* * jump to the jprobe handler function
*
* Since we fixed up the return address, the jprobe handler will return to our
* jprobe_inst_return() function, giving us control again. At this point we
* are back in the parents frame marker, so we do yet another call to our
* jprobe_break() function to fix up the frame marker as it would normally
* exist in the target function.
*
* Our jprobe_return function then transfers control back to kprobes.c by
* executing a break instruction using one of our reserved numbers. When we
* catch that break in kprobes.c, we continue like we do for a normal kprobe
* by single stepping the emulated instruction, and then returning execution
* to the correct location.
*/
#include <asm/asmmacro.h>
#include <asm/break.h>
/*
* void jprobe_break(void)
*/
.section .kprobes.text, "ax"
ENTRY(jprobe_break)
break.m __IA64_BREAK_JPROBE
END(jprobe_break)
/*
* void jprobe_inst_return(void)
*/
GLOBAL_ENTRY(jprobe_inst_return)
br.call.sptk.many b0=jprobe_break
END(jprobe_inst_return)
GLOBAL_ENTRY(invalidate_stacked_regs)
movl r16=invalidate_restore_cfm
;;
mov b6=r16
;;
br.ret.sptk.many b6
;;
invalidate_restore_cfm:
mov r16=ar.rsc
;;
mov ar.rsc=r0
;;
loadrs
;;
mov ar.rsc=r16
;;
br.cond.sptk.many rp
END(invalidate_stacked_regs)
GLOBAL_ENTRY(flush_register_stack)
// flush dirty regs to backing store (must be first in insn group)
flushrs
;;
br.ret.sptk.many rp
END(flush_register_stack)
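
To make the control flow described in the header comment above concrete, a
jprobe user on a pre-4.18 kernel looked roughly like the sketch below; the
probed symbol and its signature are illustrative, loosely mirroring the old
samples/kprobes/jprobe_example.c. The handler runs in the probed function's
context, between setjmp_pre_handler() and the __IA64_BREAK_JPROBE break
raised by jprobe_break:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Handler with the same signature as the probed function; it sees the
 * probed function's arguments before the real body executes. */
static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
		      unsigned long stack_size, int __user *parent_tidptr,
		      int __user *child_tidptr)
{
	pr_info("jprobe: clone_flags=0x%lx\n", clone_flags);

	/* A jprobe handler must always end with jprobe_return(); on ia64
	 * this is a no-op, and control flows back through
	 * jprobe_inst_return()/jprobe_break above instead. */
	jprobe_return();
	return 0;
}

static struct jprobe my_jprobe = {
	.entry = j_do_fork,
	.kp = {
		.symbol_name = "_do_fork",
	},
};

static int __init jprobe_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit jprobe_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jprobe_init);
module_exit(jprobe_exit);
MODULE_LICENSE("GPL");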

arch/ia64/kernel/kprobes.c

@@ -35,8 +35,6 @@
#include <asm/sections.h>
#include <asm/exception.h>
extern void jprobe_inst_return(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -1040,74 +1038,6 @@ unsigned long arch_deref_entry_point(void *entry)
return ((struct fnptr *)entry)->ip;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr = arch_deref_entry_point(jp->entry);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct param_bsp_cfm pa;
int bytes;
/*
* Callee owns the argument space and could overwrite it, eg
* tail call optimization. So to be absolutely safe
* we save the argument space before transferring the control
* to instrumented jprobe function which runs in
* the process context
*/
pa.ip = regs->cr_iip;
unw_init_running(ia64_get_bsp_cfm, &pa);
bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
- (char *)pa.bsp;
memcpy( kcb->jprobes_saved_stacked_regs,
pa.bsp,
bytes );
kcb->bsp = pa.bsp;
kcb->cfm = pa.cfm;
/* save architectural state */
kcb->jprobe_saved_regs = *regs;
/* after rfi, execute the jprobe instrumented function */
regs->cr_iip = addr & ~0xFULL;
ia64_psr(regs)->ri = addr & 0xf;
regs->r1 = ((struct fnptr *)(jp->entry))->gp;
/*
* fix the return address to our jprobe_inst_return() function
* in the jprobes.S file
*/
regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
return 1;
}
/* ia64 does not need this */
void __kprobes jprobe_return(void)
{
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
int bytes;
/* restoring architectural state */
*regs = kcb->jprobe_saved_regs;
/* restoring the original argument space */
flush_register_stack();
bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
- (char *)kcb->bsp;
memcpy( kcb->bsp,
kcb->jprobes_saved_stacked_regs,
bytes );
invalidate_stacked_regs();
preempt_enable_no_resched();
return 1;
}
static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};