kprobes: Don't check the ->break_handler() in generic kprobes code

Don't check the ->break_handler() from the core kprobes code,
because it was only used by jprobes which got removed.

( In followup patches we'll remove the remaining calls in low level
  arch handlers as well and remove the callback altogether. )

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-arch@vger.kernel.org
Link: https://lore.kernel.org/lkml/152942462686.15209.6324404940493598980.stgit@devbox
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Masami Hiramatsu 2018-06-20 01:10:27 +09:00 committed by Ingo Molnar
parent e9bf11a5a6
commit 059053a275
2 changed files with 6 additions and 35 deletions
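For context, the sketch below is not part of this commit: it is a minimal kprobe module that uses only the pre_handler and post_handler callbacks, i.e. the handlers the generic kprobes code keeps dispatching after this change. The probed symbol _do_fork is an arbitrary, kernel-version-dependent example (later kernels renamed it kernel_clone); the rest is the standard <linux/kprobes.h> API.

/* Minimal kprobe sketch (illustrative only, not from this commit). */
#include <linux/module.h>
#include <linux/kprobes.h>

static int sample_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("pre_handler: hit %s\n", p->symbol_name);
        return 0;       /* 0: let the probed instruction run as usual */
}

static void sample_post(struct kprobe *p, struct pt_regs *regs,
                        unsigned long flags)
{
        pr_info("post_handler: back from %s\n", p->symbol_name);
}

static struct kprobe sample_kp = {
        .symbol_name    = "_do_fork",   /* arbitrary example symbol */
        .pre_handler    = sample_pre,
        .post_handler   = sample_post,  /* a post_handler also prevents optimization */
};

static int __init sample_init(void)
{
        return register_kprobe(&sample_kp);
}

static void __exit sample_exit(void)
{
        unregister_kprobe(&sample_kp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");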

@@ -262,7 +262,7 @@ is optimized, that modification is ignored. Thus, if you want to
 tweak the kernel's execution path, you need to suppress optimization,
 using one of the following techniques:
 
-- Specify an empty function for the kprobe's post_handler or break_handler.
+- Specify an empty function for the kprobe's post_handler.
 
  or

@@ -627,8 +627,8 @@ static void optimize_kprobe(struct kprobe *p)
             (kprobe_disabled(p) || kprobes_all_disarmed))
                 return;
 
-        /* Both of break_handler and post_handler are not supported. */
-        if (p->break_handler || p->post_handler)
+        /* kprobes with post_handler can not be optimized */
+        if (p->post_handler)
                 return;
 
         op = container_of(p, struct optimized_kprobe, kp);
@@ -1116,20 +1116,6 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 }
 NOKPROBE_SYMBOL(aggr_fault_handler);
 
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-        struct kprobe *cur = __this_cpu_read(kprobe_instance);
-        int ret = 0;
-
-        if (cur && cur->break_handler) {
-                if (cur->break_handler(cur, regs))
-                        ret = 1;
-        }
-        reset_kprobe_instance();
-        return ret;
-}
-NOKPROBE_SYMBOL(aggr_break_handler);
-
 /* Walks the list and increments nmissed count for multiprobe case */
 void kprobes_inc_nmissed_count(struct kprobe *p)
 {
@@ -1270,24 +1256,15 @@ static void cleanup_rp_inst(struct kretprobe *rp)
 }
 NOKPROBE_SYMBOL(cleanup_rp_inst);
 
-/*
- * Add the new probe to ap->list. Fail if this is the
- * second break_handler at the address
- */
+/* Add the new probe to ap->list */
 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
         BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 
-        if (p->break_handler || p->post_handler)
+        if (p->post_handler)
                 unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
 
-        if (p->break_handler) {
-                if (ap->break_handler)
-                        return -EEXIST;
-                list_add_tail_rcu(&p->list, &ap->list);
-                ap->break_handler = aggr_break_handler;
-        } else
-                list_add_rcu(&p->list, &ap->list);
+        list_add_rcu(&p->list, &ap->list);
         if (p->post_handler && !ap->post_handler)
                 ap->post_handler = aggr_post_handler;
 
@@ -1310,8 +1287,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
         /* We don't care the kprobe which has gone. */
         if (p->post_handler && !kprobe_gone(p))
                 ap->post_handler = aggr_post_handler;
-        if (p->break_handler && !kprobe_gone(p))
-                ap->break_handler = aggr_break_handler;
 
         INIT_LIST_HEAD(&ap->list);
         INIT_HLIST_NODE(&ap->hlist);
@@ -1706,8 +1681,6 @@ static int __unregister_kprobe_top(struct kprobe *p)
                 goto disarmed;
         else {
                 /* If disabling probe has special handlers, update aggrprobe */
-                if (p->break_handler && !kprobe_gone(p))
-                        ap->break_handler = NULL;
                 if (p->post_handler && !kprobe_gone(p)) {
                         list_for_each_entry_rcu(list_p, &ap->list, list) {
                                 if ((list_p != p) && (list_p->post_handler))
@@ -1911,7 +1884,6 @@ int register_kretprobe(struct kretprobe *rp)
         rp->kp.pre_handler = pre_handler_kretprobe;
         rp->kp.post_handler = NULL;
         rp->kp.fault_handler = NULL;
-        rp->kp.break_handler = NULL;
 
         /* Pre-allocate memory for max kretprobe instances */
         if (rp->maxactive <= 0) {
@@ -2034,7 +2006,6 @@ static void kill_kprobe(struct kprobe *p)
         list_for_each_entry_rcu(kp, &p->list, list)
                 kp->flags |= KPROBE_FLAG_GONE;
         p->post_handler = NULL;
-        p->break_handler = NULL;
         kill_optimized_kprobe(p);
 }
 
 /*
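As a companion sketch (again illustrative, not from this commit): kretprobe users only supply the return handler, while register_kretprobe() fills in the pre/post/fault handlers of rp->kp itself, as the hunk above shows. The probed symbol is the same arbitrary, version-dependent example used earlier.

/* Minimal kretprobe sketch (illustrative only). */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int sample_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        unsigned long retval = regs_return_value(regs);

        pr_info("_do_fork returned %lu\n", retval);
        return 0;
}

static struct kretprobe sample_rp = {
        .kp.symbol_name = "_do_fork",   /* arbitrary example symbol */
        .handler        = sample_ret,
        .maxactive      = 20,           /* pre-allocated return-probe instances */
};

static int __init sample_init(void)
{
        return register_kretprobe(&sample_rp);
}

static void __exit sample_exit(void)
{
        unregister_kretprobe(&sample_rp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");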