1
0
Fork 0

tracing, Text Edit Lock - kprobes architecture independent support

Use the mutual exclusion provided by the text edit lock in the kprobes code. It
allows coherent manipulation of the kernel code by other subsystems.

Changelog:

- Move the kernel_text_lock/unlock out of the for loops.
- Use text_mutex directly instead of a function.
- Remove whitespace modifications.

(note : kprobes_mutex is always taken outside of text_mutex)

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
LKML-Reference: <49B14306.2080202@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
hifive-unleashed-5.1
Mathieu Desnoyers 2009-03-06 10:36:38 -05:00 committed by Ingo Molnar
parent 0e39ac4446
commit 4460fdad85
1 changed files with 13 additions and 2 deletions

View File

@@ -43,6 +43,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/kdebug.h>
+#include <linux/memory.h>
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -699,9 +700,10 @@ int __kprobes register_kprobe(struct kprobe *p)
 		goto out;
 	}
+	mutex_lock(&text_mutex);
 	ret = arch_prepare_kprobe(p);
 	if (ret)
-		goto out;
+		goto out_unlock_text;
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
@@ -710,6 +712,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if (kprobe_enabled)
 		arch_arm_kprobe(p);
+out_unlock_text:
+	mutex_unlock(&text_mutex);
 out:
 	mutex_unlock(&kprobe_mutex);
@@ -746,8 +750,11 @@ valid_p:
 		 * enabled and not gone - otherwise, the breakpoint would
 		 * already have been removed. We save on flushing icache.
 		 */
-		if (kprobe_enabled && !kprobe_gone(old_p))
+		if (kprobe_enabled && !kprobe_gone(old_p)) {
+			mutex_lock(&text_mutex);
 			arch_disarm_kprobe(p);
+			mutex_unlock(&text_mutex);
+		}
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -1280,12 +1287,14 @@ static void __kprobes enable_all_kprobes(void)
 	if (kprobe_enabled)
 		goto already_enabled;
+	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_gone(p))
 				arch_arm_kprobe(p);
 	}
+	mutex_unlock(&text_mutex);
 	kprobe_enabled = true;
 	printk(KERN_INFO "Kprobes globally enabled\n");
@@ -1310,6 +1319,7 @@ static void __kprobes disable_all_kprobes(void)
 	kprobe_enabled = false;
 	printk(KERN_INFO "Kprobes globally disabled\n");
+	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
@@ -1318,6 +1328,7 @@ static void __kprobes disable_all_kprobes(void)
 		}
 	}
+	mutex_unlock(&text_mutex);
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();