1
0
Fork 0
alistair23-linux/arch/i386/kernel/kprobes.c

751 lines
22 KiB
C
Raw Normal View History

/*
* Kernel Probes (KProbes)
* arch/i386/kernel/kprobes.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
* Probes initial implementation ( includes contributions from
* Rusty Russell).
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
* interface to access function arguments.
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
* 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
* <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
* <prasanna@in.ibm.com> added function-return probes.
*/
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
[PATCH] Move kprobe [dis]arming into arch specific code The architecture independent code of the current kprobes implementation is arming and disarming kprobes at registration time. The problem is that the code is assuming that arming and disarming is a just done by a simple write of some magic value to an address. This is problematic for ia64 where our instructions look more like structures, and we can not insert break points by just doing something like: *p->addr = BREAKPOINT_INSTRUCTION; The following patch to 2.6.12-rc4-mm2 adds two new architecture dependent functions: * void arch_arm_kprobe(struct kprobe *p) * void arch_disarm_kprobe(struct kprobe *p) and then adds the new functions for each of the architectures that already implement kprobes (spar64/ppc64/i386/x86_64). I thought arch_[dis]arm_kprobe was the most descriptive of what was really happening, but each of the architectures already had a disarm_kprobe() function that was really a "disarm and do some other clean-up items as needed when you stumble across a recursive kprobe." So... I took the liberty of changing the code that was calling disarm_kprobe() to call arch_disarm_kprobe(), and then do the cleanup in the block of code dealing with the recursive kprobe case. So far this patch as been tested on i386, x86_64, and ppc64, but still needs to be tested in sparc64. Signed-off-by: Rusty Lynch <rusty.lynch@intel.com> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:25 -06:00
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/uaccess.h>
void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
/* insert a jmp code */
static __always_inline void set_jmp_op(void *from, void *to)
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
{
struct __arch_jmp_op {
char op;
long raddr;
} __attribute__((packed)) *jop;
jop = (struct __arch_jmp_op *)from;
jop->raddr = (long)(to) - ((long)(from) + 5);
jop->op = RELATIVEJUMP_INSTRUCTION;
}
/*
* returns non-zero if opcodes can be boosted.
*/
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
(b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
(b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
(bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
<< (row % 32))
/*
* Undefined/reserved opcodes, conditional jump, Opcode Extension
* Groups, and some special opcodes can not be boost.
*/
static const unsigned long twobyte_is_boostable[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0) /* f0 */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
#undef W
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
retry:
if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
return 0;
opcode = *(opcodes++);
/* 2nd-byte opcode */
if (opcode == 0x0f) {
if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
return 0;
return test_bit(*opcodes, twobyte_is_boostable);
}
switch (opcode & 0xf0) {
case 0x60:
if (0x63 < opcode && opcode < 0x67)
goto retry; /* prefixes */
/* can't boost Address-size override and bound */
return (opcode != 0x62 && opcode != 0x67);
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
case 0x70:
return 0; /* can't boost conditional jump */
case 0xc0:
/* can't boost software-interruptions */
return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
case 0xd0:
/* can boost AA* and XLAT */
return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
case 0xe0:
/* can boost in/out and absolute jmps */
return ((opcode & 0x04) || opcode == 0xea);
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
case 0xf0:
if ((opcode & 0x0c) == 0 && opcode != 0xf1)
goto retry; /* lock/rep(ne) prefix */
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
/* clear and set flags can be boost */
return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
default:
if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
goto retry; /* prefixes */
/* can't boost CS override and call */
return (opcode != 0x2e && opcode != 0x9a);
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
}
}
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
switch (opcode) {
case 0xfa: /* cli */
case 0xfb: /* sti */
case 0xcf: /* iret/iretd */
case 0x9d: /* popf/popfd */
return 1;
}
return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* insn: must be on special executable page on i386. */
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
return -ENOMEM;
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
[PATCH] Move kprobe [dis]arming into arch specific code The architecture independent code of the current kprobes implementation is arming and disarming kprobes at registration time. The problem is that the code is assuming that arming and disarming is a just done by a simple write of some magic value to an address. This is problematic for ia64 where our instructions look more like structures, and we can not insert break points by just doing something like: *p->addr = BREAKPOINT_INSTRUCTION; The following patch to 2.6.12-rc4-mm2 adds two new architecture dependent functions: * void arch_arm_kprobe(struct kprobe *p) * void arch_disarm_kprobe(struct kprobe *p) and then adds the new functions for each of the architectures that already implement kprobes (spar64/ppc64/i386/x86_64). I thought arch_[dis]arm_kprobe was the most descriptive of what was really happening, but each of the architectures already had a disarm_kprobe() function that was really a "disarm and do some other clean-up items as needed when you stumble across a recursive kprobe." So... I took the liberty of changing the code that was calling disarm_kprobe() to call arch_disarm_kprobe(), and then do the cleanup in the block of code dealing with the recursive kprobe case. So far this patch as been tested on i386, x86_64, and ppc64, but still needs to be tested in sparc64. Signed-off-by: Rusty Lynch <rusty.lynch@intel.com> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:25 -06:00
p->opcode = *p->addr;
if (can_boost(p->addr)) {
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
p->ainsn.boostable = 0;
} else {
p->ainsn.boostable = -1;
}
return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
[PATCH] Move kprobe [dis]arming into arch specific code The architecture independent code of the current kprobes implementation is arming and disarming kprobes at registration time. The problem is that the code is assuming that arming and disarming is a just done by a simple write of some magic value to an address. This is problematic for ia64 where our instructions look more like structures, and we can not insert break points by just doing something like: *p->addr = BREAKPOINT_INSTRUCTION; The following patch to 2.6.12-rc4-mm2 adds two new architecture dependent functions: * void arch_arm_kprobe(struct kprobe *p) * void arch_disarm_kprobe(struct kprobe *p) and then adds the new functions for each of the architectures that already implement kprobes (spar64/ppc64/i386/x86_64). I thought arch_[dis]arm_kprobe was the most descriptive of what was really happening, but each of the architectures already had a disarm_kprobe() function that was really a "disarm and do some other clean-up items as needed when you stumble across a recursive kprobe." So... I took the liberty of changing the code that was calling disarm_kprobe() to call arch_disarm_kprobe(), and then do the cleanup in the block of code dealing with the recursive kprobe case. So far this patch as been tested on i386, x86_64, and ppc64, but still needs to be tested in sparc64. Signed-off-by: Rusty Lynch <rusty.lynch@intel.com> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:25 -06:00
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
[PATCH] Move kprobe [dis]arming into arch specific code The architecture independent code of the current kprobes implementation is arming and disarming kprobes at registration time. The problem is that the code is assuming that arming and disarming is a just done by a simple write of some magic value to an address. This is problematic for ia64 where our instructions look more like structures, and we can not insert break points by just doing something like: *p->addr = BREAKPOINT_INSTRUCTION; The following patch to 2.6.12-rc4-mm2 adds two new architecture dependent functions: * void arch_arm_kprobe(struct kprobe *p) * void arch_disarm_kprobe(struct kprobe *p) and then adds the new functions for each of the architectures that already implement kprobes (spar64/ppc64/i386/x86_64). I thought arch_[dis]arm_kprobe was the most descriptive of what was really happening, but each of the architectures already had a disarm_kprobe() function that was really a "disarm and do some other clean-up items as needed when you stumble across a recursive kprobe." So... I took the liberty of changing the code that was calling disarm_kprobe() to call arch_disarm_kprobe(), and then do the cleanup in the block of code dealing with the recursive kprobe case. So far this patch as been tested on i386, x86_64, and ppc64, but still needs to be tested in sparc64. Signed-off-by: Rusty Lynch <rusty.lynch@intel.com> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:25 -06:00
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
mutex_unlock(&kprobe_mutex);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = p;
kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
= (regs->eflags & (TF_MASK | IF_MASK));
if (is_IF_modifier(p->opcode))
kcb->kprobe_saved_eflags &= ~IF_MASK;
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
regs->eflags |= TF_MASK;
regs->eflags &= ~IF_MASK;
/*single step inline if the instruction is an int3*/
if (p->opcode == BREAKPOINT_INSTRUCTION)
regs->eip = (unsigned long)p->addr;
else
regs->eip = (unsigned long)p->ainsn.insn;
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
{
unsigned long *sara = (unsigned long *)&regs->esp;
struct kretprobe_instance *ri;
if ((ri = get_free_rp_inst(rp)) != NULL) {
ri->rp = rp;
ri->task = current;
ri->ret_addr = (kprobe_opcode_t *) *sara;
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
add_rp_inst(ri);
} else {
rp->nmissed++;
}
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
}
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled thorough out this function.
*/
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
kprobe_opcode_t *addr;
struct kprobe_ctlblk *kcb;
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
#ifdef CONFIG_PREEMPT
unsigned pre_preempt_count = preempt_count();
#else
unsigned pre_preempt_count = 1;
#endif
addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
regs->eflags |= kcb->kprobe_saved_eflags;
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* We here save the original kprobes variables and
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
regs->eip -= sizeof(kprobe_opcode_t);
ret = 1;
goto no_kprobe;
}
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
if (*addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
* Back up over the (now missing) int3 and run
* the original instruction.
*/
regs->eip -= sizeof(kprobe_opcode_t);
ret = 1;
}
/* Not one of ours: let kernel handle it */
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
return 1;
ss_probe:
if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
regs->eip = (unsigned long)p->ainsn.insn;
preempt_enable_no_resched();
return 1;
}
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
/*
* For function-return probes, init_kprobes() establishes a probepoint
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
void __kprobes kretprobe_trampoline_holder(void)
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
{
asm volatile ( ".global kretprobe_trampoline\n"
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
"kretprobe_trampoline: \n"
" pushf\n"
/* skip cs, eip, orig_eax, es, ds */
" subl $20, %esp\n"
" pushl %eax\n"
" pushl %ebp\n"
" pushl %edi\n"
" pushl %esi\n"
" pushl %edx\n"
" pushl %ecx\n"
" pushl %ebx\n"
" movl %esp, %eax\n"
" call trampoline_handler\n"
/* move eflags to cs */
" movl 48(%esp), %edx\n"
" movl %edx, 44(%esp)\n"
/* save true return address on eflags */
" movl %eax, 48(%esp)\n"
" popl %ebx\n"
" popl %ecx\n"
" popl %edx\n"
" popl %esi\n"
" popl %edi\n"
" popl %ebp\n"
" popl %eax\n"
/* skip eip, orig_eax, es, ds */
" addl $16, %esp\n"
" popf\n"
" ret\n");
}
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
/*
* Called from kretprobe_trampoline
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
*/
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
/*
* It is possible to have multiple instances associated with a given
* task either because an multiple functions in the call path
* have a return probe installed on them, and/or more then one return
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler){
__get_cpu_var(current_kprobe) = &ri->rp->kp;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
orig_ret_address = (unsigned long)ri->ret_addr;
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
recycle_rp_inst(ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
}
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
spin_unlock_irqrestore(&kretprobe_lock, flags);
return (void*)orig_ret_address;
[PATCH] kprobes: function-return probes This patch adds function-return probes to kprobes for the i386 architecture. This enables you to establish a handler to be run when a function returns. 1. API Two new functions are added to kprobes: int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); 2. Registration and unregistration 2.1 Register To register a function-return probe, the user populates the following fields in a kretprobe object and calls register_kretprobe() with the kretprobe address as an argument: kp.addr - the function's address handler - this function is run after the ret instruction executes, but before control returns to the return address in the caller. maxactive - The maximum number of instances of the probed function that can be active concurrently. For example, if the function is non- recursive and is called with a spinlock or mutex held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. maxactive is used to determine how many kretprobe_instance objects to allocate for this particular probed function. If maxactive <= 0, it is set to a default value (if CONFIG_PREEMPT maxactive=max(10, 2 * NR_CPUS) else maxactive=NR_CPUS) For example: struct kretprobe rp; rp.kp.addr = /* entrypoint address */ rp.handler = /*return probe handler */ rp.maxactive = /* e.g., 1 or NR_CPUS or 0, see the above explanation */ register_kretprobe(&rp); The following field may also be of interest: nmissed - Initialized to zero when the function-return probe is registered, and incremented every time the probed function is entered but there is no kretprobe_instance object available for establishing the function-return probe (i.e., because maxactive was set too low). 2.2 Unregister To unregiter a function-return probe, the user calls unregister_kretprobe() with the same kretprobe object as registered previously. If a probed function is running when the return probe is unregistered, the function will return as expected, but the handler won't be run. 3. Limitations 3.1 This patch supports only the i386 architecture, but patches for x86_64 and ppc64 are anticipated soon. 3.2 Return probes operates by replacing the return address in the stack (or in a known register, such as the lr register for ppc). This may cause __builtin_return_address(0), when invoked from the return-probed function, to return the address of the return-probes trampoline. 3.3 This implementation uses the "Multiprobes at an address" feature in 2.6.12-rc3-mm3. 3.4 Due to a limitation in multi-probes, you cannot currently establish a return probe and a jprobe on the same function. A patch to remove this limitation is being tested. This feature is required by SystemTap (http://sourceware.org/systemtap), and reflects ideas contributed by several SystemTap developers, including Will Cohen and Ananth Mavinakayanahalli. Signed-off-by: Hien Nguyen <hien@us.ibm.com> Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com> Signed-off-by: Frederik Deweerdt <frederik.deweerdt@laposte.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 01:09:19 -06:00
}
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "int 3"
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*
* This function prepares to return from the post-single-step
* interrupt. We have to fix up the stack as follows:
*
* 0) Except in the case of absolute or indirect jump or call instructions,
* the new eip is relative to the copied instruction. We need to make
* it relative to the original instruction.
*
* 1) If the single-stepped instruction was pushfl, then the TF and IF
* flags are set in the just-pushed eflags, and may need to be cleared.
*
* 2) If the single-stepped instruction was a call, the return address
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
*
* This function also checks instruction size for preparing direct execution.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
unsigned long *tos = (unsigned long *)&regs->esp;
unsigned long copy_eip = (unsigned long)p->ainsn.insn;
unsigned long orig_eip = (unsigned long)p->addr;
regs->eflags &= ~TF_MASK;
switch (p->ainsn.insn[0]) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
*tos |= kcb->kprobe_old_eflags;
break;
case 0xc2: /* iret/ret/lret */
case 0xc3:
case 0xca:
case 0xcb:
case 0xcf:
case 0xea: /* jmp absolute -- eip is correct */
/* eip is already adjusted, no more changes required */
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
p->ainsn.boostable = 1;
goto no_change;
case 0xe8: /* call relative - Fix return addr */
*tos = orig_eip + (*tos - copy_eip);
break;
case 0x9a: /* call absolute -- same as call absolute, indirect */
*tos = orig_eip + (*tos - copy_eip);
goto no_change;
case 0xff:
if ((p->ainsn.insn[1] & 0x30) == 0x10) {
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
/*
* call absolute, indirect
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
* Fix return addr; eip is correct.
* But this is not boostable
*/
*tos = orig_eip + (*tos - copy_eip);
goto no_change;
} else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
/* eip is correct. And this is boostable */
p->ainsn.boostable = 1;
goto no_change;
}
default:
break;
}
[PATCH] x86: kprobes-booster Current kprobe copies the original instruction at the probe point and replaces it with a breakpoint instruction (int3). When the kernel hits the probe point, kprobe handler is invoked. And the copied instruction is single-step executed on the copied buffer (not on the original address) by kprobe. After that, the kprobe checks registers and modify it (if need) as if the instructions was executed on the original address. My proposal is based on the fact there are many instructions which do NOT require the register modification after the single-step execution. When the copied instruction is a kind of them, kprobe just jumps back to the next instruction after single-step execution. If so, why don't we execute those instructions directly? With kprobe-booster patch, kprobes will execute a copied instruction directly and (if need) jump back to original code. This direct execution is executed when the kprobe don't have both post_handler and break_handler, and the copied instruction can be executed directly. I sorted instructions which can be executed directly or not; - Call instructions are NG(can not be executed directly). We should correct the return address pushed into top of stack. - Indirect instructions except for absolute indirect-jumps are NG. Those instructions changes EIP randomly. We should check EIP and correct it. - Instructions that change EIP beyond the range of the instruction buffer are NG. - Instructions that change EIP to tail 5 bytes of the instruction buffer (it is the size of a jump instruction). We must write a jump instruction which backs to original kernel code in the instruction buffer. - Break point instruction is NG. We should not touch EIP and pass to other handlers. - Absolute direct/indirect jumps are OK.- Conditional Jumps are NG. - Halt and software-interruptions are NG. Because it will stay on the instruction buffer of kprobes. - Prefixes are NG. - Unknown/reserved opcode is NG. - Other 1 byte instructions are OK. But those instructions need a jump back code. - 2 bytes instructions are mapped sparsely. So, in this release, this patch don't boost those instructions. >From Intel's IA-32 opcode map described in IA-32 Intel Architecture Software Developer's Manual Vol.2 B, I determined that following opcodes are not boostable. - 0FH (2byte escape) - 70H - 7FH (Jump on condition) - 9AH (Call) and 9CH (Pushf) - C0H-C1H (Grp 2: includes reserved opcode) - C6H-C7H (Grp11: includes reserved opcode) - CCH-CEH (Software-interrupt) - D0H-D3H (Grp2: includes reserved opcode) - D6H (Reserved) - D8H-DFH (Coprocessor) - E0H-E3H (loop/conditional jump) - E8H (Call) - F0H-F3H (Prefixes and reserved) - F4H (Halt) - F6H-F7H (Grp3: includes reserved opcode) - FEH-FFH(Grp4,5: includes reserved opcode) Kprobe-booster checks whether target instruction can be boosted (can be executed directly) at arch_copy_kprobe() function. If the target instruction can be boosted, it clears "boostable" flag. If not, it sets "boostable" flag -1. This is disabled status. In resume_execution() function, If "boostable" flag is cleared, kprobe-booster measures the size of the target instruction and sets "boostable" flag 1. In kprobe_handler(), kprobe checks the "boostable" flag. If the flag is 1, it resets current kprobe and executes instruction buffer directly instead of single stepping. When unregistering a boosted kprobe, it calls synchronize_sched() after "int3" is removed. So we can ensure followings after the synchronize_sched() called. - interrupt handlers are finished on all CPUs. - instruction buffer is not executed on all CPUs. And we can release the boosted kprobe safely. And also, on preemptible kernel, the booster is not enabled where the kernel preemption is enabled. So, there are no preempted threads on the instruction buffer. The description of kretprobe-booster: ==================================== In the normal operation, kretprobe make a target function return to trampoline code. And a kprobe (called trampoline_probe) have been inserted at the trampoline code. When the kernel hits this kprobe, it calls kretprobe's handler and it returns to original return address. Kretprobe-booster patch removes the trampoline_probe. It allows the trampoline code to call kretprobe's handler directly instead of invoking kprobe. And tranpoline code returns to original return address. This new trampoline code stores and restores registers, so the kretprobe handler is still able to access those registers. Current kprobe has about 1.3 usec/probe(*) overhead, and kprobe-booster patch reduces it to 0.6 usec/probe(*). Also current kretprobe has about 2.0 usec/probe(*) overhead. Kprobe-booster patch reduces it to 1.3 usec/probe(*), and the combination of both kprobe-booster patch and kretprobe-booster patch reduces it to 0.9 usec/probe(*). I expect the combination of both patches can reduce half of a probing overhead. Performance numbers strongly depend on the processor model. Andrew Morton wrote: > These preempt tricks look rather nasty. Can you please describe what the > problem is, precisely? And how this code avoids it? Perhaps we can find > something cleaner. The problem is how to remove the copied instructions of the kprobe *safely* on the preemptable kernel (CONFIG_PREEMPT=y). Kprobes basically executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)copied instructioin(single step) (5)kprobe_posthandler() (6)preempt_enable() (7)return to the original code During the execution of copied instruction, preemption is disabled (from step (2) to (6)). When unregistering the probes, Kprobe waits for RCU quiescent state by using synchronize_sched() after removing int3 instruction. Thus we can ensure the copied instruction is not executed. On the other hand, kprobe-booster executes the following actions; (1)int3 (2)preempt_disable() (3)kprobe_prehandler() (4)preempt_enable() <-- this one is added by my patch (5)copied instruction(direct execution) (6)jmp back to the original code The problem is that we have no way to prevent preemption on step (5) or (6). We cannot call preempt_disable() after step (6), because there are no rooms to do that. Thus, some other processes may be preempted at step(5) or (6) on preemptable kernel. And I couldn't find the easy way to ensure that other processes' stack do *not* have the address of them. (I thought some way to do that, but those are very costly.) So currently, I simply boost the kprobe only when the probe point is already preemption disabled. > Also, the patch adds a preempt_enable() but I don't see a corresponding > preempt_disable(). Am I missing something? It is corresponding to the preempt_disable() in the top of kprobe_handler(). I copied the code of kprobe_handler() here: static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr = NULL; unsigned long *lp; struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); <-- HERE kcb = get_kprobe_ctlblk(); Signed-off-by: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-26 02:38:17 -07:00
if (p->ainsn.boostable == 0) {
if ((regs->eip > copy_eip) &&
(regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
/*
* These instructions can be executed directly if it
* jumps back to correct address.
*/
set_jmp_op((void *)regs->eip,
(void *)orig_eip + (regs->eip - copy_eip));
p->ainsn.boostable = 1;
} else {
p->ainsn.boostable = -1;
}
}
regs->eip = orig_eip + (regs->eip - copy_eip);
no_change:
return;
}
/*
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
* remain disabled thoroughout this function.
*/
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
resume_execution(cur, regs, kcb);
regs->eflags |= kcb->kprobe_saved_eflags;
/*Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
/*
* if somebody else is singlestepping across a probe point, eflags
* will have TF set, in which case, continue the remaining processing
* of do_debug, as if this is not a probe hit.
*/
if (regs->eflags & TF_MASK)
return 0;
return 1;
}
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe and the eip points back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->eip = (unsigned long)cur->addr;
regs->eflags |= kcb->kprobe_old_eflags;
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accouting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (fixup_exception(regs))
return 1;
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
/*
* Wrapper routine to for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
2005-11-07 02:00:07 -07:00
int ret = NOTIFY_DONE;
if (args->regs && user_mode_vm(args->regs))
return ret;
switch (val) {
case DIE_INT3:
if (kprobe_handler(args->regs))
2005-11-07 02:00:07 -07:00
ret = NOTIFY_STOP;
break;
case DIE_DEBUG:
if (post_kprobe_handler(args->regs))
2005-11-07 02:00:07 -07:00
ret = NOTIFY_STOP;
break;
case DIE_GPF:
case DIE_PAGE_FAULT:
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
2005-11-07 02:00:07 -07:00
ret = NOTIFY_STOP;
preempt_enable();
break;
default:
break;
}
2005-11-07 02:00:07 -07:00
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kcb->jprobe_saved_regs = *regs;
kcb->jprobe_saved_esp = &regs->esp;
addr = (unsigned long)(kcb->jprobe_saved_esp);
/*
* TBD: As Linus pointed out, gcc assumes that the callee
* owns the argument space and could overwrite it, e.g.
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area.
*/
memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
MIN_STACK_SIZE(addr));
regs->eflags &= ~IF_MASK;
regs->eip = (unsigned long)(jp->entry);
return 1;
}
void __kprobes jprobe_return(void)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
asm volatile (" xchgl %%ebx,%%esp \n"
" int3 \n"
" .globl jprobe_return_end \n"
" jprobe_return_end: \n"
" nop \n"::"b"
(kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->eip - 1);
unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
struct jprobe *jp = container_of(p, struct jprobe, kp);
if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
if (&regs->esp != kcb->jprobe_saved_esp) {
struct pt_regs *saved_regs =
container_of(kcb->jprobe_saved_esp,
struct pt_regs, esp);
printk("current esp %p does not match saved esp %p\n",
&regs->esp, kcb->jprobe_saved_esp);
printk("Saved registers for jprobe %p\n", jp);
show_registers(saved_regs);
printk("Current registers\n");
show_registers(regs);
BUG();
}
*regs = kcb->jprobe_saved_regs;
memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
MIN_STACK_SIZE(stack_addr));
preempt_enable_no_resched();
return 1;
}
return 0;
}
int __init arch_init_kprobes(void)
{
return 0;
}