/*
 * builtin-annotate.c
 *
 * Builtin annotate command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"
#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "perf.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/annotate.h"
#include "util/event.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "arch/common.h"
#include "util/block-range.h"
#include <dlfcn.h>
#include <errno.h>
#include <linux/bitmap.h>
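
/* Per-invocation state of 'perf annotate', filled in by cmd_annotate(). */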
struct perf_annotate {
	struct perf_tool tool;
	struct perf_session *session;
	bool	   use_tui, use_stdio, use_gtk;
	bool	   full_paths;
	bool	   print_line;
	bool	   skip_missing;
	const char *sym_hist_filter;
	const char *cpu_list;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};

/*
 * Given one basic block:
 *
 *	from	to		branch_i
 *	* ----> *
 *		|
 *		| block
 *		v
 *		* ----> *
 *		from	to	branch_i+1
 *
 * where the horizontal are the branches and the vertical is the executed
 * block of instructions.
 *
 * We count, for each 'instruction', the number of blocks that covered it,
 * as well as the ratio at which each branch is taken.
 *
 * We can do this without knowing the actual instruction stream by keeping
 * track of the address ranges. We break down ranges such that there is no
 * overlap and iterate from the start until the end.
 *
 * @acme: once we parse the objdump output _before_ processing the samples,
 * we can easily fold the branch.cycles IPC bits in.
 */
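/*
 * Worked example (hypothetical addresses): recording the blocks [0x10,0x30]
 * and [0x20,0x40] yields the non-overlapping ranges [0x10,0x1f], [0x20,0x30]
 * and [0x31,0x40]; the shared middle range gets coverage == 2, the outer
 * ranges coverage == 1.
 */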
static void process_basic_block(struct addr_map_symbol *start,
				struct addr_map_symbol *end,
				struct branch_flags *flags)
{
	struct symbol *sym = start->sym;
	struct annotation *notes = sym ? symbol__annotation(sym) : NULL;
	struct block_range_iter iter;
	struct block_range *entry;

	/*
	 * Sanity; NULL isn't executable and the CPU cannot execute backwards
	 */
	if (!start->addr || start->addr > end->addr)
		return;

	iter = block_range__create(start->addr, end->addr);
	if (!block_range_iter__valid(&iter))
		return;

	/*
	 * First block in range is a branch target.
	 */
	entry = block_range_iter(&iter);
	assert(entry->is_target);
	entry->entry++;

	do {
		entry = block_range_iter(&iter);

		entry->coverage++;
		entry->sym = sym;

		if (notes)
			notes->max_coverage = max(notes->max_coverage, entry->coverage);

	} while (block_range_iter__next(&iter));

	/*
	 * Last block in range is a branch.
	 */
	entry = block_range_iter(&iter);
	assert(entry->is_branch);
	entry->taken++;
	if (flags->predicted)
		entry->pred++;
}
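
/*
 * The sampled branch stack stores the most recent branch first; walking it
 * backwards, the 'to' of the previous (older) branch and the 'from' of the
 * current one delimit one executed basic block.
 */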
static void process_branch_stack(struct branch_stack *bs, struct addr_location *al,
				 struct perf_sample *sample)
{
	struct addr_map_symbol *prev = NULL;
	struct branch_info *bi;
	int i;

	if (!bs || !bs->nr)
		return;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return;

	for (i = bs->nr - 1; i >= 0; i--) {
		/*
		 * XXX filter against symbol
		 */
		if (prev)
			process_basic_block(prev, &bi[i].from, &bi[i].flags);
		prev = &bi[i].to;
	}

	free(bi);
}
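
/*
 * Account one sample: feed its branch stack into the basic-block accounting
 * above, then create/update a hist_entry for the resolved location and bump
 * its per-address histogram.
 */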
static int perf_evsel__add_sample(struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct perf_annotate *ann)
{
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	int ret;

	if (ann->sym_hist_filter != NULL &&
	    (al->sym == NULL ||
	     strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
		/* We're only interested in a symbol named sym_hist_filter */
		/*
		 * FIXME: why isn't this done in the symbol_filter when loading
		 * the DSO?
		 */
		if (al->sym != NULL) {
			rb_erase(&al->sym->rb_node,
				 &al->map->dso->symbols[al->map->type]);
			symbol__delete(al->sym);
			dso__reset_find_symbol_cache(al->map->dso);
		}
		return 0;
	}

	/*
	 * XXX filtered samples can still have branch entries pointing into our
	 * symbol and are missed.
	 */
	process_branch_stack(sample->branch_stack, al, sample);

	he = hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
	if (he == NULL)
		return -ENOMEM;

	ret = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr);
	hists__inc_nr_samples(hists, true);
	return ret;
}
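
/*
 * Per-sample callback wired up in cmd_annotate(): resolve the sample to a
 * thread/map/symbol, apply the optional CPU filter and hand it on to
 * perf_evsel__add_sample().
 */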
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
	struct addr_location al;
	int ret = 0;

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_warning("problem processing %d event, skipping it.\n",
			   event->header.type);
		return -1;
	}

	if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
		goto out_put;

	if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
		pr_warning("problem incrementing symbol count, "
			   "skipping event\n");
		ret = -1;
	}
out_put:
	addr_location__put(&al);
	return ret;
}

static int hist_entry__tty_annotate(struct hist_entry *he,
				    struct perf_evsel *evsel,
				    struct perf_annotate *ann)
{
	return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel,
				    ann->print_line, ann->full_paths, 0, 0);
}
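
/*
 * Walk the sorted hist entries and annotate each resolved symbol with the
 * selected UI (use_browser: 2 = GTK, 1 = TUI, 0 = stdio); in the TUI the
 * left/right keys step to the previous/next symbol.
 */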
static void hists__find_annotations(struct hists *hists,
				    struct perf_evsel *evsel,
				    struct perf_annotate *ann)
{
	struct rb_node *nd = rb_first(&hists->entries), *next;
	int key = K_RIGHT;

	while (nd) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
		struct annotation *notes;

		if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
			goto find_next;
		notes = symbol__annotation(he->ms.sym);
		if (notes->src == NULL) {
find_next:
			if (key == K_LEFT)
				nd = rb_prev(nd);
			else
				nd = rb_next(nd);
			continue;
		}

		if (use_browser == 2) {
			int ret;
			int (*annotate)(struct hist_entry *he,
					struct perf_evsel *evsel,
					struct hist_browser_timer *hbt);

			annotate = dlsym(perf_gtk_handle,
					 "hist_entry__gtk_annotate");
			if (annotate == NULL) {
				ui__error("GTK browser not found!\n");
				return;
			}

			ret = annotate(he, evsel, NULL);
			if (!ret || !ann->skip_missing)
				return;

			/* skip missing symbols */
			nd = rb_next(nd);
		} else if (use_browser == 1) {
			key = hist_entry__tui_annotate(he, evsel, NULL);
			switch (key) {
			case -1:
				if (!ann->skip_missing)
					return;
				/* fall through */
			case K_RIGHT:
				next = rb_next(nd);
				break;
			case K_LEFT:
				next = rb_prev(nd);
				break;
			default:
				return;
			}

			if (next != NULL)
				nd = next;
		} else {
			hist_entry__tty_annotate(he, evsel, ann);
			nd = rb_next(nd);
			/*
			 * Since we have a hist_entry per IP for the same
			 * symbol, free he->ms.sym->src to signal we already
			 * processed this symbol.
			 */
			zfree(&notes->src->cycles_hist);
			zfree(&notes->src);
		}
	}
}
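
/*
 * Main body of the command: process all events in the session, then resort
 * the hists of each event and annotate the symbols that received samples.
 */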
static int __cmd_annotate(struct perf_annotate *ann)
{
	int ret;
	struct perf_session *session = ann->session;
	struct perf_evsel *pos;
	u64 total_nr_samples;

	if (ann->cpu_list) {
		ret = perf_session__cpu_bitmap(session, ann->cpu_list,
					       ann->cpu_bitmap);
		if (ret)
			goto out;
	}

	if (!objdump_path) {
		ret = perf_env__lookup_objdump(&session->header.env);
		if (ret)
			goto out;
	}

	ret = perf_session__process_events(session);
	if (ret)
		goto out;

	if (dump_trace) {
		perf_session__fprintf_nr_events(session, stdout);
		perf_evlist__fprintf_nr_events(session->evlist, stdout);
		goto out;
	}

	if (verbose > 3)
		perf_session__fprintf(session, stdout);

	if (verbose > 2)
		perf_session__fprintf_dsos(session, stdout);

	total_nr_samples = 0;
	evlist__for_each_entry(session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];

		if (nr_samples > 0) {
			total_nr_samples += nr_samples;
			hists__collapse_resort(hists, NULL);
			/* Don't sort callchain */
			perf_evsel__reset_sample_bit(pos, CALLCHAIN);
			perf_evsel__output_resort(pos, NULL);
			if (symbol_conf.event_group &&
			    !perf_evsel__is_group_leader(pos))
				continue;

			hists__find_annotations(hists, pos, ann);
		}
	}

	if (total_nr_samples == 0) {
		ui__error("The %s file has no samples!\n", session->file->path);
		goto out;
	}
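
	/*
	 * The GTK browser is provided by a separate DSO that browser setup
	 * loads with dlopen(), so resolve its entry point at run time and
	 * fail gracefully if GTK support isn't available.
	 */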
	if (use_browser == 2) {
		void (*show_annotations)(void);

		show_annotations = dlsym(perf_gtk_handle,
					 "perf_gtk__show_annotations");
		if (show_annotations == NULL) {
			ui__error("GTK browser not found!\n");
			goto out;
		}
		show_annotations();
	}

out:
	return ret;
}

static const char * const annotate_usage[] = {
	"perf annotate [<options>]",
	NULL
};
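
/*
 * Typical invocations (illustrative examples, assuming a perf.data
 * recorded beforehand; "foo" is a placeholder symbol name):
 *
 *   perf annotate                    # annotate symbols with samples
 *   perf annotate --stdio -s foo     # print annotation of symbol "foo"
 *   perf annotate --group --stdio    # one percent column per group event
 */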

int cmd_annotate(int argc, const char **argv)
{
	struct perf_annotate annotate = {
		.tool = {
			.sample	= process_sample_event,
			.mmap	= perf_event__process_mmap,
			.mmap2	= perf_event__process_mmap2,
			.comm	= perf_event__process_comm,
			.exit	= perf_event__process_exit,
			.fork	= perf_event__process_fork,
			/* PERF_RECORD_NAMESPACES: fork/clone/setns/unshare */
			.namespaces = perf_event__process_namespaces,
			/* .attr and .build_id are needed for pipe mode */
			.attr = perf_event__process_attr,
			.build_id = perf_event__process_build_id,
			.tracing_data = perf_event__process_tracing_data,
			/* feature header records, also synthesized in pipe mode */
			.feature = perf_event__process_feature,
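			/* queue events and flush them in timestamp order */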
			.ordered_events = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct perf_data_file file = {
		.mode  = PERF_DATA_MODE_READ,
	};
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
"symbol to annotate"),
OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
	/* OPT_INCR: each repeated -v raises the verbosity level */
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "do not show any message"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"),
	OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('l', "print-line", &annotate.print_line,
		    "print matching source lines (may be slow)"),
	OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
		    "Don't shorten the displayed pathnames"),
	OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
		    "Skip symbols that cannot be annotated"),
	OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
	OPT_CALLBACK(0, "symfs", NULL, "directory",
		     "Look for files with symbols relative to this directory",
		     symbol__config_symfs),
	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
"Show event group information together"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
"'always' (default), 'never' or 'auto' only applicable to --stdio mode",
stdio__config_color, "always"),
OPT_END()
};
	int ret = hists__init();

	if (ret < 0)
		return ret;

	argc = parse_options(argc, argv, options, annotate_usage, 0);

	if (argc) {
		/*
		 * Special case: if there's an argument left then assume that
		 * it's a symbol filter:
		 */
		if (argc > 1)
			usage_with_options(annotate_usage, options);

		annotate.sym_hist_filter = argv[0];
	}

	if (quiet)
		perf_quiet_option();

	file.path = input_name;

	annotate.session = perf_session__new(&file, false, &annotate.tool);
	if (annotate.session == NULL)
		return -1;

	ret = symbol__annotation_init();
	if (ret < 0)
		goto out_delete;

	symbol_conf.try_vmlinux_path = true;

	/*
	 * Look up vmlinux using the kernel version recorded in the
	 * perf.data header rather than the one currently running.
	 */
	ret = symbol__init(&annotate.session->header.env);
	if (ret < 0)
		goto out_delete;

	if (setup_sorting(NULL) < 0)
		usage_with_options(annotate_usage, options);
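
	/* Pick the UI: use_browser 0 = stdio, 1 = TUI, 2 = GTK */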
	if (annotate.use_stdio)
		use_browser = 0;
	else if (annotate.use_tui)
		use_browser = 1;
	else if (annotate.use_gtk)
		use_browser = 2;

	setup_browser(true);

	ret = __cmd_annotate(&annotate);

out_delete:
	/*
	 * Speed up the exit process, for large files this can
	 * take quite a while.
	 *
	 * XXX Enable this when using valgrind or if we ever
	 * librarize this command.
	 *
	 * Also experiment with obstacks to see how much speed
	 * up we'll get here.
	 *
	 * perf_session__delete(session);
	 */
	return ret;
}