perf callchain: Allow disabling call graphs per event

This patch introduces "call-graph=no" to disable callgraphs per event.

Here is an example.

  perf record -e 'cpu/cpu-cycles,call-graph=fp/,cpu/instructions,call-graph=no/' sleep 1

  perf report --stdio

  # To display the perf.data header info, please use --header/--header-only options.
  #
  #
  # Total Lost Samples: 0
  #
  # Samples: 6  of event 'cpu/cpu-cycles,call-graph=fp/'
  # Event count (approx.): 774218
  #
  # Children      Self  Command  Shared Object     Symbol
  # ........  ........  .......  ................  ........................................
  #
    61.94%     0.00%  sleep    [kernel.vmlinux]  [k] entry_SYSCALL_64_fastpath
              |
              ---entry_SYSCALL_64_fastpath
                 |
                 |--97.30%-- __brk
                 |
                  --2.70%-- mmap64
                            _dl_check_map_versions
                            _dl_check_all_versions

    61.94%     0.00%  sleep    [kernel.vmlinux]  [k] perf_event_mmap
              |
              ---perf_event_mmap
                 |
                 |--97.30%-- do_brk
                 |          sys_brk
                 |          entry_SYSCALL_64_fastpath
                 |          __brk
                 |
                  --2.70%-- mmap_region
                            do_mmap_pgoff
                            vm_mmap_pgoff
                            sys_mmap_pgoff
                            sys_mmap
                            entry_SYSCALL_64_fastpath
                            mmap64
                            _dl_check_map_versions
                            _dl_check_all_versions
  ......

  # Samples: 6  of event 'cpu/instructions,call-graph=no/'
  # Event count (approx.): 359692
  #
  # Children      Self  Command  Shared Object     Symbol
  # ........  ........  .......  ................  .................................
  #
     89.03%     0.00%  sleep    [unknown]         [.] 0xffff6598ffff6598
     89.03%     0.00%  sleep    ld-2.17.so        [.] _dl_resolve_conflicts
     89.03%     0.00%  sleep    [kernel.vmlinux]  [k] page_fault

Signed-off-by: Kan Liang <kan.liang@intel.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/1439289050-40510-2-git-send-email-kan.liang@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Kan Liang 2015-08-11 06:30:48 -04:00 committed by Arnaldo Carvalho de Melo
parent d457c96392
commit f9db0d0f1b
6 changed files with 28 additions and 10 deletions


@@ -54,7 +54,8 @@ OPTIONS
     enabling time stamping. 0 for disabling time stamping.
     The default is 1.
   - 'call-graph': Disable/enable callgraph. Acceptable str are "fp" for
-    FP mode, "dwarf" for DWARF mode, "lbr" for LBR mode.
+    FP mode, "dwarf" for DWARF mode, "lbr" for LBR mode and
+    "no" for disable callgraph.
   - 'stack-size': user stack size for dwarf mode
   Note: If user explicitly sets options which conflict with the params,
   the value set by the params will be overridden.


@@ -239,6 +239,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
         if (nr_samples > 0) {
             total_nr_samples += nr_samples;
             hists__collapse_resort(hists, NULL);
+            /* Don't sort callchain */
+            perf_evsel__reset_sample_bit(pos, CALLCHAIN);
             hists__output_resort(hists, NULL);
 
             if (symbol_conf.event_group &&


@@ -722,6 +722,9 @@ static void data_process(void)
         if (verbose || data__files_cnt > 2)
             data__fprintf();
 
+        /* Don't sort callchain for perf diff */
+        perf_evsel__reset_sample_bit(evsel_base, CALLCHAIN);
+
         hists__process(hists_base);
     }
 }


@@ -279,6 +279,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
     symbol_conf.use_callchain = false;
     symbol_conf.cumulate_callchain = false;
+    perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
 
     setup_sorting();
     callchain_register_param(&callchain_param);
@@ -425,6 +426,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
     symbol_conf.use_callchain = true;
     symbol_conf.cumulate_callchain = false;
+    perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
     setup_sorting();
     callchain_register_param(&callchain_param);
@@ -482,6 +484,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
     symbol_conf.use_callchain = false;
     symbol_conf.cumulate_callchain = true;
+    perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
 
     setup_sorting();
     callchain_register_param(&callchain_param);
@@ -665,6 +668,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
     symbol_conf.use_callchain = true;
     symbol_conf.cumulate_callchain = true;
+    perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
     setup_sorting();
     callchain_register_param(&callchain_param);


@@ -651,12 +651,17 @@ static void apply_config_terms(struct perf_evsel *evsel,
     /* parse callgraph parameters */
     if (callgraph_buf != NULL) {
-        param.enabled = true;
-        if (parse_callchain_record(callgraph_buf, &param)) {
-            pr_err("per-event callgraph setting for %s failed. "
-                   "Apply callgraph global setting for it\n",
-                   evsel->name);
-            return;
+        if (!strcmp(callgraph_buf, "no")) {
+            param.enabled = false;
+            param.record_mode = CALLCHAIN_NONE;
+        } else {
+            param.enabled = true;
+            if (parse_callchain_record(callgraph_buf, &param)) {
+                pr_err("per-event callgraph setting for %s failed. "
+                       "Apply callgraph global setting for it\n",
+                       evsel->name);
+                return;
+            }
         }
     }
 
     if (dump_size > 0) {
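
The hunk above is the heart of the feature: a "call-graph=no" term disables the callchain for that event, any other value is parsed as a record mode, and an unrecognized value falls back to the global setting. Below is a minimal standalone sketch of that decision for illustration only; the enum, struct, and function names are invented here and are not perf's internal API.

  #include <stdio.h>
  #include <string.h>
  #include <stdbool.h>

  /* Invented names for illustration; perf itself uses callchain_param and
   * the CALLCHAIN_NONE/FP/DWARF/LBR record modes internally. */
  enum cg_mode { CG_NONE, CG_FP, CG_DWARF, CG_LBR };

  struct cg_param {
          bool enabled;
          enum cg_mode mode;
  };

  /* Mirror of the decision above: "no" disables the callgraph, anything else
   * is parsed as a record mode; unknown values report an error so the caller
   * can keep the global setting. */
  static int parse_call_graph_term(const char *value, struct cg_param *param)
  {
          if (!strcmp(value, "no")) {
                  param->enabled = false;
                  param->mode = CG_NONE;
                  return 0;
          }

          param->enabled = true;
          if (!strcmp(value, "fp"))
                  param->mode = CG_FP;
          else if (!strcmp(value, "dwarf"))
                  param->mode = CG_DWARF;
          else if (!strcmp(value, "lbr"))
                  param->mode = CG_LBR;
          else
                  return -1;
          return 0;
  }

  int main(void)
  {
          const char *terms[] = { "fp", "no", "dwarf", "bogus" };
          unsigned int i;

          for (i = 0; i < sizeof(terms) / sizeof(terms[0]); i++) {
                  struct cg_param p = { 0 };

                  if (parse_call_graph_term(terms[i], &p))
                          printf("%-6s -> unknown, fall back to global setting\n", terms[i]);
                  else
                          printf("%-6s -> enabled=%d mode=%d\n", terms[i], p.enabled, p.mode);
          }
          return 0;
  }

Compiling and running the sketch prints one line per term, e.g. "no -> enabled=0 mode=0" for the new case.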


@@ -1109,13 +1109,14 @@ void hists__inc_stats(struct hists *hists, struct hist_entry *h)
 static void __hists__insert_output_entry(struct rb_root *entries,
                                          struct hist_entry *he,
-                                         u64 min_callchain_hits)
+                                         u64 min_callchain_hits,
+                                         bool use_callchain)
 {
     struct rb_node **p = &entries->rb_node;
     struct rb_node *parent = NULL;
     struct hist_entry *iter;
 
-    if (symbol_conf.use_callchain)
+    if (use_callchain)
         callchain_param.sort(&he->sorted_chain, he->callchain,
                              min_callchain_hits, &callchain_param);
 
@@ -1139,6 +1140,8 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
     struct rb_node *next;
     struct hist_entry *n;
     u64 min_callchain_hits;
+    struct perf_evsel *evsel = hists_to_evsel(hists);
+    bool use_callchain = evsel ? (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) : symbol_conf.use_callchain;
 
     min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
 
@@ -1157,7 +1160,7 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
     n = rb_entry(next, struct hist_entry, rb_node_in);
     next = rb_next(&n->rb_node_in);
 
-    __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
+    __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
 
     hists__inc_stats(hists, n);
     if (!n->filtered)
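
The hists__output_resort() change keys callchain sorting off each event's own sample_type rather than the global symbol_conf.use_callchain. A rough standalone sketch of that per-event decision follows; the evsel struct and the SAMPLE_CALLCHAIN bit below are stand-ins for illustration, not perf's or the kernel's definitions.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for the kernel's PERF_SAMPLE_CALLCHAIN bit; value is illustrative. */
  #define SAMPLE_CALLCHAIN (1ULL << 5)

  struct evsel {
          uint64_t sample_type;   /* stand-in for evsel->attr.sample_type */
  };

  /* Per-event decision mirroring the hunk above: trust the event's own
   * sample_type when an evsel is known, otherwise fall back to the global
   * flag (symbol_conf.use_callchain in perf). */
  static bool use_callchain(const struct evsel *evsel, bool global_use_callchain)
  {
          if (evsel)
                  return (evsel->sample_type & SAMPLE_CALLCHAIN) != 0;
          return global_use_callchain;
  }

  int main(void)
  {
          struct evsel cycles = { .sample_type = SAMPLE_CALLCHAIN }; /* call-graph=fp */
          struct evsel insns  = { .sample_type = 0 };                /* call-graph=no */

          printf("cycles:       sort callchain = %d\n", use_callchain(&cycles, true));
          printf("instructions: sort callchain = %d\n", use_callchain(&insns, true));
          printf("no evsel:     sort callchain = %d\n", use_callchain(NULL, true));
          return 0;
  }

With a global "sort callchains" setting of true, only the event that actually sampled callchains (call-graph=fp) gets its chains sorted; the call-graph=no event is skipped, matching the perf report output in the commit message.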