
perf/core improvements and fixes:

User visible:
 
 - Improve --filter support for 'perf probe', allowing its arguments to be
   used with other commands, such as --add, --del, etc. (Masami Hiramatsu)
 
 - Show warning when running 'perf kmem stat' on an unsuitable perf.data file,
   i.e. one with events that are not the ones required for the stat variant
   used (Namhyung Kim).
 
 Infrastructure:
 
 - Auxtrace support patches, paving the way to support Intel PT and BTS (Adrian Hunter)
 
 - hists browser (top, report) refactorings (Namhyung Kim)
 
 Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJVSTWtAAoJENZQFvNTUqpA7zoP/3PDUfiFkhg5wUMIsCiVlI22
 t05ptMRt82X0/FoleEYBfLIwJcnBbOmmSTFkoQzMj9ETHkwKB1QpH5HgeRrKe5un
 +rhoxWlcBs3/KgBNk4sIrg2FrzM//LXy4NrLc3TuyCQJfuWxfCCs8L/pIpT3it9m
 cc9GgbMXV7164KggSSG+3+IY9sbnQXQNQdhZoVbd4GAumX15JQO83eSYXZaIWleO
 Wra3aHP4tOEJmdPBhDhpGdTn0zpvTHLV5qPU6/3W1BvQt6O/6Gqe4ujjg7Ga2bLR
 pnGnoRwFM1Z7CacHVFoETeA8unqOUKEeIJvpbq0SsHfiT12RRjx//iy6Q6MaEx59
 DL4tVWxZyIzZizQ9cSXTe+uXQn5LUO2Tj2PC4wcVVAyClI94tjF20XtKxX6Ptyl2
 KVe0lv9CyxcB/OlwbxVo/xLYVdlbrIh2uGhpwsfIB7UNAdGi5G9SXiiEBD7gUUp1
 k1sRbEMKcUYYx/ezN5wkIQIAaEVMNWl6VJF9qLA63Ti15XiBXHdJE2tMleLWz1oi
 z70NTDdwFTquYocTgSnOo0nbb71m55YCfHyAr6VN6ZB08i4Lo7bF9HaI7ODgBFUk
 3FHb4gJxsytC5xwp8R/VJVLPqfC1+HFy2CDZZbr9DkNycIvHqUJratz+EhcCHO2Y
 RJ1CflbTUfJKBPO6TrXH
 =oEm1
 -----END PGP SIGNATURE-----

Merge tag 'perf-core-for-mingo-3' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - Improve --filter support for 'perf probe', allowing its arguments to be
    used with other commands, such as --add, --del, etc. (Masami Hiramatsu);
    see the sketch below.

  - Show warning when running 'perf kmem stat' on an unsuitable perf.data file,
    i.e. one with events that are not the ones required for the stat variant
    used (Namhyung Kim).
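
  A minimal sketch of the unified filter handling from the first item above
  (the option forms follow the perf-probe documentation changes in this series;
  the probe and function patterns are hypothetical):

    perf probe --list='probe:vfs_*'        # list only probe events matching a pattern
    perf probe --del='probe:vfs_*'         # delete probe events by the same pattern
    perf probe --funcs --filter='vfs_*'    # show probe-able kernel functions matching a filter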

Infrastructure changes:

  - Auxtrace support patches, paving the way to support Intel PT and BTS (Adrian Hunter)

  - hists browser (top, report) refactorings (Namhyung Kim)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2015-05-06 04:42:12 +02:00
commit 1836ac856e
82 changed files with 5848 additions and 817 deletions


@ -215,7 +215,6 @@ VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD))
export srctree objtree VPATH
# SUBARCH tells the usermode build what the underlying arch is. That is set
# first, and if a usermode build is happening, the "ARCH=um" on the command
# line overrides the setting of ARCH below. If a native build is happening,
@ -1497,11 +1496,11 @@ image_name:
# Clear a bunch of variables before executing the submake
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/
tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/ $*
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $*
# Single targets
# ---------------------------------------------------------------------------


@ -1,3 +1,8 @@
# Some of the tools (perf) use same make variables
# as in kernel build.
export srctree=
export objtree=
include scripts/Makefile.include
help:
@ -47,11 +52,16 @@ cgroup firewire hv guest usb virtio vm net: FORCE
liblockdep: FORCE
$(call descend,lib/lockdep)
libapikfs: FORCE
libapi: FORCE
$(call descend,lib/api)
perf: libapikfs FORCE
$(call descend,$@)
# The perf build does not follow the descend function setup,
# invoking it via its own make rule.
PERF_O = $(if $(O),$(O)/tools/perf,)
perf: FORCE
$(Q)mkdir -p $(PERF_O) .
$(Q)$(MAKE) --no-print-directory -C perf O=$(PERF_O) subdir=
selftests: FORCE
$(call descend,testing/$@)
@ -97,10 +107,10 @@ cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clea
liblockdep_clean:
$(call descend,lib/lockdep,clean)
libapikfs_clean:
libapi_clean:
$(call descend,lib/api,clean)
perf_clean: libapikfs_clean
perf_clean:
$(call descend,$(@:_clean=),clean)
selftests_clean:


@ -1387,7 +1387,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
do_warning_event(event, "%s: no type found", __func__);
goto fail;
}
field->name = last_token;
field->name = field->alias = last_token;
if (test_type(type, EVENT_OP))
goto fail;
@ -1469,7 +1469,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
size_dynamic = type_size(field->name);
free_token(field->name);
strcat(field->type, brackets);
field->name = token;
field->name = field->alias = token;
type = read_token(&token);
} else {
char *new_type;
@ -6444,6 +6444,8 @@ void pevent_ref(struct pevent *pevent)
void pevent_free_format_field(struct format_field *field)
{
free(field->type);
if (field->alias != field->name)
free(field->alias);
free(field->name);
free(field);
}


@ -191,6 +191,7 @@ struct format_field {
struct event_format *event;
char *type;
char *name;
char *alias;
int offset;
int size;
unsigned int arraylen;


@ -0,0 +1,108 @@
Overhead calculation
--------------------
The overhead can be shown in two columns as 'Children' and 'Self' when
perf collects callchains. The 'self' overhead is simply calculated by
adding all period values of the entry - usually a function (symbol).
This is the value that perf shows traditionally and sum of all the
'self' overhead values should be 100%.
The 'children' overhead is calculated by adding all period values of
the child functions so that it can show the total overhead of the
higher level functions even if they don't directly execute much.
'Children' here means functions that are called from another (parent)
function.
It might be confusing that the sum of all the 'children' overhead
values exceeds 100% since each of them is already an accumulation of
'self' overhead of its child functions. But with this enabled, users
can find which function has the most overhead even if samples are
spread over the children.
Consider the following example; there are three functions like below.
-----------------------
void foo(void) {
/* do something */
}
void bar(void) {
/* do something */
foo();
}
int main(void) {
bar();
return 0;
}
-----------------------
In this case 'foo' is a child of 'bar', and 'bar' is an immediate
child of 'main' so 'foo' also is a child of 'main'. In other words,
'main' is a parent of 'foo' and 'bar', and 'bar' is a parent of 'foo'.
Suppose all samples are recorded in 'foo' and 'bar' only. When it's
recorded with callchains the output will show something like below
in the usual (self-overhead-only) output of perf report:
----------------------------------
Overhead Symbol
........ .....................
60.00% foo
|
--- foo
bar
main
__libc_start_main
40.00% bar
|
--- bar
main
__libc_start_main
----------------------------------
When the --children option is enabled, the 'self' overhead values of
child functions (i.e. 'foo' and 'bar') are added to the parents to
calculate the 'children' overhead. In this case the report could be
displayed as:
-------------------------------------------
Children Self Symbol
........ ........ ....................
100.00% 0.00% __libc_start_main
|
--- __libc_start_main
100.00% 0.00% main
|
--- main
__libc_start_main
100.00% 40.00% bar
|
--- bar
main
__libc_start_main
60.00% 60.00% foo
|
--- foo
bar
main
__libc_start_main
-------------------------------------------
In the above output, the 'self' overhead of 'foo' (60%) was added to the
'children' overhead of 'bar', 'main' and '\_\_libc_start_main'.
Likewise, the 'self' overhead of 'bar' (40%) was added to the
'children' overhead of 'main' and '\_\_libc_start_main'.
So '\_\_libc_start_main' and 'main' are shown first since they have the
same (100%) 'children' overhead (even though they have zero 'self'
overhead) and they are the parents of 'foo' and 'bar'.
Since v3.16 the 'children' overhead is shown by default and the output
is sorted by its values. The 'children' overhead is disabled by
specifying --no-children option on the command line or by adding
'report.children = false' or 'top.children = false' in the perf config
file.
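
A short usage sketch of the behaviour described above (the workload name is a
placeholder; the options are the ones documented for perf report and perf top):

  perf record -g ./workload      # record with callchains
  perf report --children         # the default since v3.16: 'Children' and 'Self' columns
  perf report --no-children      # traditional self-overhead-only output

The default can also be turned off permanently by setting 'report.children = false'
(or 'top.children = false' for perf top) in the perf config file.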


@ -44,6 +44,33 @@ OPTIONS
--kallsyms=<file>::
kallsyms pathname
--itrace::
Decode Instruction Tracing data, replacing it with synthesized events.
Options are:
i synthesize instructions events
b synthesize branches events
c synthesize branches events (calls only)
r synthesize branches events (returns only)
x synthesize transactions events
e synthesize error events
d create a debug log
g synthesize a call chain (use with i or x)
The default is all events i.e. the same as --itrace=ibxe
In addition, the period (default 100000) for instructions events
can be specified in units of:
i instructions
t ticks
ms milliseconds
us microseconds
ns nanoseconds (default)
Also the call chain size (default 16, max. 1024) for instructions or
transactions events can be specified.
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
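
A hedged sketch using 'perf inject', whose --itrace option is described above
(the file names are placeholders; the 'i100us' spelling follows the period
units listed above):

  perf inject --itrace=i100us -i perf.data -o perf.data.decoded

This would replace the AUX area trace with synthesized instructions events at
a period of 100 microseconds and write the result to perf.data.decoded.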


@ -37,7 +37,11 @@ OPTIONS
-s <key[,key2...]>::
--sort=<key[,key2...]>::
Sort the output (default: frag,hit,bytes)
Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
for page). Available sort keys are 'ptr, callsite, bytes, hit,
pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
migtype, gfp' for page. This option should be preceded by one of the
mode selection options - i.e. --slab, --page, --alloc and/or --caller.
-l <num>::
--line=<num>::
@ -52,6 +56,11 @@ OPTIONS
--page::
Analyze page allocator events
--live::
Show live page stat. By default 'perf kmem' shows total allocation
stats, but this option shows live (currently allocated) pages
instead. (This option works with the --page option only.)
SEE ALSO
--------
linkperf:perf-record[1]
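
A hedged usage sketch for the page allocator mode described above (the workload
is a placeholder and the record-side --page switch is an assumption; the stat
options and sort keys are the documented ones):

  perf kmem record --page ./workload
  perf kmem stat --page --caller --sort=bytes,hit --live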


@ -14,11 +14,13 @@ or
or
'perf probe' [options] --del='[GROUP:]EVENT' [...]
or
'perf probe' --list
'perf probe' --list[=[GROUP:]EVENT]
or
'perf probe' [options] --line='LINE'
or
'perf probe' [options] --vars='PROBEPOINT'
or
'perf probe' [options] --funcs
DESCRIPTION
-----------
@ -64,8 +66,8 @@ OPTIONS
classes(e.g. [a-z], [!A-Z]).
-l::
--list::
List up current probe events.
--list[=[GROUP:]EVENT]::
List up current probe events. This can also accept filtering patterns of event names.
-L::
--line=::
@ -82,9 +84,10 @@ OPTIONS
variables.
-F::
--funcs::
--funcs[=FILTER]::
Show available functions in given module or kernel. With -x/--exec,
can also list functions in a user space executable / shared library.
This also can accept a FILTER rule argument.
--filter=FILTER::
(Only for --vars and --funcs) Set filter. FILTER is a combination of glob


@ -108,6 +108,8 @@ OPTIONS
Number of mmap data pages (must be a power of two) or size
specification with appended unit character - B/K/M/G. The
size is rounded up to have nearest pages power of two value.
Also, by adding a comma, the number of mmap pages for AUX
area tracing can be specified.
--group::
Put all events in a single event group. This precedes the --event
@ -257,6 +259,13 @@ records. See clock_gettime(). In particular CLOCK_MONOTONIC and
CLOCK_MONOTONIC_RAW are supported, some events might also allow
CLOCK_BOOTTIME, CLOCK_REALTIME and CLOCK_TAI.
-S::
--snapshot::
Select AUX area tracing Snapshot Mode. This option is valid only with an
AUX area tracing event. Optionally the number of bytes to capture per
snapshot can be specified. In Snapshot Mode, trace data is captured only when
signal SIGUSR2 is received.
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
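
A hedged sketch of the new record-side options (the AUX area tracing event name
is a placeholder for whatever event the platform provides; -a and kill are
ordinary perf/shell usage):

  perf record -e <aux-area-event> -m 16,64 -S -a &
  kill -USR2 %1     # capture an AUX area tracing snapshot
  kill -INT %1      # stop recording

Here '-m 16,64' asks for 16 data mmap pages plus 64 AUX area mmap pages, and
'-S' selects Snapshot Mode, where trace data is captured only on SIGUSR2.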


@ -193,6 +193,7 @@ OPTIONS
Accumulate callchain of children to parent entry so that then can
show up in the output. The output will have a new "Children" column
and will be sorted on the data. It requires callchains are recorded.
See the `overhead calculation' section for more details.
--max-stack::
Set the stack depth limit when parsing the callchain, anything
@ -323,6 +324,37 @@ OPTIONS
--header-only::
Show only perf.data header (forces --stdio).
--itrace::
Options for decoding instruction tracing data. The options are:
i synthesize instructions events
b synthesize branches events
c synthesize branches events (calls only)
r synthesize branches events (returns only)
x synthesize transactions events
e synthesize error events
d create a debug log
g synthesize a call chain (use with i or x)
The default is all events i.e. the same as --itrace=ibxe
In addition, the period (default 100000) for instructions events
can be specified in units of:
i instructions
t ticks
ms milliseconds
us microseconds
ns nanoseconds (default)
Also the call chain size (default 16, max. 1024) for instructions or
transactions events can be specified.
To disable decoding entirely, use --no-itrace.
include::callchain-overhead-calculation.txt[]
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-annotate[1]


@ -115,7 +115,8 @@ OPTIONS
-f::
--fields::
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, srcline, period.
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, flags.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace
@ -165,6 +166,12 @@ OPTIONS
At this point usage is displayed, and perf-script exits.
The flags field is synthesized and may have a value when Instruction
Trace decoding. The flags are "bcrosyiABEx" which stand for branch,
call, return, conditional, system, asynchronous, interrupt,
transaction abort, trace begin, trace end, and in transaction,
respectively.
Finally, a user may not set fields to none for all event types.
i.e., -f "" is not allowed.
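
A small usage sketch for the extended field list (taken from the option text
above; the decoded output is only indicative):

  perf script -f comm,tid,time,ip,sym,flags

With Instruction Trace decoding, the synthesized 'flags' column would show
letters from "bcrosyiABEx", e.g. 'bc' for a branch that is a call.
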
@ -221,6 +228,34 @@ OPTIONS
--header-only
Show only perf.data header.
--itrace::
Options for decoding instruction tracing data. The options are:
i synthesize instructions events
b synthesize branches events
c synthesize branches events (calls only)
r synthesize branches events (returns only)
x synthesize transactions events
e synthesize error events
d create a debug log
g synthesize a call chain (use with i or x)
The default is all events i.e. the same as --itrace=ibxe
In addition, the period (default 100000) for instructions events
can be specified in units of:
i instructions
t ticks
ms milliseconds
us microseconds
ns nanoseconds (default)
Also the call chain size (default 16, max. 1024) for instructions or
transactions events can be specified.
To disable decoding entirely, use --no-itrace.
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],


@ -168,7 +168,7 @@ Default is to monitor all CPUS.
Accumulate callchain of children to parent entry so that then can
show up in the output. The output will have a new "Children" column
and will be sorted on the data. It requires -g/--call-graph option
enabled.
enabled. See the `overhead calculation' section for more details.
--max-stack::
Set the stack depth limit when parsing the callchain, anything
@ -234,6 +234,7 @@ INTERACTIVE PROMPTING KEYS
Pressing any unmapped key displays a menu, and prompts for input.
include::callchain-overhead-calculation.txt[]
SEE ALSO
--------


@ -35,7 +35,7 @@ OPTIONS
-e::
--expr::
List of events to show, currently only syscall names.
List of syscalls to show, currently only syscall names.
Prefixing with ! shows all syscalls but the ones specified. You may
need to escape it.
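
A brief sketch of that option (the syscall names and the traced command are
examples; the backslash is the shell escaping mentioned above):

  perf trace -e open,close ls        # show only these syscalls
  perf trace -e \!open,close ls      # show every syscall except these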


@ -24,7 +24,7 @@ unexport MAKEFLAGS
# (To override it, run 'make JOBS=1' and similar.)
#
ifeq ($(JOBS),)
JOBS := $(shell egrep -c '^processor|^CPU' /proc/cpuinfo 2>/dev/null)
JOBS := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
ifeq ($(JOBS),0)
JOBS := 1
endif


@ -73,6 +73,8 @@ include config/utilities.mak
# for CTF data format.
#
# Define NO_LZMA if you do not want to support compressed (xz) kernel modules
#
# Define NO_AUXTRACE if you do not want AUX area tracing support
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))


@ -1,4 +1,5 @@
libperf-y += header.o
libperf-y += sym-handling.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_DWARF) += skip-callchain-idx.o


@ -0,0 +1,82 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* Copyright (C) 2015 Naveen N. Rao, IBM Corporation
*/
#include "debug.h"
#include "symbol.h"
#include "map.h"
#include "probe-event.h"
#ifdef HAVE_LIBELF_SUPPORT
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
return ehdr.e_type == ET_EXEC ||
ehdr.e_type == ET_REL ||
ehdr.e_type == ET_DYN;
}
#if defined(_CALL_ELF) && _CALL_ELF == 2
void arch__elf_sym_adjust(GElf_Sym *sym)
{
sym->st_value += PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
}
#endif
#endif
#if !defined(_CALL_ELF) || _CALL_ELF != 2
int arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
char *sym = syma->name;
/* Skip over any initial dot */
if (*sym == '.')
sym++;
/* Avoid "SyS" kernel syscall aliases */
if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
return SYMBOL_B;
if (strlen(sym) >= 10 && !strncmp(sym, "compat_SyS", 10))
return SYMBOL_B;
return SYMBOL_A;
}
/* Allow matching against dot variants */
int arch__compare_symbol_names(const char *namea, const char *nameb)
{
/* Skip over initial dot */
if (*namea == '.')
namea++;
if (*nameb == '.')
nameb++;
return strcmp(namea, nameb);
}
#endif
#if defined(_CALL_ELF) && _CALL_ELF == 2
bool arch__prefers_symtab(void)
{
return true;
}
#define PPC64LE_LEP_OFFSET 8
void arch__fix_tev_from_maps(struct perf_probe_event *pev,
struct probe_trace_event *tev, struct map *map)
{
/*
* ppc64 ABIv2 local entry point is currently always 2 instructions
* (8 bytes) after the global entry point.
*/
if (!pev->uprobes && map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
tev->point.address += PPC64LE_LEP_OFFSET;
tev->point.offset += PPC64LE_LEP_OFFSET;
}
}
#endif


@ -23,6 +23,7 @@
#include <pthread.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/types.h>
@ -51,6 +52,9 @@ struct thread_data {
unsigned int loops_done;
u64 val;
u64 runtime_ns;
u64 system_time_ns;
u64 user_time_ns;
double speed_gbs;
pthread_mutex_t *process_lock;
};
@ -1034,6 +1038,7 @@ static void *worker_thread(void *__tdata)
u64 bytes_done;
long work_done;
u32 l;
struct rusage rusage;
bind_to_cpumask(td->bind_cpumask);
bind_to_memnode(td->bind_node);
@ -1186,6 +1191,13 @@ static void *worker_thread(void *__tdata)
timersub(&stop, &start0, &diff);
td->runtime_ns = diff.tv_sec * 1000000000ULL;
td->runtime_ns += diff.tv_usec * 1000ULL;
td->speed_gbs = bytes_done / (td->runtime_ns / 1e9) / 1e9;
getrusage(RUSAGE_THREAD, &rusage);
td->system_time_ns = rusage.ru_stime.tv_sec * 1000000000ULL;
td->system_time_ns += rusage.ru_stime.tv_usec * 1000ULL;
td->user_time_ns = rusage.ru_utime.tv_sec * 1000000000ULL;
td->user_time_ns += rusage.ru_utime.tv_usec * 1000ULL;
free_data(thread_data, g->p.bytes_thread);
@ -1412,7 +1424,7 @@ static int __bench_numa(const char *name)
double runtime_sec_min;
int wait_stat;
double bytes;
int i, t;
int i, t, p;
if (init())
return -1;
@ -1548,6 +1560,24 @@ static int __bench_numa(const char *name)
print_res(name, bytes / runtime_sec_max / 1e9,
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
char tname[32];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
memset(tname, 0, 32);
td = g->threads + p*g->p.nr_threads + t;
snprintf(tname, 32, "process%d:thread%d", p, t);
print_res(tname, td->speed_gbs,
"GB/sec", "thread-speed", "GB/sec/thread speed");
print_res(tname, td->system_time_ns / 1e9,
"secs", "thread-system-time", "system CPU time/thread");
print_res(tname, td->user_time_ns / 1e9,
"secs", "thread-user-time", "user CPU time/thread");
}
}
}
free(pids);
deinit();


@ -69,6 +69,15 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
session = perf_session__new(&file, false, &build_id__mark_dso_hit_ops);
if (session == NULL)
return -1;
/*
* We take all buildids when the file contains AUX area tracing data
* because we do not decode the trace, which would take too long.
*/
if (!perf_data_file__is_pipe(&file) &&
perf_header__has_feat(&session->header, HEADER_AUXTRACE))
with_hits = false;
/*
* in pipe-mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID


@ -16,6 +16,7 @@
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/parse-options.h"
@ -26,10 +27,12 @@ struct perf_inject {
struct perf_session *session;
bool build_ids;
bool sched_stat;
bool have_auxtrace;
const char *input_name;
struct perf_data_file output;
u64 bytes_written;
struct list_head samples;
struct itrace_synth_opts itrace_synth_opts;
};
struct event_entry {
@ -38,14 +41,11 @@ struct event_entry {
union perf_event event[0];
};
static int perf_event__repipe_synth(struct perf_tool *tool,
union perf_event *event)
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
ssize_t size;
size = perf_data_file__write(&inject->output, event,
event->header.size);
size = perf_data_file__write(&inject->output, buf, sz);
if (size < 0)
return -errno;
@ -53,6 +53,15 @@ static int perf_event__repipe_synth(struct perf_tool *tool,
return 0;
}
static int perf_event__repipe_synth(struct perf_tool *tool,
union perf_event *event)
{
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
return output_bytes(inject, event, event->header.size);
}
static int perf_event__repipe_oe_synth(struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe __maybe_unused)
@ -86,6 +95,79 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
return perf_event__repipe_synth(tool, event);
}
#ifdef HAVE_AUXTRACE_SUPPORT
static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
char buf[4096];
ssize_t ssz;
int ret;
while (size > 0) {
ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
if (ssz < 0)
return -errno;
ret = output_bytes(inject, buf, ssz);
if (ret)
return ret;
size -= ssz;
}
return 0;
}
static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session
__maybe_unused)
{
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
int ret;
inject->have_auxtrace = true;
if (!inject->output.is_pipe) {
off_t offset;
offset = lseek(inject->output.fd, 0, SEEK_CUR);
if (offset == -1)
return -errno;
ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
event, offset);
if (ret < 0)
return ret;
}
if (perf_data_file__is_pipe(session->file) || !session->one_mmap) {
ret = output_bytes(inject, event, event->header.size);
if (ret < 0)
return ret;
ret = copy_bytes(inject, perf_data_file__fd(session->file),
event->auxtrace.size);
} else {
ret = output_bytes(inject, event,
event->header.size + event->auxtrace.size);
}
if (ret < 0)
return ret;
return event->auxtrace.size;
}
#else
static s64
perf_event__repipe_auxtrace(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *session __maybe_unused)
{
pr_err("AUX area tracing not supported\n");
return -EINVAL;
}
#endif
static int perf_event__repipe(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@ -155,6 +237,32 @@ static int perf_event__repipe_fork(struct perf_tool *tool,
return err;
}
static int perf_event__repipe_comm(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_comm(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
}
static int perf_event__repipe_exit(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_exit(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
}
static int perf_event__repipe_tracing_data(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session)
@ -167,6 +275,18 @@ static int perf_event__repipe_tracing_data(struct perf_tool *tool,
return err;
}
static int perf_event__repipe_id_index(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session)
{
int err;
perf_event__repipe_synth(tool, event);
err = perf_event__process_id_index(tool, event, session);
return err;
}
static int dso__read_build_id(struct dso *dso)
{
if (dso->has_build_id)
@ -351,16 +471,20 @@ static int __cmd_inject(struct perf_inject *inject)
struct perf_session *session = inject->session;
struct perf_data_file *file_out = &inject->output;
int fd = perf_data_file__fd(file_out);
u64 output_data_offset;
signal(SIGINT, sig_handler);
if (inject->build_ids || inject->sched_stat) {
if (inject->build_ids || inject->sched_stat ||
inject->itrace_synth_opts.set) {
inject->tool.mmap = perf_event__repipe_mmap;
inject->tool.mmap2 = perf_event__repipe_mmap2;
inject->tool.fork = perf_event__repipe_fork;
inject->tool.tracing_data = perf_event__repipe_tracing_data;
}
output_data_offset = session->header.data_offset;
if (inject->build_ids) {
inject->tool.sample = perf_event__inject_buildid;
} else if (inject->sched_stat) {
@ -379,17 +503,43 @@ static int __cmd_inject(struct perf_inject *inject)
else if (!strncmp(name, "sched:sched_stat_", 17))
evsel->handler = perf_inject__sched_stat;
}
} else if (inject->itrace_synth_opts.set) {
session->itrace_synth_opts = &inject->itrace_synth_opts;
inject->itrace_synth_opts.inject = true;
inject->tool.comm = perf_event__repipe_comm;
inject->tool.exit = perf_event__repipe_exit;
inject->tool.id_index = perf_event__repipe_id_index;
inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
inject->tool.auxtrace = perf_event__process_auxtrace;
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
/* Allow space in the header for new attributes */
output_data_offset = 4096;
}
if (!inject->itrace_synth_opts.set)
auxtrace_index__free(&session->auxtrace_index);
if (!file_out->is_pipe)
lseek(fd, session->header.data_offset, SEEK_SET);
lseek(fd, output_data_offset, SEEK_SET);
ret = perf_session__process_events(session);
if (!file_out->is_pipe) {
if (inject->build_ids)
if (inject->build_ids) {
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
if (inject->have_auxtrace)
dsos__hit_all(session);
}
/*
* The AUX areas have been removed and replaced with
* synthesized hardware events, so clear the feature flag.
*/
if (inject->itrace_synth_opts.set)
perf_header__clear_feat(&session->header,
HEADER_AUXTRACE);
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;
perf_session__write_header(session, session->evlist, fd, true);
}
@ -408,11 +558,16 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
.fork = perf_event__repipe,
.exit = perf_event__repipe,
.lost = perf_event__repipe,
.aux = perf_event__repipe,
.itrace_start = perf_event__repipe,
.read = perf_event__repipe_sample,
.throttle = perf_event__repipe,
.unthrottle = perf_event__repipe,
.attr = perf_event__repipe_attr,
.tracing_data = perf_event__repipe_op2_synth,
.auxtrace_info = perf_event__repipe_op2_synth,
.auxtrace = perf_event__repipe_auxtrace,
.auxtrace_error = perf_event__repipe_op2_synth,
.finished_round = perf_event__repipe_oe_synth,
.build_id = perf_event__repipe_op2_synth,
.id_index = perf_event__repipe_op2_synth,
@ -444,6 +599,9 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
"kallsyms pathname"),
OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
NULL, "opts", "Instruction Tracing options",
itrace_parse_synth_opts),
OPT_END()
};
const char * const inject_usage[] = {

File diff suppressed because it is too large.


@ -44,22 +44,19 @@
#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
#define DEFAULT_FUNC_FILTER "!_*"
#define DEFAULT_LIST_FILTER "*:*"
/* Session management structure */
static struct {
int command; /* Command short_name */
bool list_events;
bool force_add;
bool show_lines;
bool show_vars;
bool show_ext_vars;
bool show_funcs;
bool mod_events;
bool uprobes;
bool quiet;
bool target_used;
int nevents;
struct perf_probe_event events[MAX_PROBES];
struct strlist *dellist;
struct line_range line_range;
char *target;
int max_probe_points;
@ -93,6 +90,28 @@ static int parse_probe_event(const char *str)
return ret;
}
static int params_add_filter(const char *str)
{
const char *err = NULL;
int ret = 0;
pr_debug2("Add filter: %s\n", str);
if (!params.filter) {
params.filter = strfilter__new(str, &err);
if (!params.filter)
ret = err ? -EINVAL : -ENOMEM;
} else
ret = strfilter__or(params.filter, str, &err);
if (ret == -EINVAL) {
pr_err("Filter parse error at %td.\n", err - str + 1);
pr_err("Source: \"%s\"\n", str);
pr_err(" %*c\n", (int)(err - str + 1), '^');
}
return ret;
}
static int set_target(const char *ptr)
{
int found = 0;
@ -152,34 +171,11 @@ static int parse_probe_event_argv(int argc, const char **argv)
len += sprintf(&buf[len], "%s ", argv[i]);
}
params.mod_events = true;
ret = parse_probe_event(buf);
free(buf);
return ret;
}
static int opt_add_probe_event(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
if (str) {
params.mod_events = true;
return parse_probe_event(str);
} else
return 0;
}
static int opt_del_probe_event(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
if (str) {
params.mod_events = true;
if (!params.dellist)
params.dellist = strlist__new(true, NULL);
strlist__add(params.dellist, str);
}
return 0;
}
static int opt_set_target(const struct option *opt, const char *str,
int unset __maybe_unused)
{
@ -217,8 +213,10 @@ static int opt_set_target(const struct option *opt, const char *str,
return ret;
}
/* Command option callbacks */
#ifdef HAVE_DWARF_SUPPORT
static int opt_show_lines(const struct option *opt __maybe_unused,
static int opt_show_lines(const struct option *opt,
const char *str, int unset __maybe_unused)
{
int ret = 0;
@ -226,19 +224,19 @@ static int opt_show_lines(const struct option *opt __maybe_unused,
if (!str)
return 0;
if (params.show_lines) {
if (params.command == 'L') {
pr_warning("Warning: more than one --line options are"
" detected. Only the first one is valid.\n");
return 0;
}
params.show_lines = true;
params.command = opt->short_name;
ret = parse_line_range_desc(str, &params.line_range);
return ret;
}
static int opt_show_vars(const struct option *opt __maybe_unused,
static int opt_show_vars(const struct option *opt,
const char *str, int unset __maybe_unused)
{
struct perf_probe_event *pev = &params.events[params.nevents];
@ -252,29 +250,39 @@ static int opt_show_vars(const struct option *opt __maybe_unused,
pr_err(" Error: '--vars' doesn't accept arguments.\n");
return -EINVAL;
}
params.show_vars = true;
params.command = opt->short_name;
return ret;
}
#endif
static int opt_add_probe_event(const struct option *opt,
const char *str, int unset __maybe_unused)
{
if (str) {
params.command = opt->short_name;
return parse_probe_event(str);
}
return 0;
}
static int opt_set_filter_with_command(const struct option *opt,
const char *str, int unset)
{
if (!unset)
params.command = opt->short_name;
if (str)
return params_add_filter(str);
return 0;
}
static int opt_set_filter(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
const char *err;
if (str) {
pr_debug2("Set filter: %s\n", str);
if (params.filter)
strfilter__delete(params.filter);
params.filter = strfilter__new(str, &err);
if (!params.filter) {
pr_err("Filter parse error at %td.\n", err - str + 1);
pr_err("Source: \"%s\"\n", str);
pr_err(" %*c\n", (int)(err - str + 1), '^');
return -EINVAL;
}
}
if (str)
return params_add_filter(str);
return 0;
}
@ -290,8 +298,6 @@ static void cleanup_params(void)
for (i = 0; i < params.nevents; i++)
clear_perf_probe_event(params.events + i);
if (params.dellist)
strlist__delete(params.dellist);
line_range__clear(&params.line_range);
free(params.target);
if (params.filter)
@ -316,22 +322,24 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
"perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
"perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
"perf probe --list [GROUP:]EVENT ...",
#ifdef HAVE_DWARF_SUPPORT
"perf probe [<options>] --line 'LINEDESC'",
"perf probe [<options>] --vars 'PROBEPOINT'",
#endif
"perf probe [<options>] --funcs",
NULL
};
};
struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show parsed arguments, etc)"),
OPT_BOOLEAN('q', "quiet", &params.quiet,
"be quiet (do not show any mesages)"),
OPT_BOOLEAN('l', "list", &params.list_events,
"list up current probe events"),
OPT_CALLBACK_DEFAULT('l', "list", NULL, "[GROUP:]EVENT",
"list up probe events",
opt_set_filter_with_command, DEFAULT_LIST_FILTER),
OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
opt_del_probe_event),
opt_set_filter_with_command),
OPT_CALLBACK('a', "add", NULL,
#ifdef HAVE_DWARF_SUPPORT
"[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
@ -378,8 +386,9 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
"Set how many probe points can be found for a probe."),
OPT_BOOLEAN('F', "funcs", &params.show_funcs,
"Show potential probe-able functions."),
OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
"Show potential probe-able functions.",
opt_set_filter_with_command, DEFAULT_FUNC_FILTER),
OPT_CALLBACK('\0', "filter", NULL,
"[!]FILTER", "Set a filter (with --vars/funcs only)\n"
"\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n"
@ -402,6 +411,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
set_option_flag(options, 'L', "line", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 'V', "vars", PARSE_OPT_EXCLUSIVE);
#endif
set_option_flag(options, 'F', "funcs", PARSE_OPT_EXCLUSIVE);
argc = parse_options(argc, argv, options, probe_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
@ -410,11 +420,16 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
pr_warning(" Error: '-' is not supported.\n");
usage_with_options(probe_usage, options);
}
if (params.command && params.command != 'a') {
pr_warning(" Error: another command except --add is set.\n");
usage_with_options(probe_usage, options);
}
ret = parse_probe_event_argv(argc, argv);
if (ret < 0) {
pr_err_with_code(" Error: Command Parse Error.", ret);
return ret;
}
params.command = 'a';
}
if (params.quiet) {
@ -428,47 +443,35 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
if (params.max_probe_points == 0)
params.max_probe_points = MAX_PROBES;
if ((!params.nevents && !params.dellist && !params.list_events &&
!params.show_lines && !params.show_funcs))
usage_with_options(probe_usage, options);
/*
* Only consider the user's kernel image path if given.
*/
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (params.list_events) {
switch (params.command) {
case 'l':
if (params.uprobes) {
pr_warning(" Error: Don't use --list with --exec.\n");
usage_with_options(probe_usage, options);
}
ret = show_perf_probe_events();
ret = show_perf_probe_events(params.filter);
if (ret < 0)
pr_err_with_code(" Error: Failed to show event list.", ret);
return ret;
}
if (params.show_funcs) {
if (!params.filter)
params.filter = strfilter__new(DEFAULT_FUNC_FILTER,
NULL);
case 'F':
ret = show_available_funcs(params.target, params.filter,
params.uprobes);
strfilter__delete(params.filter);
params.filter = NULL;
if (ret < 0)
pr_err_with_code(" Error: Failed to show functions.", ret);
return ret;
}
#ifdef HAVE_DWARF_SUPPORT
if (params.show_lines) {
case 'L':
ret = show_line_range(&params.line_range, params.target,
params.uprobes);
if (ret < 0)
pr_err_with_code(" Error: Failed to show lines.", ret);
return ret;
}
if (params.show_vars) {
case 'V':
if (!params.filter)
params.filter = strfilter__new(DEFAULT_VAR_FILTER,
NULL);
@ -478,23 +481,18 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
params.target,
params.filter,
params.show_ext_vars);
strfilter__delete(params.filter);
params.filter = NULL;
if (ret < 0)
pr_err_with_code(" Error: Failed to show vars.", ret);
return ret;
}
#endif
if (params.dellist) {
ret = del_perf_probe_events(params.dellist);
case 'd':
ret = del_perf_probe_events(params.filter);
if (ret < 0) {
pr_err_with_code(" Error: Failed to delete events.", ret);
return ret;
}
}
if (params.nevents) {
break;
case 'a':
/* Ensure the last given target is used */
if (params.target && !params.target_used) {
pr_warning(" Error: -x/-m must follow the probe definitions.\n");
@ -508,6 +506,9 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
pr_err_with_code(" Error: Failed to add events.", ret);
return ret;
}
break;
default:
usage_with_options(probe_usage, options);
}
return 0;
}


@ -27,6 +27,7 @@
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include <unistd.h>
#include <sched.h>
@ -38,6 +39,7 @@ struct record {
struct record_opts opts;
u64 bytes_written;
struct perf_data_file file;
struct auxtrace_record *itr;
struct perf_evlist *evlist;
struct perf_session *session;
const char *progname;
@ -110,9 +112,12 @@ out:
return rc;
}
static volatile int done = 0;
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished = 0;
static volatile int child_finished;
static volatile int auxtrace_snapshot_enabled;
static volatile int auxtrace_snapshot_err;
static volatile int auxtrace_record__snapshot_started;
static void sig_handler(int sig)
{
@ -133,6 +138,133 @@ static void record__sig_exit(void)
raise(signr);
}
#ifdef HAVE_AUXTRACE_SUPPORT
static int record__process_auxtrace(struct perf_tool *tool,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
{
struct record *rec = container_of(tool, struct record, tool);
struct perf_data_file *file = &rec->file;
size_t padding;
u8 pad[8] = {0};
if (!perf_data_file__is_pipe(file)) {
off_t file_offset;
int fd = perf_data_file__fd(file);
int err;
file_offset = lseek(fd, 0, SEEK_CUR);
if (file_offset == -1)
return -1;
err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
event, file_offset);
if (err)
return err;
}
/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
padding = (len1 + len2) & 7;
if (padding)
padding = 8 - padding;
record__write(rec, event, event->header.size);
record__write(rec, data1, len1);
if (len2)
record__write(rec, data2, len2);
record__write(rec, &pad, padding);
return 0;
}
static int record__auxtrace_mmap_read(struct record *rec,
struct auxtrace_mmap *mm)
{
int ret;
ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
record__process_auxtrace);
if (ret < 0)
return ret;
if (ret)
rec->samples++;
return 0;
}
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
struct auxtrace_mmap *mm)
{
int ret;
ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
record__process_auxtrace,
rec->opts.auxtrace_snapshot_size);
if (ret < 0)
return ret;
if (ret)
rec->samples++;
return 0;
}
static int record__auxtrace_read_snapshot_all(struct record *rec)
{
int i;
int rc = 0;
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
struct auxtrace_mmap *mm =
&rec->evlist->mmap[i].auxtrace_mmap;
if (!mm->base)
continue;
if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
rc = -1;
goto out;
}
}
out:
return rc;
}
static void record__read_auxtrace_snapshot(struct record *rec)
{
pr_debug("Recording AUX area tracing snapshot\n");
if (record__auxtrace_read_snapshot_all(rec) < 0) {
auxtrace_snapshot_err = -1;
} else {
auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
if (!auxtrace_snapshot_err)
auxtrace_snapshot_enabled = 1;
}
}
#else
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
struct auxtrace_mmap *mm __maybe_unused)
{
return 0;
}
static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}
static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
return 0;
}
#endif
static int record__open(struct record *rec)
{
char msg[512];
@ -169,13 +301,16 @@ try_again:
goto out;
}
if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
"/proc/sys/kernel/perf_event_mlock_kb,\n"
"or try again with a smaller value of -m/--mmap_pages.\n"
"(current value: %u)\n", opts->mmap_pages);
"(current value: %u,%u)\n",
opts->mmap_pages, opts->auxtrace_mmap_pages);
rc = -errno;
} else {
pr_err("failed to mmap with %d (%s)\n", errno,
@ -270,12 +405,20 @@ static int record__mmap_read_all(struct record *rec)
int rc = 0;
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;
if (rec->evlist->mmap[i].base) {
if (record__mmap_read(rec, i) != 0) {
rc = -1;
goto out;
}
}
if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
record__auxtrace_mmap_read(rec, mm) != 0) {
rc = -1;
goto out;
}
}
/*
@ -305,6 +448,9 @@ static void record__init_features(struct record *rec)
if (!rec->opts.branch_stack)
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
if (!rec->opts.full_auxtrace)
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}
static volatile int workload_exec_errno;
@ -323,6 +469,8 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
child_finished = 1;
}
static void snapshot_sig_handler(int sig);
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
int err;
@ -343,6 +491,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
if (rec->opts.auxtrace_snapshot_mode)
signal(SIGUSR2, snapshot_sig_handler);
else
signal(SIGUSR2, SIG_IGN);
session = perf_session__new(file, false, tool);
if (session == NULL) {
@ -421,6 +573,13 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
}
if (rec->opts.full_auxtrace) {
err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
session, process_synthesized_event);
if (err)
goto out_delete_session;
}
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
machine);
if (err < 0)
@ -475,14 +634,27 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
perf_evlist__enable(rec->evlist);
}
auxtrace_snapshot_enabled = 1;
for (;;) {
int hits = rec->samples;
if (record__mmap_read_all(rec) < 0) {
auxtrace_snapshot_enabled = 0;
err = -1;
goto out_child;
}
if (auxtrace_record__snapshot_started) {
auxtrace_record__snapshot_started = 0;
if (!auxtrace_snapshot_err)
record__read_auxtrace_snapshot(rec);
if (auxtrace_snapshot_err) {
pr_err("AUX area tracing snapshot failed\n");
err = -1;
goto out_child;
}
}
if (hits == rec->samples) {
if (done || draining)
break;
@ -505,10 +677,12 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
* disable events in this case.
*/
if (done && !disabled && !target__none(&opts->target)) {
auxtrace_snapshot_enabled = 0;
perf_evlist__disable(rec->evlist);
disabled = true;
}
}
auxtrace_snapshot_enabled = 0;
if (forks && workload_exec_errno) {
char msg[STRERR_BUFSIZE];
@ -545,15 +719,23 @@ out_child:
if (!err && !file->is_pipe) {
rec->session->header.data_size += rec->bytes_written;
if (!rec->no_buildid)
if (!rec->no_buildid) {
process_buildids(rec);
/*
* We take all buildids when the file contains
* AUX area tracing data because we do not decode the
* trace, which would take too long.
*/
if (rec->opts.full_auxtrace)
dsos__hit_all(rec->session);
}
perf_session__write_header(rec->session, rec->evlist, fd, true);
}
if (!err && !quiet) {
char samples[128];
if (rec->samples)
if (rec->samples && !rec->opts.full_auxtrace)
scnprintf(samples, sizeof(samples),
" (%" PRIu64 " samples)", rec->samples);
else
@ -795,6 +977,49 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
return -1;
}
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
struct record_opts *opts = opt->value;
char *s, *p;
unsigned int mmap_pages;
int ret;
if (!str)
return -EINVAL;
s = strdup(str);
if (!s)
return -ENOMEM;
p = strchr(s, ',');
if (p)
*p = '\0';
if (*s) {
ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
if (ret)
goto out_free;
opts->mmap_pages = mmap_pages;
}
if (!p) {
ret = 0;
goto out_free;
}
ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
if (ret)
goto out_free;
opts->auxtrace_mmap_pages = mmap_pages;
out_free:
free(s);
return ret;
}
static const char * const __record_usage[] = {
"perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]",
@ -875,9 +1100,9 @@ struct option __record_options[] = {
&record.opts.no_inherit_set,
"child tasks do not inherit counters"),
OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
"number of mmap data pages",
perf_evlist__parse_mmap_pages),
OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
"number of mmap data pages and AUX area tracing mmap pages",
record__parse_mmap_pages),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
@ -929,6 +1154,8 @@ struct option __record_options[] = {
OPT_CALLBACK('k', "clockid", &record.opts,
"clockid", "clockid to use for events, see clock_gettime()",
parse_clockid),
OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
"opts", "AUX area tracing Snapshot Mode", ""),
OPT_END()
};
@ -936,7 +1163,7 @@ struct option *record_options = __record_options;
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
int err = -ENOMEM;
int err;
struct record *rec = &record;
char errbuf[BUFSIZ];
@ -957,6 +1184,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(record_usage, record_options);
}
if (!rec->itr) {
rec->itr = auxtrace_record__init(rec->evlist, &err);
if (err)
return err;
}
err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
rec->opts.auxtrace_snapshot_opts);
if (err)
return err;
err = -ENOMEM;
symbol__init(NULL);
if (symbol_conf.kptr_restrict)
@ -1002,6 +1242,10 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
usage_with_options(record_usage, record_options);
err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
if (err)
goto out_symbol_exit;
if (record_opts__config(&rec->opts)) {
err = -EINVAL;
goto out_symbol_exit;
@ -1011,5 +1255,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
out_symbol_exit:
perf_evlist__delete(rec->evlist);
symbol__exit();
auxtrace_record__free(rec->itr);
return err;
}
static void snapshot_sig_handler(int sig __maybe_unused)
{
if (!auxtrace_snapshot_enabled)
return;
auxtrace_snapshot_enabled = 0;
auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
auxtrace_record__snapshot_started = 1;
}


@ -36,6 +36,8 @@
#include "util/data.h"
#include "arch/common.h"
#include "util/auxtrace.h"
#include <dlfcn.h>
#include <linux/bitmap.h>
@ -585,6 +587,7 @@ parse_percent_limit(const struct option *opt, const char *str,
int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_session *session;
struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
struct stat st;
bool has_br_stack = false;
int branch_mode = -1;
@ -607,6 +610,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.attr = perf_event__process_attr,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
@ -717,6 +723,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"how to display percentage of filtered entries", parse_filter_percentage),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options",
itrace_parse_synth_opts),
OPT_END()
};
struct perf_data_file file = {
@ -761,6 +770,8 @@ repeat:
report.queue_size);
}
session->itrace_synth_opts = &itrace_synth_opts;
report.session = session;
has_br_stack = perf_header__has_feat(&session->header,


@ -16,6 +16,7 @@
#include "util/evsel.h"
#include "util/sort.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include <linux/bitmap.h>
static char const *script_name;
@ -26,6 +27,7 @@ static u64 nr_unordered;
static bool no_callchain;
static bool latency_format;
static bool system_wide;
static bool print_flags;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@ -146,9 +148,10 @@ static const char *output_field2str(enum perf_output_field field)
#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x)
static int perf_evsel__check_stype(struct perf_evsel *evsel,
u64 sample_type, const char *sample_msg,
enum perf_output_field field)
static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
u64 sample_type, const char *sample_msg,
enum perf_output_field field,
bool allow_user_set)
{
struct perf_event_attr *attr = &evsel->attr;
int type = attr->type;
@ -158,6 +161,8 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
return 0;
if (output[type].user_set) {
if (allow_user_set)
return 0;
evname = perf_evsel__name(evsel);
pr_err("Samples for '%s' event do not have %s attribute set. "
"Cannot print '%s' field.\n",
@ -175,10 +180,22 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
return 0;
}
static int perf_evsel__check_stype(struct perf_evsel *evsel,
u64 sample_type, const char *sample_msg,
enum perf_output_field field)
{
return perf_evsel__do_check_stype(evsel, sample_type, sample_msg, field,
false);
}
static int perf_evsel__check_attr(struct perf_evsel *evsel,
struct perf_session *session)
{
struct perf_event_attr *attr = &evsel->attr;
bool allow_user_set;
allow_user_set = perf_header__has_feat(&session->header,
HEADER_AUXTRACE);
if (PRINT_FIELD(TRACE) &&
!perf_session__has_traces(session, "record -R"))
@ -191,8 +208,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
}
if (PRINT_FIELD(ADDR) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
PERF_OUTPUT_ADDR))
perf_evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
PERF_OUTPUT_ADDR, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
@ -229,8 +246,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
return -EINVAL;
if (PRINT_FIELD(CPU) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
PERF_OUTPUT_CPU))
perf_evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
PERF_OUTPUT_CPU, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(PERIOD) &&
@ -445,6 +462,25 @@ static void print_sample_bts(union perf_event *event,
printf("\n");
}
static void print_sample_flags(u32 flags)
{
const char *chars = PERF_IP_FLAG_CHARS;
const int n = strlen(PERF_IP_FLAG_CHARS);
char str[33];
int i, pos = 0;
for (i = 0; i < n; i++, flags >>= 1) {
if (flags & 1)
str[pos++] = chars[i];
}
for (; i < 32; i++, flags >>= 1) {
if (flags & 1)
str[pos++] = '?';
}
str[pos] = 0;
printf(" %-4s ", str);
}
static void process_event(union perf_event *event, struct perf_sample *sample,
struct perf_evsel *evsel, struct addr_location *al)
{
@ -464,6 +500,9 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
printf("%s: ", evname ? evname : "[unknown]");
}
if (print_flags)
print_sample_flags(sample->flags);
if (is_bts_event(attr)) {
print_sample_bts(event, sample, evsel, thread, al);
return;
@ -999,12 +1038,15 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
}
}
tok = strtok(tok, ",");
while (tok) {
for (tok = strtok(tok, ","); tok; tok = strtok(NULL, ",")) {
for (i = 0; i < imax; ++i) {
if (strcmp(tok, all_output_options[i].str) == 0)
break;
}
if (i == imax && strcmp(tok, "flags") == 0) {
print_flags = true;
continue;
}
if (i == imax) {
fprintf(stderr, "Invalid field requested.\n");
rc = -EINVAL;
@ -1032,8 +1074,6 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
}
output[type].fields |= all_output_options[i].field;
}
tok = strtok(NULL, ",");
}
if (type >= 0) {
@ -1497,6 +1537,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;
struct itrace_synth_opts itrace_synth_opts = { .set = false, };
char *script_path = NULL;
const char **__argv;
int i, j, err = 0;
@ -1511,6 +1552,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
.attr = process_attr,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.auxtrace_error = perf_event__process_auxtrace_error,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
@ -1549,7 +1594,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"comma separated output fields prepend with 'type:'. "
"Valid types: hw,sw,trace,raw. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
"addr,symoff,period", parse_output_fields),
"addr,symoff,period,flags", parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
@ -1570,6 +1615,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
"Show the mmap events"),
OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options",
itrace_parse_synth_opts),
OPT_END()
};
const char * const script_subcommands[] = { "record", "report", NULL };
@ -1765,6 +1813,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
script.session = session;
session->itrace_synth_opts = &itrace_synth_opts;
if (cpu_list) {
err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
if (err < 0)


@ -247,21 +247,50 @@ out_free:
return -1;
}
enum {
CTX_BIT_USER = 1 << 0,
CTX_BIT_KERNEL = 1 << 1,
CTX_BIT_HV = 1 << 2,
CTX_BIT_HOST = 1 << 3,
CTX_BIT_IDLE = 1 << 4,
CTX_BIT_MAX = 1 << 5,
};
#define NUM_CTX CTX_BIT_MAX
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
static struct stats runtime_branches_stats[MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;
static struct stats runtime_transaction_stats[MAX_NR_CPUS];
static struct stats runtime_elision_stats[MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
static int evsel_context(struct perf_evsel *evsel)
{
int ctx = 0;
if (evsel->attr.exclude_kernel)
ctx |= CTX_BIT_KERNEL;
if (evsel->attr.exclude_user)
ctx |= CTX_BIT_USER;
if (evsel->attr.exclude_hv)
ctx |= CTX_BIT_HV;
if (evsel->attr.exclude_host)
ctx |= CTX_BIT_HOST;
if (evsel->attr.exclude_idle)
ctx |= CTX_BIT_IDLE;
return ctx;
}
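To make the effect of the new context index concrete, here is an illustrative sketch (not part of the patch; shadow_stats_example(), ucount and kcount are hypothetical, and the modifier-to-flag mapping is stated as an assumption about the usual perf event modifiers): two events that differ only in their exclude_* bits now accumulate into disjoint shadow-stat slots instead of overwriting each other.

static void shadow_stats_example(int cpu, u64 ucount, u64 kcount)
{
	int ctx_u = CTX_BIT_KERNEL | CTX_BIT_HV;	/* evsel_context() result for 'cycles:u' */
	int ctx_k = CTX_BIT_USER | CTX_BIT_HV;		/* evsel_context() result for 'cycles:k' */

	/* the two events land in different slots of the same per-cpu array */
	update_stats(&runtime_cycles_stats[ctx_u][cpu], ucount);
	update_stats(&runtime_cycles_stats[ctx_k][cpu], kcount);
}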
static void perf_stat__reset_stats(struct perf_evlist *evlist)
{
@ -356,37 +385,39 @@ static struct perf_evsel *nth_evsel(int n)
static void update_shadow_stats(struct perf_evsel *counter, u64 *count,
int cpu)
{
int ctx = evsel_context(counter);
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
update_stats(&runtime_nsecs_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[cpu], count[0]);
update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
else if (transaction_run &&
perf_evsel__cmp(counter, nth_evsel(T_CYCLES_IN_TX)))
update_stats(&runtime_cycles_in_tx_stats[cpu], count[0]);
update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
else if (transaction_run &&
perf_evsel__cmp(counter, nth_evsel(T_TRANSACTION_START)))
update_stats(&runtime_transaction_stats[cpu], count[0]);
update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
else if (transaction_run &&
perf_evsel__cmp(counter, nth_evsel(T_ELISION_START)))
update_stats(&runtime_elision_stats[cpu], count[0]);
update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
update_stats(&runtime_stalled_cycles_front_stats[cpu], count[0]);
update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
update_stats(&runtime_stalled_cycles_back_stats[cpu], count[0]);
update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
update_stats(&runtime_branches_stats[cpu], count[0]);
update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
update_stats(&runtime_cacherefs_stats[cpu], count[0]);
update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
update_stats(&runtime_l1_dcache_stats[cpu], count[0]);
update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
update_stats(&runtime_l1_icache_stats[cpu], count[0]);
update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
update_stats(&runtime_ll_cache_stats[cpu], count[0]);
update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
update_stats(&runtime_dtlb_cache_stats[cpu], count[0]);
update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
update_stats(&runtime_itlb_cache_stats[cpu], count[0]);
update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
}
static void zero_per_pkg(struct perf_evsel *counter)
@ -908,8 +939,9 @@ static void print_stalled_cycles_frontend(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_cycles_stats[cpu]);
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -927,8 +959,9 @@ static void print_stalled_cycles_backend(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_cycles_stats[cpu]);
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -946,8 +979,9 @@ static void print_branch_misses(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_branches_stats[cpu]);
total = avg_stats(&runtime_branches_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -965,8 +999,9 @@ static void print_l1_dcache_misses(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_l1_dcache_stats[cpu]);
total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -984,8 +1019,9 @@ static void print_l1_icache_misses(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_l1_icache_stats[cpu]);
total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -1003,8 +1039,9 @@ static void print_dtlb_cache_misses(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_dtlb_cache_stats[cpu]);
total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -1022,8 +1059,9 @@ static void print_itlb_cache_misses(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_itlb_cache_stats[cpu]);
total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -1041,8 +1079,9 @@ static void print_ll_cache_misses(int cpu,
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
total = avg_stats(&runtime_ll_cache_stats[cpu]);
total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);
if (total)
ratio = avg / total * 100.0;
@ -1060,6 +1099,7 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
double sc = evsel->scale;
const char *fmt;
int cpu = cpu_map__id_to_cpu(id);
int ctx = evsel_context(evsel);
if (csv_output) {
fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
@ -1091,15 +1131,15 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
return;
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
total = avg_stats(&runtime_cycles_stats[cpu]);
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
if (total) {
ratio = avg / total;
fprintf(output, " # %5.2f insns per cycle ", ratio);
} else {
fprintf(output, " ");
}
total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));
if (total && avg) {
ratio = total / avg;
@ -1110,46 +1150,46 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
runtime_branches_stats[cpu].n != 0) {
runtime_branches_stats[ctx][cpu].n != 0) {
print_branch_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_l1_dcache_stats[cpu].n != 0) {
runtime_l1_dcache_stats[ctx][cpu].n != 0) {
print_l1_dcache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_l1_icache_stats[cpu].n != 0) {
runtime_l1_icache_stats[ctx][cpu].n != 0) {
print_l1_icache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_dtlb_cache_stats[cpu].n != 0) {
runtime_dtlb_cache_stats[ctx][cpu].n != 0) {
print_dtlb_cache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_itlb_cache_stats[cpu].n != 0) {
runtime_itlb_cache_stats[ctx][cpu].n != 0) {
print_itlb_cache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_ll_cache_stats[cpu].n != 0) {
runtime_ll_cache_stats[ctx][cpu].n != 0) {
print_ll_cache_misses(cpu, evsel, avg);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
runtime_cacherefs_stats[cpu].n != 0) {
total = avg_stats(&runtime_cacherefs_stats[cpu]);
runtime_cacherefs_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);
if (total)
ratio = avg * 100 / total;
@ -1171,15 +1211,15 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
}
} else if (transaction_run &&
perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX))) {
total = avg_stats(&runtime_cycles_stats[cpu]);
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
if (total)
fprintf(output,
" # %5.2f%% transactional cycles ",
100.0 * (avg / total));
} else if (transaction_run &&
perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX_CP))) {
total = avg_stats(&runtime_cycles_stats[cpu]);
total2 = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
if (total2 < avg)
total2 = avg;
if (total)
@ -1189,8 +1229,8 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
} else if (transaction_run &&
perf_evsel__cmp(evsel, nth_evsel(T_TRANSACTION_START)) &&
avg > 0 &&
runtime_cycles_in_tx_stats[cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
if (total)
ratio = total / avg;
@ -1199,8 +1239,8 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
} else if (transaction_run &&
perf_evsel__cmp(evsel, nth_evsel(T_ELISION_START)) &&
avg > 0 &&
runtime_cycles_in_tx_stats[cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
if (total)
ratio = total / avg;
@ -1541,7 +1581,7 @@ static int setup_events(const char * const *attrs, unsigned len)
unsigned i;
for (i = 0; i < len; i++) {
if (parse_events(evsel_list, attrs[i]))
if (parse_events(evsel_list, attrs[i], NULL))
return -1;
}
return 0;

View File

@ -2660,16 +2660,15 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN(0, "comm", &trace.show_comm,
"show the thread COMM next to its id"),
OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
"list of events to trace"),
OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
"trace events on existing process id"),
OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
"trace events on existing thread id"),
OPT_CALLBACK(0, "filter-pids", &trace, "float",
"show only events with duration > N.M ms", trace__set_filter_pids),
OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
"pids to filter (by the kernel)", trace__set_filter_pids),
OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",

View File

@ -610,6 +610,11 @@ ifdef LIBBABELTRACE
endif
endif
ifndef NO_AUXTRACE
$(call detected,CONFIG_AUXTRACE)
CFLAGS += -DHAVE_AUXTRACE_SUPPORT
endif
# Among the variables below, these:
# perfexecdir
# template_dir

View File

@ -54,12 +54,17 @@ struct record_opts {
bool period;
bool sample_intr_regs;
bool running_time;
bool full_auxtrace;
bool auxtrace_snapshot_mode;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
unsigned int user_freq;
u64 branch_stack;
u64 default_interval;
u64 user_interval;
size_t auxtrace_snapshot_size;
const char *auxtrace_snapshot_opts;
bool sample_transaction;
unsigned initial_delay;
bool use_clockid;

View File

@ -482,7 +482,7 @@ static int do_test_code_reading(bool try_kcore)
else
str = "cycles";
pr_debug("Parsing event '%s'\n", str);
ret = parse_events(evlist, str);
ret = parse_events(evlist, str, NULL);
if (ret < 0) {
pr_debug("parse_events failed\n");
goto out_err;

View File

@ -23,7 +23,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
__perf_evsel__hw_cache_type_op_res_name(type, op, i,
name, sizeof(name));
err = parse_events(evlist, name);
err = parse_events(evlist, name, NULL);
if (err)
ret = err;
}
@ -71,7 +71,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
return -ENOMEM;
for (i = 0; i < nr_names; ++i) {
err = parse_events(evlist, names[i]);
err = parse_events(evlist, names[i], NULL);
if (err) {
pr_debug("failed to parse event '%s', err %d\n",
names[i], err);

View File

@ -695,7 +695,7 @@ int test__hists_cumulate(void)
TEST_ASSERT_VAL("No memory", evlist);
err = parse_events(evlist, "cpu-clock");
err = parse_events(evlist, "cpu-clock", NULL);
if (err)
goto out;

View File

@ -108,10 +108,10 @@ int test__hists_filter(void)
TEST_ASSERT_VAL("No memory", evlist);
err = parse_events(evlist, "cpu-clock");
err = parse_events(evlist, "cpu-clock", NULL);
if (err)
goto out;
err = parse_events(evlist, "task-clock");
err = parse_events(evlist, "task-clock", NULL);
if (err)
goto out;

View File

@ -282,10 +282,10 @@ int test__hists_link(void)
if (evlist == NULL)
return -ENOMEM;
err = parse_events(evlist, "cpu-clock");
err = parse_events(evlist, "cpu-clock", NULL);
if (err)
goto out;
err = parse_events(evlist, "task-clock");
err = parse_events(evlist, "task-clock", NULL);
if (err)
goto out;

View File

@ -590,7 +590,7 @@ int test__hists_output(void)
TEST_ASSERT_VAL("No memory", evlist);
err = parse_events(evlist, "cpu-clock");
err = parse_events(evlist, "cpu-clock", NULL);
if (err)
goto out;

View File

@ -78,8 +78,8 @@ int test__keep_tracking(void)
perf_evlist__set_maps(evlist, cpus, threads);
CHECK__(parse_events(evlist, "dummy:u"));
CHECK__(parse_events(evlist, "cycles:u"));
CHECK__(parse_events(evlist, "dummy:u", NULL));
CHECK__(parse_events(evlist, "cycles:u", NULL));
perf_evlist__config(evlist, &opts);

View File

@ -32,6 +32,7 @@ make_no_backtrace := NO_BACKTRACE=1
make_no_libnuma := NO_LIBNUMA=1
make_no_libaudit := NO_LIBAUDIT=1
make_no_libbionic := NO_LIBBIONIC=1
make_no_auxtrace := NO_AUXTRACE=1
make_tags := tags
make_cscope := cscope
make_help := help
@ -52,7 +53,7 @@ make_static := LDFLAGS=-static
make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1
# $(run) contains all available tests
run := make_pure
@ -74,6 +75,7 @@ run += make_no_backtrace
run += make_no_libnuma
run += make_no_libaudit
run += make_no_libbionic
run += make_no_auxtrace
run += make_help
run += make_doc
run += make_perf_o
@ -223,7 +225,19 @@ tarpkg:
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1
all: $(run) $(run_O) tarpkg
make_kernelsrc:
@echo " - make -C <kernelsrc> tools/perf"
$(call clean); \
(make -C ../.. tools/perf) > $@ 2>&1 && \
test -x perf && rm -f $@ || (cat $@ ; false)
make_kernelsrc_tools:
@echo " - make -C <kernelsrc>/tools perf"
$(call clean); \
(make -C ../../tools perf) > $@ 2>&1 && \
test -x perf && rm -f $@ || (cat $@ ; false)
all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
@echo OK
out: $(run_O)

View File

@ -1571,7 +1571,7 @@ static int test_event(struct evlist_test *e)
if (evlist == NULL)
return -ENOMEM;
ret = parse_events(evlist, e->name);
ret = parse_events(evlist, e->name, NULL);
if (ret) {
pr_debug("failed to parse event '%s', err %d\n",
e->name, ret);

View File

@ -68,7 +68,7 @@ int test__perf_time_to_tsc(void)
perf_evlist__set_maps(evlist, cpus, threads);
CHECK__(parse_events(evlist, "cycles:u"));
CHECK__(parse_events(evlist, "cycles:u", NULL));
perf_evlist__config(evlist, &opts);

View File

@ -152,7 +152,8 @@ int test__pmu(void)
if (ret)
break;
ret = perf_pmu__config_terms(&formats, &attr, terms, false);
ret = perf_pmu__config_terms(&formats, &attr, terms,
false, NULL);
if (ret)
break;

View File

@ -347,7 +347,7 @@ int test__switch_tracking(void)
perf_evlist__set_maps(evlist, cpus, threads);
/* First event */
err = parse_events(evlist, "cpu-clock:u");
err = parse_events(evlist, "cpu-clock:u", NULL);
if (err) {
pr_debug("Failed to parse event dummy:u\n");
goto out_err;
@ -356,7 +356,7 @@ int test__switch_tracking(void)
cpu_clocks_evsel = perf_evlist__last(evlist);
/* Second event */
err = parse_events(evlist, "cycles:u");
err = parse_events(evlist, "cycles:u", NULL);
if (err) {
pr_debug("Failed to parse event cycles:u\n");
goto out_err;
@ -371,7 +371,7 @@ int test__switch_tracking(void)
goto out;
}
err = parse_events(evlist, sched_switch);
err = parse_events(evlist, sched_switch, NULL);
if (err) {
pr_debug("Failed to parse event %s\n", sched_switch);
goto out_err;
@ -401,7 +401,7 @@ int test__switch_tracking(void)
perf_evsel__set_sample_bit(cycles_evsel, TIME);
/* Fourth event */
err = parse_events(evlist, "dummy:u");
err = parse_events(evlist, "dummy:u", NULL);
if (err) {
pr_debug("Failed to parse event dummy:u\n");
goto out_err;

View File

@ -25,6 +25,9 @@ struct hist_browser {
struct hists *hists;
struct hist_entry *he_selection;
struct map_symbol *selection;
struct hist_browser_timer *hbt;
struct pstack *pstack;
struct perf_session_env *env;
int print_seq;
bool show_dso;
bool show_headers;
@ -60,7 +63,7 @@ static int hist_browser__get_folding(struct hist_browser *browser)
struct hist_entry *he =
rb_entry(nd, struct hist_entry, rb_node);
if (he->ms.unfolded)
if (he->unfolded)
unfolded_rows += he->nr_rows;
}
return unfolded_rows;
@ -136,24 +139,19 @@ static char tree__folded_sign(bool unfolded)
return unfolded ? '-' : '+';
}
static char map_symbol__folded(const struct map_symbol *ms)
{
return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
}
static char hist_entry__folded(const struct hist_entry *he)
{
return map_symbol__folded(&he->ms);
return he->has_children ? tree__folded_sign(he->unfolded) : ' ';
}
static char callchain_list__folded(const struct callchain_list *cl)
{
return map_symbol__folded(&cl->ms);
return cl->has_children ? tree__folded_sign(cl->unfolded) : ' ';
}
static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
static void callchain_list__set_folding(struct callchain_list *cl, bool unfold)
{
ms->unfolded = unfold ? ms->has_children : false;
cl->unfolded = unfold ? cl->has_children : false;
}
static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
@ -189,7 +187,7 @@ static int callchain_node__count_rows(struct callchain_node *node)
list_for_each_entry(chain, &node->val, list) {
++n;
unfolded = chain->ms.unfolded;
unfolded = chain->unfolded;
}
if (unfolded)
@ -211,15 +209,27 @@ static int callchain__count_rows(struct rb_root *chain)
return n;
}
static bool map_symbol__toggle_fold(struct map_symbol *ms)
static bool hist_entry__toggle_fold(struct hist_entry *he)
{
if (!ms)
if (!he)
return false;
if (!ms->has_children)
if (!he->has_children)
return false;
ms->unfolded = !ms->unfolded;
he->unfolded = !he->unfolded;
return true;
}
static bool callchain_list__toggle_fold(struct callchain_list *cl)
{
if (!cl)
return false;
if (!cl->has_children)
return false;
cl->unfolded = !cl->unfolded;
return true;
}
@ -235,10 +245,10 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *no
list_for_each_entry(chain, &child->val, list) {
if (first) {
first = false;
chain->ms.has_children = chain->list.next != &child->val ||
chain->has_children = chain->list.next != &child->val ||
!RB_EMPTY_ROOT(&child->rb_root);
} else
chain->ms.has_children = chain->list.next == &child->val &&
chain->has_children = chain->list.next == &child->val &&
!RB_EMPTY_ROOT(&child->rb_root);
}
@ -252,11 +262,11 @@ static void callchain_node__init_have_children(struct callchain_node *node,
struct callchain_list *chain;
chain = list_entry(node->val.next, struct callchain_list, list);
chain->ms.has_children = has_sibling;
chain->has_children = has_sibling;
if (!list_empty(&node->val)) {
chain = list_entry(node->val.prev, struct callchain_list, list);
chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
}
callchain_node__init_have_children_rb_tree(node);
@ -276,7 +286,7 @@ static void callchain__init_have_children(struct rb_root *root)
static void hist_entry__init_have_children(struct hist_entry *he)
{
if (!he->init_have_children) {
he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
he->init_have_children = true;
}
@ -284,14 +294,22 @@ static void hist_entry__init_have_children(struct hist_entry *he)
static bool hist_browser__toggle_fold(struct hist_browser *browser)
{
if (map_symbol__toggle_fold(browser->selection)) {
struct hist_entry *he = browser->he_selection;
struct hist_entry *he = browser->he_selection;
struct map_symbol *ms = browser->selection;
struct callchain_list *cl = container_of(ms, struct callchain_list, ms);
bool has_children;
if (ms == &he->ms)
has_children = hist_entry__toggle_fold(he);
else
has_children = callchain_list__toggle_fold(cl);
if (has_children) {
hist_entry__init_have_children(he);
browser->b.nr_entries -= he->nr_rows;
browser->nr_callchain_rows -= he->nr_rows;
if (he->ms.unfolded)
if (he->unfolded)
he->nr_rows = callchain__count_rows(&he->sorted_chain);
else
he->nr_rows = 0;
@ -318,8 +336,8 @@ static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool
list_for_each_entry(chain, &child->val, list) {
++n;
map_symbol__set_folding(&chain->ms, unfold);
has_children = chain->ms.has_children;
callchain_list__set_folding(chain, unfold);
has_children = chain->has_children;
}
if (has_children)
@ -337,8 +355,8 @@ static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
list_for_each_entry(chain, &node->val, list) {
++n;
map_symbol__set_folding(&chain->ms, unfold);
has_children = chain->ms.has_children;
callchain_list__set_folding(chain, unfold);
has_children = chain->has_children;
}
if (has_children)
@ -363,9 +381,9 @@ static int callchain__set_folding(struct rb_root *chain, bool unfold)
static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
{
hist_entry__init_have_children(he);
map_symbol__set_folding(&he->ms, unfold);
he->unfolded = unfold ? he->has_children : false;
if (he->ms.has_children) {
if (he->has_children) {
int n = callchain__set_folding(&he->sorted_chain, unfold);
he->nr_rows = unfold ? n : 0;
} else
@ -406,11 +424,11 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
"Or reduce the sampling frequency.");
}
static int hist_browser__run(struct hist_browser *browser,
struct hist_browser_timer *hbt)
static int hist_browser__run(struct hist_browser *browser)
{
int key;
char title[160];
struct hist_browser_timer *hbt = browser->hbt;
int delay_secs = hbt ? hbt->refresh : 0;
browser->b.entries = &browser->hists->entries;
@ -1016,7 +1034,7 @@ do_offset:
if (offset > 0) {
do {
h = rb_entry(nd, struct hist_entry, rb_node);
if (h->ms.unfolded) {
if (h->unfolded) {
u16 remaining = h->nr_rows - h->row_offset;
if (offset > remaining) {
offset -= remaining;
@ -1037,7 +1055,7 @@ do_offset:
} else if (offset < 0) {
while (1) {
h = rb_entry(nd, struct hist_entry, rb_node);
if (h->ms.unfolded) {
if (h->unfolded) {
if (first) {
if (-offset > h->row_offset) {
offset += h->row_offset;
@ -1074,7 +1092,7 @@ do_offset:
* row_offset at its last entry.
*/
h = rb_entry(nd, struct hist_entry, rb_node);
if (h->ms.unfolded)
if (h->unfolded)
h->row_offset = h->nr_rows;
break;
}
@ -1195,7 +1213,9 @@ static int hist_browser__dump(struct hist_browser *browser)
return 0;
}
static struct hist_browser *hist_browser__new(struct hists *hists)
static struct hist_browser *hist_browser__new(struct hists *hists,
struct hist_browser_timer *hbt,
struct perf_session_env *env)
{
struct hist_browser *browser = zalloc(sizeof(*browser));
@ -1206,6 +1226,8 @@ static struct hist_browser *hist_browser__new(struct hists *hists)
browser->b.seek = ui_browser__hists_seek;
browser->b.use_navkeypressed = true;
browser->show_headers = symbol_conf.show_hist_headers;
browser->hbt = hbt;
browser->env = env;
}
return browser;
@ -1395,6 +1417,257 @@ close_file_and_continue:
return ret;
}
struct popup_action {
struct thread *thread;
struct dso *dso;
struct map_symbol ms;
int (*fn)(struct hist_browser *browser, struct popup_action *act);
};
static int
do_annotate(struct hist_browser *browser, struct popup_action *act)
{
struct perf_evsel *evsel;
struct annotation *notes;
struct hist_entry *he;
int err;
if (!objdump_path && perf_session_env__lookup_objdump(browser->env))
return 0;
notes = symbol__annotation(act->ms.sym);
if (!notes->src)
return 0;
evsel = hists_to_evsel(browser->hists);
err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt);
he = hist_browser__selected_entry(browser);
/*
* offer the option to annotate the other branch source or target
* (if they exist) when returning from annotate
*/
if ((err == 'q' || err == CTRL('c')) && he->branch_info)
return 1;
ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
if (err)
ui_browser__handle_resize(&browser->b);
return 0;
}
static int
add_annotate_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
struct map *map, struct symbol *sym)
{
if (sym == NULL || map->dso->annotate_warned)
return 0;
if (asprintf(optstr, "Annotate %s", sym->name) < 0)
return 0;
act->ms.map = map;
act->ms.sym = sym;
act->fn = do_annotate;
return 1;
}
static int
do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
{
struct thread *thread = act->thread;
if (browser->hists->thread_filter) {
pstack__remove(browser->pstack, &browser->hists->thread_filter);
perf_hpp__set_elide(HISTC_THREAD, false);
thread__zput(browser->hists->thread_filter);
ui_helpline__pop();
} else {
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
thread->comm_set ? thread__comm_str(thread) : "",
thread->tid);
browser->hists->thread_filter = thread__get(thread);
perf_hpp__set_elide(HISTC_THREAD, false);
pstack__push(browser->pstack, &browser->hists->thread_filter);
}
hists__filter_by_thread(browser->hists);
hist_browser__reset(browser);
return 0;
}
static int
add_thread_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct thread *thread)
{
if (thread == NULL)
return 0;
if (asprintf(optstr, "Zoom %s %s(%d) thread",
browser->hists->thread_filter ? "out of" : "into",
thread->comm_set ? thread__comm_str(thread) : "",
thread->tid) < 0)
return 0;
act->thread = thread;
act->fn = do_zoom_thread;
return 1;
}
static int
do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
{
struct dso *dso = act->dso;
if (browser->hists->dso_filter) {
pstack__remove(browser->pstack, &browser->hists->dso_filter);
perf_hpp__set_elide(HISTC_DSO, false);
browser->hists->dso_filter = NULL;
ui_helpline__pop();
} else {
if (dso == NULL)
return 0;
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
dso->kernel ? "the Kernel" : dso->short_name);
browser->hists->dso_filter = dso;
perf_hpp__set_elide(HISTC_DSO, true);
pstack__push(browser->pstack, &browser->hists->dso_filter);
}
hists__filter_by_dso(browser->hists);
hist_browser__reset(browser);
return 0;
}
static int
add_dso_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct dso *dso)
{
if (dso == NULL)
return 0;
if (asprintf(optstr, "Zoom %s %s DSO",
browser->hists->dso_filter ? "out of" : "into",
dso->kernel ? "the Kernel" : dso->short_name) < 0)
return 0;
act->dso = dso;
act->fn = do_zoom_dso;
return 1;
}
static int
do_browse_map(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
map__browse(act->ms.map);
return 0;
}
static int
add_map_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr, struct map *map)
{
if (map == NULL)
return 0;
if (asprintf(optstr, "Browse map details") < 0)
return 0;
act->ms.map = map;
act->fn = do_browse_map;
return 1;
}
static int
do_run_script(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
char script_opt[64];
memset(script_opt, 0, sizeof(script_opt));
if (act->thread) {
scnprintf(script_opt, sizeof(script_opt), " -c %s ",
thread__comm_str(act->thread));
} else if (act->ms.sym) {
scnprintf(script_opt, sizeof(script_opt), " -S %s ",
act->ms.sym->name);
}
script_browse(script_opt);
return 0;
}
static int
add_script_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
struct thread *thread, struct symbol *sym)
{
if (thread) {
if (asprintf(optstr, "Run scripts for samples of thread [%s]",
thread__comm_str(thread)) < 0)
return 0;
} else if (sym) {
if (asprintf(optstr, "Run scripts for samples of symbol [%s]",
sym->name) < 0)
return 0;
} else {
if (asprintf(optstr, "Run scripts for all samples") < 0)
return 0;
}
act->thread = thread;
act->ms.sym = sym;
act->fn = do_run_script;
return 1;
}
static int
do_switch_data(struct hist_browser *browser __maybe_unused,
struct popup_action *act __maybe_unused)
{
if (switch_data_file()) {
ui__warning("Won't switch the data files due to\n"
"no valid data file get selected!\n");
return 0;
}
return K_SWITCH_INPUT_DATA;
}
static int
add_switch_opt(struct hist_browser *browser,
struct popup_action *act, char **optstr)
{
if (!is_report_browser(browser->hbt))
return 0;
if (asprintf(optstr, "Switch to another data file in PWD") < 0)
return 0;
act->fn = do_switch_data;
return 1;
}
static int
do_exit_browser(struct hist_browser *browser __maybe_unused,
struct popup_action *act __maybe_unused)
{
return 0;
}
static int
add_exit_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr)
{
if (asprintf(optstr, "Exit") < 0)
return 0;
act->fn = do_exit_browser;
return 1;
}
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
@ -1421,14 +1694,14 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
struct perf_session_env *env)
{
struct hists *hists = evsel__hists(evsel);
struct hist_browser *browser = hist_browser__new(hists);
struct hist_browser *browser = hist_browser__new(hists, hbt, env);
struct branch_info *bi;
struct pstack *fstack;
char *options[16];
#define MAX_OPTIONS 16
char *options[MAX_OPTIONS];
struct popup_action actions[MAX_OPTIONS];
int nr_options = 0;
int key = -1;
char buf[64];
char script_opt[64];
int delay_secs = hbt ? hbt->refresh : 0;
struct perf_hpp_fmt *fmt;
@ -1473,13 +1746,14 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
hist_browser__update_nr_entries(browser);
}
fstack = pstack__new(2);
if (fstack == NULL)
browser->pstack = pstack__new(2);
if (browser->pstack == NULL)
goto out;
ui_helpline__push(helpline);
memset(options, 0, sizeof(options));
memset(actions, 0, sizeof(actions));
perf_hpp__for_each_format(fmt)
perf_hpp__reset_width(fmt, hists);
@ -1489,16 +1763,12 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
while (1) {
struct thread *thread = NULL;
const struct dso *dso = NULL;
int choice = 0,
annotate = -2, zoom_dso = -2, zoom_thread = -2,
annotate_f = -2, annotate_t = -2, browse_map = -2;
int scripts_comm = -2, scripts_symbol = -2,
scripts_all = -2, switch_data = -2;
struct dso *dso = NULL;
int choice = 0;
nr_options = 0;
key = hist_browser__run(browser, hbt);
key = hist_browser__run(browser);
if (browser->he_selection != NULL) {
thread = hist_browser__selected_thread(browser);
@ -1526,17 +1796,25 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
browser->selection->sym == NULL ||
browser->selection->map->dso->annotate_warned)
continue;
goto do_annotate;
actions->ms.map = browser->selection->map;
actions->ms.sym = browser->selection->sym;
do_annotate(browser, actions);
continue;
case 'P':
hist_browser__dump(browser);
continue;
case 'd':
goto zoom_dso;
actions->dso = dso;
do_zoom_dso(browser, actions);
continue;
case 'V':
browser->show_dso = !browser->show_dso;
continue;
case 't':
goto zoom_thread;
actions->thread = thread;
do_zoom_thread(browser, actions);
continue;
case '/':
if (ui_browser__input_window("Symbol to show",
"Please enter the name of symbol you want to see",
@ -1548,12 +1826,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
}
continue;
case 'r':
if (is_report_browser(hbt))
goto do_scripts;
if (is_report_browser(hbt)) {
actions->thread = NULL;
actions->ms.sym = NULL;
do_run_script(browser, actions);
}
continue;
case 's':
if (is_report_browser(hbt))
goto do_data_switch;
if (is_report_browser(hbt)) {
key = do_switch_data(browser, actions);
if (key == K_SWITCH_INPUT_DATA)
goto out_free_stack;
}
continue;
case 'i':
/* env->arch is NULL for live-mode (i.e. perf top) */
@ -1583,7 +1867,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
case K_LEFT: {
const void *top;
if (pstack__empty(fstack)) {
if (pstack__empty(browser->pstack)) {
/*
* Go back to the perf_evsel_menu__run or other user
*/
@ -1591,11 +1875,17 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
goto out_free_stack;
continue;
}
top = pstack__pop(fstack);
if (top == &browser->hists->dso_filter)
goto zoom_out_dso;
top = pstack__peek(browser->pstack);
if (top == &browser->hists->dso_filter) {
/*
* No need to set actions->dso here since
* it's just to remove the current filter.
* Ditto for thread below.
*/
do_zoom_dso(browser, actions);
}
if (top == &browser->hists->thread_filter)
goto zoom_out_thread;
do_zoom_thread(browser, actions);
continue;
}
case K_ESC:
@ -1623,196 +1913,71 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
if (bi == NULL)
goto skip_annotation;
if (bi->from.sym != NULL &&
!bi->from.map->dso->annotate_warned &&
asprintf(&options[nr_options], "Annotate %s", bi->from.sym->name) > 0) {
annotate_f = nr_options++;
}
if (bi->to.sym != NULL &&
!bi->to.map->dso->annotate_warned &&
(bi->to.sym != bi->from.sym ||
bi->to.map->dso != bi->from.map->dso) &&
asprintf(&options[nr_options], "Annotate %s", bi->to.sym->name) > 0) {
annotate_t = nr_options++;
}
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
bi->from.map,
bi->from.sym);
if (bi->to.sym != bi->from.sym)
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
bi->to.map,
bi->to.sym);
} else {
if (browser->selection->sym != NULL &&
!browser->selection->map->dso->annotate_warned) {
struct annotation *notes;
notes = symbol__annotation(browser->selection->sym);
if (notes->src &&
asprintf(&options[nr_options], "Annotate %s",
browser->selection->sym->name) > 0) {
annotate = nr_options++;
}
}
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
browser->selection->map,
browser->selection->sym);
}
skip_annotation:
if (thread != NULL &&
asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
(browser->hists->thread_filter ? "out of" : "into"),
(thread->comm_set ? thread__comm_str(thread) : ""),
thread->tid) > 0)
zoom_thread = nr_options++;
if (dso != NULL &&
asprintf(&options[nr_options], "Zoom %s %s DSO",
(browser->hists->dso_filter ? "out of" : "into"),
(dso->kernel ? "the Kernel" : dso->short_name)) > 0)
zoom_dso = nr_options++;
if (browser->selection != NULL &&
browser->selection->map != NULL &&
asprintf(&options[nr_options], "Browse map details") > 0)
browse_map = nr_options++;
nr_options += add_thread_opt(browser, &actions[nr_options],
&options[nr_options], thread);
nr_options += add_dso_opt(browser, &actions[nr_options],
&options[nr_options], dso);
nr_options += add_map_opt(browser, &actions[nr_options],
&options[nr_options],
browser->selection->map);
/* perf script support */
if (browser->he_selection) {
struct symbol *sym;
if (asprintf(&options[nr_options], "Run scripts for samples of thread [%s]",
thread__comm_str(browser->he_selection->thread)) > 0)
scripts_comm = nr_options++;
sym = browser->he_selection->ms.sym;
if (sym && sym->namelen &&
asprintf(&options[nr_options], "Run scripts for samples of symbol [%s]",
sym->name) > 0)
scripts_symbol = nr_options++;
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
thread, NULL);
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
NULL, browser->selection->sym);
}
if (asprintf(&options[nr_options], "Run scripts for all samples") > 0)
scripts_all = nr_options++;
if (is_report_browser(hbt) && asprintf(&options[nr_options],
"Switch to another data file in PWD") > 0)
switch_data = nr_options++;
nr_options += add_script_opt(browser, &actions[nr_options],
&options[nr_options], NULL, NULL);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
add_exit_option:
options[nr_options++] = (char *)"Exit";
retry_popup_menu:
choice = ui__popup_menu(nr_options, options);
nr_options += add_exit_opt(browser, &actions[nr_options],
&options[nr_options]);
if (choice == nr_options - 1)
break;
do {
struct popup_action *act;
if (choice == -1) {
free_popup_options(options, nr_options - 1);
continue;
}
if (choice == annotate || choice == annotate_t || choice == annotate_f) {
struct hist_entry *he;
struct annotation *notes;
struct map_symbol ms;
int err;
do_annotate:
if (!objdump_path && perf_session_env__lookup_objdump(env))
continue;
he = hist_browser__selected_entry(browser);
if (he == NULL)
continue;
if (choice == annotate_f) {
ms.map = he->branch_info->from.map;
ms.sym = he->branch_info->from.sym;
} else if (choice == annotate_t) {
ms.map = he->branch_info->to.map;
ms.sym = he->branch_info->to.sym;
} else {
ms = *browser->selection;
}
notes = symbol__annotation(ms.sym);
if (!notes->src)
continue;
err = map_symbol__tui_annotate(&ms, evsel, hbt);
/*
* offer option to annotate the other branch source or target
* (if they exists) when returning from annotate
*/
if ((err == 'q' || err == CTRL('c'))
&& annotate_t != -2 && annotate_f != -2)
goto retry_popup_menu;
ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
if (err)
ui_browser__handle_resize(&browser->b);
} else if (choice == browse_map)
map__browse(browser->selection->map);
else if (choice == zoom_dso) {
zoom_dso:
if (browser->hists->dso_filter) {
pstack__remove(fstack, &browser->hists->dso_filter);
zoom_out_dso:
ui_helpline__pop();
browser->hists->dso_filter = NULL;
perf_hpp__set_elide(HISTC_DSO, false);
} else {
if (dso == NULL)
continue;
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
dso->kernel ? "the Kernel" : dso->short_name);
browser->hists->dso_filter = dso;
perf_hpp__set_elide(HISTC_DSO, true);
pstack__push(fstack, &browser->hists->dso_filter);
}
hists__filter_by_dso(hists);
hist_browser__reset(browser);
} else if (choice == zoom_thread) {
zoom_thread:
if (browser->hists->thread_filter) {
pstack__remove(fstack, &browser->hists->thread_filter);
zoom_out_thread:
ui_helpline__pop();
thread__zput(browser->hists->thread_filter);
perf_hpp__set_elide(HISTC_THREAD, false);
} else {
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
thread->comm_set ? thread__comm_str(thread) : "",
thread->tid);
browser->hists->thread_filter = thread__get(thread);
perf_hpp__set_elide(HISTC_THREAD, false);
pstack__push(fstack, &browser->hists->thread_filter);
}
hists__filter_by_thread(hists);
hist_browser__reset(browser);
}
/* perf scripts support */
else if (choice == scripts_all || choice == scripts_comm ||
choice == scripts_symbol) {
do_scripts:
memset(script_opt, 0, 64);
if (choice == scripts_comm)
sprintf(script_opt, " -c %s ", thread__comm_str(browser->he_selection->thread));
if (choice == scripts_symbol)
sprintf(script_opt, " -S %s ", browser->he_selection->ms.sym->name);
script_browse(script_opt);
}
/* Switch to another data file */
else if (choice == switch_data) {
do_data_switch:
if (!switch_data_file()) {
key = K_SWITCH_INPUT_DATA;
choice = ui__popup_menu(nr_options, options);
if (choice == -1 || choice >= nr_options)
break;
} else
ui__warning("Won't switch the data files due to\n"
"no valid data file get selected!\n");
}
act = &actions[choice];
key = act->fn(browser, act);
} while (key == 1);
if (key == K_SWITCH_INPUT_DATA)
break;
}
out_free_stack:
pstack__delete(fstack);
pstack__delete(browser->pstack);
out:
hist_browser__delete(browser);
free_popup_options(options, nr_options - 1);
free_popup_options(options, MAX_OPTIONS);
return key;
}

View File

@ -74,6 +74,7 @@ libperf-y += data.o
libperf-$(CONFIG_X86) += tsc.o
libperf-y += cloexec.o
libperf-y += thread-stack.o
libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-$(CONFIG_LIBELF) += symbol-elf.o
libperf-$(CONFIG_LIBELF) += probe-event.o
@ -117,7 +118,7 @@ $(OUTPUT)util/pmu-bison.c: util/pmu.y
CFLAGS_parse-events-flex.o += -w
CFLAGS_pmu-flex.o += -w
CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -w
CFLAGS_pmu-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c

File diff suppressed because it is too large

View File

@ -0,0 +1,643 @@
/*
* auxtrace.h: AUX area trace support
* Copyright (c) 2013-2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H
#include <sys/types.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include "../perf.h"
#include "event.h"
#include "session.h"
#include "debug.h"
union perf_event;
struct perf_session;
struct perf_evlist;
struct perf_tool;
struct option;
struct record_opts;
struct auxtrace_info_event;
struct events_stats;
enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
};
enum itrace_period_type {
PERF_ITRACE_PERIOD_INSTRUCTIONS,
PERF_ITRACE_PERIOD_TICKS,
PERF_ITRACE_PERIOD_NANOSECS,
};
/**
* struct itrace_synth_opts - AUX area tracing synthesis options.
* @set: indicates whether or not options have been set
* @inject: indicates the event (not just the sample) must be fully synthesized
* because 'perf inject' will write it out
* @instructions: whether to synthesize 'instructions' events
* @branches: whether to synthesize 'branches' events
* @transactions: whether to synthesize events for transactions
* @errors: whether to synthesize decoder error events
* @dont_decode: whether to skip decoding entirely
* @log: write a decoding log
* @calls: limit branch samples to calls (can be combined with @returns)
* @returns: limit branch samples to returns (can be combined with @calls)
* @callchain: add callchain to 'instructions' events
* @callchain_sz: maximum callchain size
* @period: 'instructions' events period
* @period_type: 'instructions' events period type
*/
struct itrace_synth_opts {
bool set;
bool inject;
bool instructions;
bool branches;
bool transactions;
bool errors;
bool dont_decode;
bool log;
bool calls;
bool returns;
bool callchain;
unsigned int callchain_sz;
unsigned long long period;
enum itrace_period_type period_type;
};
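For reference, the builtin-script.c changes earlier in this merge wire these options up roughly as follows (the lines are reproduced from that diff; this is a usage summary, not additional API):

	struct itrace_synth_opts itrace_synth_opts = { .set = false, };

	/* --itrace takes an optional option string, parsed by itrace_parse_synth_opts() */
	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
			    "Instruction Tracing options",
			    itrace_parse_synth_opts),

	/* the parsed options are then attached to the session before processing */
	session->itrace_synth_opts = &itrace_synth_opts;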
/**
* struct auxtrace_index_entry - indexes an AUX area tracing event within a
* perf.data file.
* @file_offset: offset within the perf.data file
* @sz: size of the event
*/
struct auxtrace_index_entry {
u64 file_offset;
u64 sz;
};
#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
/**
* struct auxtrace_index - index of AUX area tracing events within a perf.data
* file.
* @list: linking a number of arrays of entries
* @nr: number of entries
* @entries: array of entries
*/
struct auxtrace_index {
struct list_head list;
size_t nr;
struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
/**
* struct auxtrace - session callbacks to allow AUX area data decoding.
* @process_event: lets the decoder see all session events
* @process_auxtrace_event: lets the decoder see each AUX area tracing data event
* @flush_events: process any remaining data
* @free_events: free resources associated with event processing
* @free: free resources associated with the session
*/
struct auxtrace {
int (*process_event)(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool);
int (*process_auxtrace_event)(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool);
int (*flush_events)(struct perf_session *session,
struct perf_tool *tool);
void (*free_events)(struct perf_session *session);
void (*free)(struct perf_session *session);
};
/**
* struct auxtrace_buffer - a buffer containing AUX area tracing data.
* @list: buffers are queued in a list held by struct auxtrace_queue
* @size: size of the buffer in bytes
* @pid: in per-thread mode, the pid this buffer is associated with
* @tid: in per-thread mode, the tid this buffer is associated with
* @cpu: in per-cpu mode, the cpu this buffer is associated with
* @data: actual buffer data (can be null if the data has not been loaded)
* @data_offset: file offset at which the buffer can be read
* @mmap_addr: mmap address at which the buffer can be read
* @mmap_size: size of the mmap at @mmap_addr
* @data_needs_freeing: @data was malloc'd so free it when it is no longer
* needed
* @consecutive: the original data was split up and this buffer is consecutive
* to the previous buffer
* @offset: offset as determined by aux_head / aux_tail members of struct
* perf_event_mmap_page
* @reference: an implementation-specific reference determined when the data is
* recorded
* @buffer_nr: used to number each buffer
* @use_size: implementation actually only uses this number of bytes
* @use_data: implementation actually only uses data starting at this address
*/
struct auxtrace_buffer {
struct list_head list;
size_t size;
pid_t pid;
pid_t tid;
int cpu;
void *data;
off_t data_offset;
void *mmap_addr;
size_t mmap_size;
bool data_needs_freeing;
bool consecutive;
u64 offset;
u64 reference;
u64 buffer_nr;
size_t use_size;
void *use_data;
};
/**
* struct auxtrace_queue - a queue of AUX area tracing data buffers.
* @head: head of buffer list
* @tid: in per-thread mode, the tid this queue is associated with
* @cpu: in per-cpu mode, the cpu this queue is associated with
* @set: %true once this queue has been dedicated to a specific thread or cpu
* @priv: implementation-specific data
*/
struct auxtrace_queue {
struct list_head head;
pid_t tid;
int cpu;
bool set;
void *priv;
};
/**
* struct auxtrace_queues - an array of AUX area tracing queues.
* @queue_array: array of queues
* @nr_queues: number of queues
* @new_data: set whenever new data is queued
* @populated: queues have been fully populated using the auxtrace_index
* @next_buffer_nr: used to number each buffer
*/
struct auxtrace_queues {
struct auxtrace_queue *queue_array;
unsigned int nr_queues;
bool new_data;
bool populated;
u64 next_buffer_nr;
};
/**
* struct auxtrace_heap_item - element of struct auxtrace_heap.
* @queue_nr: queue number
* @ordinal: value used for sorting (lowest ordinal is top of the heap);
* expected to be a timestamp
*/
struct auxtrace_heap_item {
unsigned int queue_nr;
u64 ordinal;
};
/**
* struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
* @heap_array: the heap
* @heap_cnt: the number of elements in the heap
* @heap_sz: maximum number of elements (grows as needed)
*/
struct auxtrace_heap {
struct auxtrace_heap_item *heap_array;
unsigned int heap_cnt;
unsigned int heap_sz;
};
/**
* struct auxtrace_mmap - records an mmap of the auxtrace buffer.
* @base: address of mapped area
* @userpg: pointer to buffer's perf_event_mmap_page
* @mask: %0 if @len is not a power of two, otherwise (@len - %1)
* @len: size of mapped area
* @prev: previous aux_head
* @idx: index of this mmap
* @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
* mmap) otherwise %0
* @cpu: cpu number for a per-cpu mmap otherwise %-1
*/
struct auxtrace_mmap {
void *base;
void *userpg;
size_t mask;
size_t len;
u64 prev;
int idx;
pid_t tid;
int cpu;
};
/**
* struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
* @mask: %0 if @len is not a power of two, otherwise (@len - %1)
* @offset: file offset of mapped area
* @len: size of mapped area
* @prot: mmap memory protection
* @idx: index of this mmap
* @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
* mmap) otherwise %0
* @cpu: cpu number for a per-cpu mmap otherwise %-1
*/
struct auxtrace_mmap_params {
size_t mask;
off_t offset;
size_t len;
int prot;
int idx;
pid_t tid;
int cpu;
};
/**
* struct auxtrace_record - callbacks for recording AUX area data.
* @recording_options: validate and process recording options
* @info_priv_size: return the size of the private data in auxtrace_info_event
* @info_fill: fill-in the private data in auxtrace_info_event
* @free: free this auxtrace record structure
* @snapshot_start: starting a snapshot
* @snapshot_finish: finishing a snapshot
* @find_snapshot: find data to snapshot within auxtrace mmap
* @parse_snapshot_options: parse snapshot options
* @reference: provide a 64-bit reference number for auxtrace_event
* @read_finish: called after reading from an auxtrace mmap
*/
struct auxtrace_record {
int (*recording_options)(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts);
size_t (*info_priv_size)(struct auxtrace_record *itr);
int (*info_fill)(struct auxtrace_record *itr,
struct perf_session *session,
struct auxtrace_info_event *auxtrace_info,
size_t priv_size);
void (*free)(struct auxtrace_record *itr);
int (*snapshot_start)(struct auxtrace_record *itr);
int (*snapshot_finish)(struct auxtrace_record *itr);
int (*find_snapshot)(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm, unsigned char *data,
u64 *head, u64 *old);
int (*parse_snapshot_options)(struct auxtrace_record *itr,
struct record_opts *opts,
const char *str);
u64 (*reference)(struct auxtrace_record *itr);
int (*read_finish)(struct auxtrace_record *itr, int idx);
};
#ifdef HAVE_AUXTRACE_SUPPORT
/*
* In snapshot mode the mmapped page is read-only, which makes using
* __sync_val_compare_and_swap() problematic. However, snapshot mode expects
* that the buffer is not updated while the snapshot is made (e.g. Intel PT
* disables the event), so there is no race anyway.
*/
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
u64 head = ACCESS_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
rmb();
return head;
}
static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
u64 head = ACCESS_ONCE(pc->aux_head);
#else
u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif
/* Ensure all reads are done after we read the head */
rmb();
return head;
}
static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
u64 old_tail;
#endif
/* Ensure all reads are done before we write the tail out */
mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
pc->aux_tail = tail;
#else
do {
old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
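A minimal consumer sketch of the head/tail protocol the helpers above implement (illustrative only; drain_auxtrace_mmap_example() and consume_aux_data() are hypothetical, and buffer-offset/wrap-around handling via @mask and @len is omitted):

static void drain_auxtrace_mmap_example(struct auxtrace_mmap *mm)
{
	u64 head = auxtrace_mmap__read_head(mm);
	u64 old = mm->prev;			/* position consumed last time */

	if (head == old)
		return;				/* nothing new since the last drain */

	/* hand the new data in [old, head) to a hypothetical consumer */
	consume_aux_data(mm->base, old, head);

	mm->prev = head;
	auxtrace_mmap__write_tail(mm, head);	/* allow the kernel to reuse the space */
}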
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
struct auxtrace_mmap_params *mp,
void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
off_t auxtrace_offset,
unsigned int auxtrace_pages,
bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct perf_evlist *evlist, int idx,
bool per_cpu);
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2);
int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn);
int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size);
int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
struct perf_session *session,
union perf_event *event, off_t data_offset,
struct auxtrace_buffer **buffer_ptr);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
struct perf_session *session);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);
struct auxtrace_cache_entry {
struct hlist_node hash;
u32 key;
};
struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
struct auxtrace_cache_entry *entry);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
int *err);
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts,
const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct auxtrace_info_event *auxtrace_info,
size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm,
unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
bool needs_swap);
void auxtrace_index__free(struct list_head *head);
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg);
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process);
int perf_event__process_auxtrace_info(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
s64 perf_event__process_auxtrace(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
int perf_event__process_auxtrace_error(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts);
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);
static inline int auxtrace__process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
{
if (!session->auxtrace)
return 0;
return session->auxtrace->process_event(session, event, sample, tool);
}
static inline int auxtrace__flush_events(struct perf_session *session,
struct perf_tool *tool)
{
if (!session->auxtrace)
return 0;
return session->auxtrace->flush_events(session, tool);
}
static inline void auxtrace__free_events(struct perf_session *session)
{
if (!session->auxtrace)
return;
return session->auxtrace->free_events(session);
}
static inline void auxtrace__free(struct perf_session *session)
{
if (!session->auxtrace)
return;
return session->auxtrace->free(session);
}
#else
static inline struct auxtrace_record *
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused,
int *err __maybe_unused)
{
*err = 0;
return NULL;
}
static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}
static inline int
perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
struct perf_tool *tool __maybe_unused,
struct perf_session *session __maybe_unused,
perf_event__handler_t process __maybe_unused)
{
return -EINVAL;
}
static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
struct perf_evlist *evlist __maybe_unused,
struct record_opts *opts __maybe_unused)
{
return 0;
}
#define perf_event__process_auxtrace_info 0
#define perf_event__process_auxtrace 0
#define perf_event__process_auxtrace_error 0
static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
__maybe_unused,
union perf_event *event
__maybe_unused)
{
}
static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
__maybe_unused)
{
}
static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
pr_err("AUX area tracing not supported\n");
return -EINVAL;
}
static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
struct record_opts *opts __maybe_unused,
const char *str)
{
if (!str)
return 0;
pr_err("AUX area tracing not supported\n");
return -EINVAL;
}
static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
return 0;
}
static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
return 0;
}
static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}
static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}
static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}
static inline
int auxtrace_index__write(int fd __maybe_unused,
struct list_head *head __maybe_unused)
{
return -EINVAL;
}
static inline
int auxtrace_index__process(int fd __maybe_unused,
u64 size __maybe_unused,
struct perf_session *session __maybe_unused,
bool needs_swap __maybe_unused)
{
return -EINVAL;
}
static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
struct auxtrace_mmap_params *mp,
void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
off_t auxtrace_offset,
unsigned int auxtrace_pages,
bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct perf_evlist *evlist, int idx,
bool per_cpu);
#endif
#endif

View File

@ -72,6 +72,10 @@ extern struct callchain_param callchain_param;
struct callchain_list {
u64 ip;
struct map_symbol ms;
struct /* for TUI */ {
bool unfolded;
bool has_children;
};
char *srcline;
struct list_head list;
};

View File

@ -14,6 +14,7 @@
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
@ -38,12 +39,21 @@ struct evsel_priv {
struct bt_ctf_event_class *event_class;
};
#define MAX_CPUS 4096
struct ctf_stream {
struct bt_ctf_stream *stream;
int cpu;
u32 count;
};
struct ctf_writer {
/* writer primitives */
struct bt_ctf_writer *writer;
struct bt_ctf_stream *stream;
struct bt_ctf_stream_class *stream_class;
struct bt_ctf_clock *clock;
struct bt_ctf_writer *writer;
struct ctf_stream **stream;
int stream_cnt;
struct bt_ctf_stream_class *stream_class;
struct bt_ctf_clock *clock;
/* data types */
union {
@ -65,6 +75,9 @@ struct convert {
u64 events_size;
u64 events_count;
/* Ordered events configured queue size. */
u64 queue_size;
};
static int value_set(struct bt_ctf_field_type *type,
@ -153,6 +166,43 @@ get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
return cw->data.u32;
}
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
unsigned long long value_mask;
/*
* value_mask = (1 << (size * 8 - 1)) - 1.
* Set value_mask explicitly per size to keep the code easy to read.
*/
switch (size) {
case 1:
value_mask = 0x7fULL;
break;
case 2:
value_mask = 0x7fffULL;
break;
case 4:
value_mask = 0x7fffffffULL;
break;
case 8:
/*
* For a 64 bit value, return it as is. There is no need
* to fill the high bits.
*/
/* Fall through */
default:
/* BUG! */
return value_int;
}
/* If it is a positive value, don't adjust. */
if ((value_int & (~0ULL - value_mask)) == 0)
return value_int;
/* Fill the upper part of value_int with 1s to make it a negative long long. */
return (value_int & value_mask) | ~value_mask;
}
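For illustration, a minimal standalone sketch of the masking above applied to a 1-byte field; the helper name and values are made up for this example and are not part of the patch:

#include <stdio.h>

/* Illustration only: mirrors adjust_signedness() for size == 1. */
static unsigned long long sign_extend_byte(unsigned long long value_int)
{
	unsigned long long value_mask = 0x7fULL;

	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;			/* positive, keep as is */

	return (value_int & value_mask) | ~value_mask;	/* fill upper bits with 1s */
}

int main(void)
{
	/* 0xfe stored in a signed 8-bit tracepoint field represents -2. */
	printf("%lld\n", (long long)sign_extend_byte(0xfeULL));	/* prints -2 */
	return 0;
}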
static int add_tracepoint_field_value(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
@ -164,7 +214,6 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
struct bt_ctf_field *field;
const char *name = fmtf->name;
void *data = sample->raw_data;
unsigned long long value_int;
unsigned long flags = fmtf->flags;
unsigned int n_items;
unsigned int i;
@ -172,6 +221,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
unsigned int len;
int ret;
name = fmtf->alias;
offset = fmtf->offset;
len = fmtf->size;
if (flags & FIELD_IS_STRING)
@ -208,11 +258,6 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
type = get_tracepoint_field_type(cw, fmtf);
for (i = 0; i < n_items; i++) {
if (!(flags & FIELD_IS_STRING))
value_int = pevent_read_number(
fmtf->event->pevent,
data + offset + i * len, len);
if (flags & FIELD_IS_ARRAY)
field = bt_ctf_field_array_get_field(array_field, i);
else
@ -226,12 +271,21 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
if (flags & FIELD_IS_STRING)
ret = bt_ctf_field_string_set_value(field,
data + offset + i * len);
else if (!(flags & FIELD_IS_SIGNED))
ret = bt_ctf_field_unsigned_integer_set_value(
field, value_int);
else
ret = bt_ctf_field_signed_integer_set_value(
field, value_int);
else {
unsigned long long value_int;
value_int = pevent_read_number(
fmtf->event->pevent,
data + offset + i * len, len);
if (!(flags & FIELD_IS_SIGNED))
ret = bt_ctf_field_unsigned_integer_set_value(
field, value_int);
else
ret = bt_ctf_field_signed_integer_set_value(
field, adjust_signedness(value_int, len));
}
if (ret) {
pr_err("failed to set file value %s\n", name);
goto err_put_field;
@ -346,12 +400,6 @@ static int add_generic_values(struct ctf_writer *cw,
return -1;
}
if (type & PERF_SAMPLE_CPU) {
ret = value_set_u32(cw, event, "perf_cpu", sample->cpu);
if (ret)
return -1;
}
if (type & PERF_SAMPLE_PERIOD) {
ret = value_set_u64(cw, event, "perf_period", sample->period);
if (ret)
@ -381,6 +429,129 @@ static int add_generic_values(struct ctf_writer *cw,
return 0;
}
static int ctf_stream__flush(struct ctf_stream *cs)
{
int err = 0;
if (cs) {
err = bt_ctf_stream_flush(cs->stream);
if (err)
pr_err("CTF stream %d flush failed\n", cs->cpu);
pr("Flush stream for cpu %d (%u samples)\n",
cs->cpu, cs->count);
cs->count = 0;
}
return err;
}
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
struct ctf_stream *cs;
struct bt_ctf_field *pkt_ctx = NULL;
struct bt_ctf_field *cpu_field = NULL;
struct bt_ctf_stream *stream = NULL;
int ret;
cs = zalloc(sizeof(*cs));
if (!cs) {
pr_err("Failed to allocate ctf stream\n");
return NULL;
}
stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
if (!stream) {
pr_err("Failed to create CTF stream\n");
goto out;
}
pkt_ctx = bt_ctf_stream_get_packet_context(stream);
if (!pkt_ctx) {
pr_err("Failed to obtain packet context\n");
goto out;
}
cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
bt_ctf_field_put(pkt_ctx);
if (!cpu_field) {
pr_err("Failed to obtain cpu field\n");
goto out;
}
ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
if (ret) {
pr_err("Failed to update CPU number\n");
goto out;
}
bt_ctf_field_put(cpu_field);
cs->cpu = cpu;
cs->stream = stream;
return cs;
out:
if (cpu_field)
bt_ctf_field_put(cpu_field);
if (stream)
bt_ctf_stream_put(stream);
free(cs);
return NULL;
}
static void ctf_stream__delete(struct ctf_stream *cs)
{
if (cs) {
bt_ctf_stream_put(cs->stream);
free(cs);
}
}
static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
struct ctf_stream *cs = cw->stream[cpu];
if (!cs) {
cs = ctf_stream__create(cw, cpu);
cw->stream[cpu] = cs;
}
return cs;
}
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
struct perf_evsel *evsel)
{
int cpu = 0;
if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
cpu = sample->cpu;
if (cpu > cw->stream_cnt) {
pr_err("Event was recorded for CPU %d, limit is at %d.\n",
cpu, cw->stream_cnt);
cpu = 0;
}
return cpu;
}
#define STREAM_FLUSH_COUNT 100000
/*
* Currently we have no other way to determine when to
* flush the stream than to keep track of the number of
* events and check it against a threshold.
*/
static bool is_flush_needed(struct ctf_stream *cs)
{
return cs->count >= STREAM_FLUSH_COUNT;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *_event __maybe_unused,
struct perf_sample *sample,
@ -390,6 +561,7 @@ static int process_sample_event(struct perf_tool *tool,
struct convert *c = container_of(tool, struct convert, tool);
struct evsel_priv *priv = evsel->priv;
struct ctf_writer *cw = &c->writer;
struct ctf_stream *cs;
struct bt_ctf_event_class *event_class;
struct bt_ctf_event *event;
int ret;
@ -424,9 +596,93 @@ static int process_sample_event(struct perf_tool *tool,
return -1;
}
bt_ctf_stream_append_event(cw->stream, event);
cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
if (cs) {
if (is_flush_needed(cs))
ctf_stream__flush(cs);
cs->count++;
bt_ctf_stream_append_event(cs->stream, event);
}
bt_ctf_event_put(event);
return 0;
return cs ? 0 : -1;
}
/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
char *new_name = NULL;
size_t len;
if (!name)
name = orig_name;
if (dup >= 10)
goto out;
/*
* Add '_' prefix to potential keyword. According to
* Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
* further CTF spec updates may require us to use '$'.
*/
if (dup < 0)
len = strlen(name) + sizeof("_");
else
len = strlen(orig_name) + sizeof("_dupl_X");
new_name = malloc(len);
if (!new_name)
goto out;
if (dup < 0)
snprintf(new_name, len, "_%s", name);
else
snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
out:
if (name != orig_name)
free(name);
return new_name;
}
static int event_class_add_field(struct bt_ctf_event_class *event_class,
struct bt_ctf_field_type *type,
struct format_field *field)
{
struct bt_ctf_field_type *t = NULL;
char *name;
int dup = 1;
int ret;
/* alias was already assigned */
if (field->alias != field->name)
return bt_ctf_event_class_add_field(event_class, type,
(char *)field->alias);
name = field->name;
/* If 'name' is a keyword, add a prefix. */
if (bt_ctf_validate_identifier(name))
name = change_name(name, field->name, -1);
if (!name) {
pr_err("Failed to fix invalid identifier.");
return -1;
}
while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
bt_ctf_field_type_put(t);
name = change_name(name, field->name, dup++);
if (!name) {
pr_err("Failed to create dup name for '%s'\n", field->name);
return -1;
}
}
ret = bt_ctf_event_class_add_field(event_class, type, name);
if (!ret)
field->alias = name;
return ret;
}
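As an illustration of the naming this produces (the field names here are hypothetical): a field whose name is rejected by bt_ctf_validate_identifier(), e.g. a CTF keyword such as "event", is emitted as "_event", while a second field that clashes with an already-added "common_pid" becomes "common_pid_dupl_1".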
static int add_tracepoint_fields_types(struct ctf_writer *cw,
@ -457,14 +713,14 @@ static int add_tracepoint_fields_types(struct ctf_writer *cw,
if (flags & FIELD_IS_ARRAY)
type = bt_ctf_field_type_array_create(type, field->arraylen);
ret = bt_ctf_event_class_add_field(event_class, type,
field->name);
ret = event_class_add_field(event_class, type, field);
if (flags & FIELD_IS_ARRAY)
bt_ctf_field_type_put(type);
if (ret) {
pr_err("Failed to add field '%s\n", field->name);
pr_err("Failed to add field '%s': %d\n",
field->name, ret);
return -1;
}
}
@ -508,7 +764,7 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
do { \
pr2(" field '%s'\n", n); \
if (bt_ctf_event_class_add_field(cl, t, n)) { \
pr_err("Failed to add field '%s;\n", n); \
pr_err("Failed to add field '%s';\n", n); \
return -1; \
} \
} while (0)
@ -528,9 +784,6 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
if (type & PERF_SAMPLE_STREAM_ID)
ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
if (type & PERF_SAMPLE_CPU)
ADD_FIELD(event_class, cw->data.u32, "perf_cpu");
if (type & PERF_SAMPLE_PERIOD)
ADD_FIELD(event_class, cw->data.u64, "perf_period");
@ -604,6 +857,39 @@ static int setup_events(struct ctf_writer *cw, struct perf_session *session)
return 0;
}
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
struct ctf_stream **stream;
struct perf_header *ph = &session->header;
int ncpus;
/*
* Try to get the number of cpus used in the data file,
* if not present fallback to the MAX_CPUS.
*/
ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
stream = zalloc(sizeof(*stream) * ncpus);
if (!stream) {
pr_err("Failed to allocate streams.\n");
return -ENOMEM;
}
cw->stream = stream;
cw->stream_cnt = ncpus;
return 0;
}
static void free_streams(struct ctf_writer *cw)
{
int cpu;
for (cpu = 0; cpu < cw->stream_cnt; cpu++)
ctf_stream__delete(cw->stream[cpu]);
free(cw->stream);
}
static int ctf_writer__setup_env(struct ctf_writer *cw,
struct perf_session *session)
{
@ -713,7 +999,7 @@ static void ctf_writer__cleanup(struct ctf_writer *cw)
ctf_writer__cleanup_data(cw);
bt_ctf_clock_put(cw->clock);
bt_ctf_stream_put(cw->stream);
free_streams(cw);
bt_ctf_stream_class_put(cw->stream_class);
bt_ctf_writer_put(cw->writer);
@ -725,8 +1011,9 @@ static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
struct bt_ctf_writer *writer;
struct bt_ctf_stream_class *stream_class;
struct bt_ctf_stream *stream;
struct bt_ctf_clock *clock;
struct bt_ctf_field_type *pkt_ctx_type;
int ret;
/* CTF writer */
writer = bt_ctf_writer_create(path);
@ -767,14 +1054,15 @@ static int ctf_writer__init(struct ctf_writer *cw, const char *path)
if (ctf_writer__init_data(cw))
goto err_cleanup;
/* CTF stream instance */
stream = bt_ctf_writer_create_stream(writer, stream_class);
if (!stream) {
pr("Failed to create CTF stream.\n");
/* Add cpu_id for packet context */
pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
if (!pkt_ctx_type)
goto err_cleanup;
}
cw->stream = stream;
ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
bt_ctf_field_type_put(pkt_ctx_type);
if (ret)
goto err_cleanup;
/* CTF clock writer setup */
if (bt_ctf_writer_add_clock(writer, clock)) {
@ -791,6 +1079,28 @@ err:
return -1;
}
static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
int cpu, ret = 0;
for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
ret = ctf_stream__flush(cw->stream[cpu]);
return ret;
}
static int convert__config(const char *var, const char *value, void *cb)
{
struct convert *c = cb;
if (!strcmp(var, "convert.queue-size")) {
c->queue_size = perf_config_u64(var, value);
return 0;
}
return perf_default_config(var, value, cb);
}
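A hedged example of the ~/.perfconfig entry this callback consumes (the value is arbitrary, for illustration only):

[convert]
	queue-size = 100000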
int bt_convert__perf2ctf(const char *input, const char *path, bool force)
{
struct perf_session *session;
@ -817,6 +1127,8 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
struct ctf_writer *cw = &c.writer;
int err = -1;
perf_config(convert__config, &c);
/* CTF writer */
if (ctf_writer__init(cw, path))
return -1;
@ -826,6 +1138,11 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
if (!session)
goto free_writer;
if (c.queue_size) {
ordered_events__set_alloc_size(&session->ordered_events,
c.queue_size);
}
/* CTF writer env/clock setup */
if (ctf_writer__setup_env(cw, session))
goto free_session;
@ -834,9 +1151,14 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
if (setup_events(cw, session))
goto free_session;
if (setup_streams(cw, session))
goto free_session;
err = perf_session__process_events(session);
if (!err)
err = bt_ctf_stream_flush(cw->stream);
err = ctf_writer__flush_streams(cw);
else
pr_err("Error during conversion.\n");
fprintf(stderr,
"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
@ -847,11 +1169,15 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
(double) c.events_size / 1024.0 / 1024.0,
c.events_count);
/* its all good */
perf_session__delete(session);
ctf_writer__cleanup(cw);
return err;
free_session:
perf_session__delete(session);
free_writer:
ctf_writer__cleanup(cw);
pr_err("Error during conversion setup.\n");
return err;
}

View File

@ -4,6 +4,7 @@
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"
@ -961,6 +962,7 @@ void dso__delete(struct dso *dso)
}
dso__data_close(dso);
auxtrace_cache__free(dso->auxtrace_cache);
dso_cache__free(&dso->data.cache);
dso__free_a2l(dso);
zfree(&dso->symsrc_filename);

View File

@ -126,6 +126,8 @@ struct dsos {
struct rb_root root; /* rbtree root sorted by long name */
};
struct auxtrace_cache;
struct dso {
struct list_head node;
struct rb_node rb_node; /* rbtree node sorted by long name */
@ -156,6 +158,7 @@ struct dso {
u16 long_name_len;
u16 short_name_len;
void *dwfl; /* DWARF debug info */
struct auxtrace_cache *auxtrace_cache;
/* dso data file */
struct {

View File

@ -23,12 +23,17 @@ static const char *perf_event__names[] = {
[PERF_RECORD_FORK] = "FORK",
[PERF_RECORD_READ] = "READ",
[PERF_RECORD_SAMPLE] = "SAMPLE",
[PERF_RECORD_AUX] = "AUX",
[PERF_RECORD_ITRACE_START] = "ITRACE_START",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
[PERF_RECORD_ID_INDEX] = "ID_INDEX",
[PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
[PERF_RECORD_AUXTRACE] = "AUXTRACE",
[PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
};
const char *perf_event__name(unsigned int id)
@ -692,6 +697,22 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
return machine__process_lost_event(machine, event, sample);
}
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return machine__process_aux_event(machine, event);
}
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return machine__process_itrace_start_event(machine, event);
}
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
@ -755,6 +776,21 @@ int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
return machine__process_exit_event(machine, event, sample);
}
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
event->aux.aux_offset, event->aux.aux_size,
event->aux.flags,
event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
return fprintf(fp, " pid: %u tid: %u\n",
event->itrace_start.pid, event->itrace_start.tid);
}
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@ -774,6 +810,12 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_MMAP2:
ret += perf_event__fprintf_mmap2(event, fp);
break;
case PERF_RECORD_AUX:
ret += perf_event__fprintf_aux(event, fp);
break;
case PERF_RECORD_ITRACE_START:
ret += perf_event__fprintf_itrace_start(event, fp);
break;
default:
ret += fprintf(fp, "\n");
}

View File

@ -157,6 +157,8 @@ enum {
PERF_IP_FLAG_IN_TX = 1ULL << 10,
};
#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
#define PERF_BRANCH_MASK (\
PERF_IP_FLAG_BRANCH |\
PERF_IP_FLAG_CALL |\
@ -215,9 +217,17 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_HEADER_BUILD_ID = 67,
PERF_RECORD_FINISHED_ROUND = 68,
PERF_RECORD_ID_INDEX = 69,
PERF_RECORD_AUXTRACE_INFO = 70,
PERF_RECORD_AUXTRACE = 71,
PERF_RECORD_AUXTRACE_ERROR = 72,
PERF_RECORD_HEADER_MAX
};
enum auxtrace_error_type {
PERF_AUXTRACE_ERROR_ITRACE = 1,
PERF_AUXTRACE_ERROR_MAX
};
/*
* The kernel collects the number of events it couldn't send in a stretch and
* when possible sends this number in a PERF_RECORD_LOST event. The number of
@ -242,6 +252,7 @@ struct events_stats {
u32 nr_invalid_chains;
u32 nr_unknown_id;
u32 nr_unprocessable_samples;
u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
};
struct attr_event {
@ -280,6 +291,50 @@ struct id_index_event {
struct id_index_entry entries[0];
};
struct auxtrace_info_event {
struct perf_event_header header;
u32 type;
u32 reserved__; /* For alignment */
u64 priv[];
};
struct auxtrace_event {
struct perf_event_header header;
u64 size;
u64 offset;
u64 reference;
u32 idx;
u32 tid;
u32 cpu;
u32 reserved__; /* For alignment */
};
#define MAX_AUXTRACE_ERROR_MSG 64
struct auxtrace_error_event {
struct perf_event_header header;
u32 type;
u32 code;
u32 cpu;
u32 pid;
u32 tid;
u32 reserved__; /* For alignment */
u64 ip;
char msg[MAX_AUXTRACE_ERROR_MSG];
};
struct aux_event {
struct perf_event_header header;
u64 aux_offset;
u64 aux_size;
u64 flags;
};
struct itrace_start_event {
struct perf_event_header header;
u32 pid, tid;
};
union perf_event {
struct perf_event_header header;
struct mmap_event mmap;
@ -295,6 +350,11 @@ union perf_event {
struct tracing_data_event tracing_data;
struct build_id_event build_id;
struct id_index_event id_index;
struct auxtrace_info_event auxtrace_info;
struct auxtrace_event auxtrace;
struct auxtrace_error_event auxtrace_error;
struct aux_event aux;
struct itrace_start_event itrace_start;
};
void perf_event__print_totals(void);
@ -330,6 +390,14 @@ int perf_event__process_lost(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@ -387,6 +455,8 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
u64 kallsyms__get_function_start(const char *kallsyms_filename,

View File

@ -695,7 +695,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
static bool perf_mmap__empty(struct perf_mmap *md)
{
return perf_mmap__read_head(md) == md->prev;
return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}
static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
@ -725,6 +725,34 @@ void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
perf_evlist__mmap_put(evlist, idx);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
struct auxtrace_mmap_params *mp __maybe_unused,
void *userpg __maybe_unused,
int fd __maybe_unused)
{
return 0;
}
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}
void __weak auxtrace_mmap_params__init(
struct auxtrace_mmap_params *mp __maybe_unused,
off_t auxtrace_offset __maybe_unused,
unsigned int auxtrace_pages __maybe_unused,
bool auxtrace_overwrite __maybe_unused)
{
}
void __weak auxtrace_mmap_params__set_idx(
struct auxtrace_mmap_params *mp __maybe_unused,
struct perf_evlist *evlist __maybe_unused,
int idx __maybe_unused,
bool per_cpu __maybe_unused)
{
}
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
if (evlist->mmap[idx].base != NULL) {
@ -732,6 +760,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
evlist->mmap[idx].base = NULL;
evlist->mmap[idx].refcnt = 0;
}
auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
@ -759,6 +788,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
struct mmap_params {
int prot;
int mask;
struct auxtrace_mmap_params auxtrace_mp;
};
static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
@ -789,6 +819,10 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
return -1;
}
if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
return -1;
return 0;
}
@ -853,6 +887,9 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
true);
for (thread = 0; thread < nr_threads; thread++) {
if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
thread, &output))
@ -878,6 +915,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
false);
if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
&output))
goto out_unmap;
@ -960,10 +1000,8 @@ static long parse_pages_arg(const char *str, unsigned long min,
return pages;
}
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
int unset __maybe_unused)
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
unsigned int *mmap_pages = opt->value;
unsigned long max = UINT_MAX;
long pages;
@ -980,20 +1018,32 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
return 0;
}
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
int unset __maybe_unused)
{
return __perf_evlist__parse_mmap_pages(opt->value, str);
}
/**
* perf_evlist__mmap - Create mmaps to receive events.
* perf_evlist__mmap_ex - Create mmaps to receive events.
* @evlist: list of events
* @pages: map length in pages
* @overwrite: overwrite older events?
* @auxtrace_pages: auxtrace map length in pages
* @auxtrace_overwrite: overwrite older auxtrace data?
*
* If @overwrite is %false the user needs to signal event consumption using
* perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
* automatically.
*
* Similarly, if @auxtrace_overwrite is %false the user needs to signal data
* consumption using auxtrace_mmap__write_tail().
*
* Return: %0 on success, negative error code otherwise.
*/
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
bool overwrite)
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
bool overwrite, unsigned int auxtrace_pages,
bool auxtrace_overwrite)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@ -1013,6 +1063,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
pr_debug("mmap size %zuB\n", evlist->mmap_len);
mp.mask = evlist->mmap_len - page_size - 1;
auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
auxtrace_pages, auxtrace_overwrite);
evlist__for_each(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
evsel->sample_id == NULL &&
@ -1026,6 +1079,12 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return perf_evlist__mmap_per_cpu(evlist, &mp);
}
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
bool overwrite)
{
return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}
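A minimal usage sketch of the extended API, assuming the perf-tree headers and an evlist that has already been created and configured; the page counts are illustrative only:

/* Sketch: request a 128-page event ring buffer plus a 64-page AUX area per mmap. */
if (perf_evlist__mmap_ex(evlist, 128, false, 64, false) < 0)
	pr_err("failed to mmap evlist\n");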
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
evlist->threads = thread_map__new_str(target->pid, target->tid,

View File

@ -8,6 +8,7 @@
#include "event.h"
#include "evsel.h"
#include "util.h"
#include "auxtrace.h"
#include <unistd.h>
struct pollfd;
@ -28,6 +29,7 @@ struct perf_mmap {
int mask;
int refcnt;
u64 prev;
struct auxtrace_mmap auxtrace_mmap;
char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
};
@ -122,10 +124,14 @@ int perf_evlist__start_workload(struct perf_evlist *evlist);
struct option;
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
const char *str,
int unset);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
bool overwrite, unsigned int auxtrace_pages,
bool auxtrace_overwrite);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);

View File

@ -1121,6 +1121,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(sample_stack_user, p_unsigned);
PRINT_ATTRf(clockid, p_signed);
PRINT_ATTRf(sample_regs_intr, p_hex);
PRINT_ATTRf(aux_watermark, p_unsigned);
return ret;
}

View File

@ -869,6 +869,20 @@ static int write_branch_stack(int fd __maybe_unused,
return 0;
}
static int write_auxtrace(int fd, struct perf_header *h,
struct perf_evlist *evlist __maybe_unused)
{
struct perf_session *session;
int err;
session = container_of(h, struct perf_session, header);
err = auxtrace_index__write(fd, &session->auxtrace_index);
if (err < 0)
pr_err("Failed to write auxtrace index\n");
return err;
}
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
FILE *fp)
{
@ -1151,6 +1165,12 @@ static void print_branch_stack(struct perf_header *ph __maybe_unused,
fprintf(fp, "# contains samples with branch stack\n");
}
static void print_auxtrace(struct perf_header *ph __maybe_unused,
int fd __maybe_unused, FILE *fp)
{
fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}
static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
FILE *fp)
{
@ -1821,6 +1841,22 @@ out_free:
return ret;
}
static int process_auxtrace(struct perf_file_section *section,
struct perf_header *ph, int fd,
void *data __maybe_unused)
{
struct perf_session *session;
int err;
session = container_of(ph, struct perf_session, header);
err = auxtrace_index__process(fd, section->size, session,
ph->needs_swap);
if (err < 0)
pr_err("Failed to process auxtrace index\n");
return err;
}
struct feature_ops {
int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
void (*print)(struct perf_header *h, int fd, FILE *fp);
@ -1861,6 +1897,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
FEAT_OPP(HEADER_GROUP_DESC, group_desc),
FEAT_OPP(HEADER_AUXTRACE, auxtrace),
};
struct header_print_data {

View File

@ -30,6 +30,7 @@ enum {
HEADER_BRANCH_STACK,
HEADER_PMU_MAPPINGS,
HEADER_GROUP_DESC,
HEADER_AUXTRACE,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};

View File

@ -1163,7 +1163,7 @@ static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h
return;
/* force fold unfiltered entry for simplicity */
h->ms.unfolded = false;
h->unfolded = false;
h->row_offset = 0;
h->nr_rows = 0;

View File

@ -486,6 +486,22 @@ machine__module_dso(struct machine *machine, struct kmod_path *m,
return dso;
}
int machine__process_aux_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_aux(event, stdout);
return 0;
}
int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_itrace_start(event, stdout);
return 0;
}
struct map *machine__new_module(struct machine *machine, u64 start,
const char *filename)
{
@ -1331,6 +1347,11 @@ int machine__process_event(struct machine *machine, union perf_event *event,
ret = machine__process_exit_event(machine, event, sample); break;
case PERF_RECORD_LOST:
ret = machine__process_lost_event(machine, event, sample); break;
case PERF_RECORD_AUX:
ret = machine__process_aux_event(machine, event); break;
case PERF_RECORD_ITRACE_START:
ret = machine__process_itrace_start_event(machine, event);
break;
default:
ret = -1;
break;

View File

@ -81,6 +81,10 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
struct perf_sample *sample);
int machine__process_lost_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
int machine__process_aux_event(struct machine *machine,
union perf_event *event);
int machine__process_itrace_start_event(struct machine *machine,
union perf_event *event);
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,

View File

@ -292,6 +292,11 @@ int map__load(struct map *map, symbol_filter_t filter)
return 0;
}
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
return strcmp(namea, nameb);
}
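An arch can override this weak default; a hedged sketch (not part of this patch) of an override that ignores the leading dot used for function-descriptor symbols, as on ppc64, could look like:

int arch__compare_symbol_names(const char *namea, const char *nameb)
{
	/* Skip the '.' prefix on function entry symbols. */
	if (*namea == '.')
		namea++;
	if (*nameb == '.')
		nameb++;

	return strcmp(namea, nameb);
}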
struct symbol *map__find_symbol(struct map *map, u64 addr,
symbol_filter_t filter)
{

View File

@ -124,7 +124,7 @@ struct thread;
*/
#define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \
for (pos = map__find_symbol_by_name(map, sym_name, filter); \
pos && strcmp(pos->name, sym_name) == 0; \
pos && arch__compare_symbol_names(pos->name, sym_name) == 0; \
pos = symbol__next_by_name(pos))
#define map__for_each_symbol_by_name(map, sym_name, pos) \
@ -132,6 +132,7 @@ struct thread;
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
int arch__compare_symbol_names(const char *namea, const char *nameb);
void map__init(struct map *map, enum map_type type,
u64 start, u64 end, u64 pgoff, struct dso *dso);
struct map *map__new(struct machine *machine, u64 start, u64 len,

View File

@ -17,6 +17,7 @@
#include "parse-events-flex.h"
#include "pmu.h"
#include "thread_map.h"
#include "asm/bug.h"
#define MAX_NAME_LEN 100
@ -24,6 +25,12 @@
extern int parse_events_debug;
#endif
int parse_events_parse(void *data, void *scanner);
int parse_events_term__num(struct parse_events_term **term,
int type_term, char *config, u64 num,
YYLTYPE *loc_term, YYLTYPE *loc_val);
int parse_events_term__str(struct parse_events_term **term,
int type_term, char *config, char *str,
YYLTYPE *loc_term, YYLTYPE *loc_val);
static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
@ -538,16 +545,40 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
return add_event(list, idx, &attr, NULL);
}
static int config_term(struct perf_event_attr *attr,
struct parse_events_term *term)
static int check_type_val(struct parse_events_term *term,
struct parse_events_error *err,
int type)
{
#define CHECK_TYPE_VAL(type) \
do { \
if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
return -EINVAL; \
if (type == term->type_val)
return 0;
if (err) {
err->idx = term->err_val;
if (type == PARSE_EVENTS__TERM_TYPE_NUM)
err->str = strdup("expected numeric value");
else
err->str = strdup("expected string value");
}
return -EINVAL;
}
static int config_term(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type) \
do { \
if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
return -EINVAL; \
} while (0)
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_USER:
/*
* Always succeed for sysfs terms, as we don't know
* at this point what type they need to have.
*/
return 0;
case PARSE_EVENTS__TERM_TYPE_CONFIG:
CHECK_TYPE_VAL(NUM);
attr->config = term->val.num;
@ -582,18 +613,20 @@ do { \
}
static int config_attr(struct perf_event_attr *attr,
struct list_head *head, int fail)
struct list_head *head,
struct parse_events_error *err)
{
struct parse_events_term *term;
list_for_each_entry(term, head, list)
if (config_term(attr, term) && fail)
if (config_term(attr, term, err))
return -EINVAL;
return 0;
}
int parse_events_add_numeric(struct list_head *list, int *idx,
int parse_events_add_numeric(struct parse_events_evlist *data,
struct list_head *list,
u32 type, u64 config,
struct list_head *head_config)
{
@ -604,10 +637,10 @@ int parse_events_add_numeric(struct list_head *list, int *idx,
attr.config = config;
if (head_config &&
config_attr(&attr, head_config, 1))
config_attr(&attr, head_config, data->error))
return -EINVAL;
return add_event(list, idx, &attr, NULL);
return add_event(list, &data->idx, &attr, NULL);
}
static int parse_events__is_name_term(struct parse_events_term *term)
@ -626,8 +659,9 @@ static char *pmu_event_name(struct list_head *head_terms)
return NULL;
}
int parse_events_add_pmu(struct list_head *list, int *idx,
char *name, struct list_head *head_config)
int parse_events_add_pmu(struct parse_events_evlist *data,
struct list_head *list, char *name,
struct list_head *head_config)
{
struct perf_event_attr attr;
struct perf_pmu_info info;
@ -647,7 +681,7 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
if (!head_config) {
attr.type = pmu->type;
evsel = __add_event(list, idx, &attr, NULL, pmu->cpus);
evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus);
return evsel ? 0 : -ENOMEM;
}
@ -658,13 +692,14 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
* Configure hardcoded terms first, no need to check
* return value when called with fail == 0 ;)
*/
config_attr(&attr, head_config, 0);
if (perf_pmu__config(pmu, &attr, head_config))
if (config_attr(&attr, head_config, data->error))
return -EINVAL;
evsel = __add_event(list, idx, &attr, pmu_event_name(head_config),
pmu->cpus);
if (perf_pmu__config(pmu, &attr, head_config, data->error))
return -EINVAL;
evsel = __add_event(list, &data->idx, &attr,
pmu_event_name(head_config), pmu->cpus);
if (evsel) {
evsel->unit = info.unit;
evsel->scale = info.scale;
@ -1019,11 +1054,13 @@ int parse_events_terms(struct list_head *terms, const char *str)
return ret;
}
int parse_events(struct perf_evlist *evlist, const char *str)
int parse_events(struct perf_evlist *evlist, const char *str,
struct parse_events_error *err)
{
struct parse_events_evlist data = {
.list = LIST_HEAD_INIT(data.list),
.idx = evlist->nr_entries,
.list = LIST_HEAD_INIT(data.list),
.idx = evlist->nr_entries,
.error = err,
};
int ret;
@ -1044,16 +1081,87 @@ int parse_events(struct perf_evlist *evlist, const char *str)
return ret;
}
#define MAX_WIDTH 1000
static int get_term_width(void)
{
struct winsize ws;
get_term_dimensions(&ws);
return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}
static void parse_events_print_error(struct parse_events_error *err,
const char *event)
{
const char *str = "invalid or unsupported event: ";
char _buf[MAX_WIDTH];
char *buf = (char *) event;
int idx = 0;
if (err->str) {
/* -2 for extra '' in the final fprintf */
int width = get_term_width() - 2;
int len_event = strlen(event);
int len_str, max_len, cut = 0;
/*
* Maximum indent for the error index; we cut
* the event string if the index is bigger.
*/
int max_err_idx = 10;
/*
* Let's be specific with the message when
* we have the precise error.
*/
str = "event syntax error: ";
len_str = strlen(str);
max_len = width - len_str;
buf = _buf;
/* We're cutting from the beginning. */
if (err->idx > max_err_idx)
cut = err->idx - max_err_idx;
strncpy(buf, event + cut, max_len);
/* Mark cut parts with '..' on both sides. */
if (cut)
buf[0] = buf[1] = '.';
if ((len_event - cut) > max_len) {
buf[max_len - 1] = buf[max_len - 2] = '.';
buf[max_len] = 0;
}
idx = len_str + err->idx - cut;
}
fprintf(stderr, "%s'%s'\n", str, buf);
if (idx) {
fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err->str);
if (err->help)
fprintf(stderr, "\n%s\n", err->help);
free(err->str);
free(err->help);
}
fprintf(stderr, "Run 'perf list' for a list of valid events\n");
}
#undef MAX_WIDTH
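For a rough illustration (hypothetical input; the exact wording comes from whatever err->str and err->help were filled in with, e.g. by the PMU term checking further below), a bad PMU term is now reported along these lines:

event syntax error: 'cpu/blah=1/'
                         \___ unknown term
Run 'perf list' for a list of valid events

When err->help is set, a "valid terms: ..." hint listing the PMU's sysfs formats and the static terms is printed between those two lines.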
int parse_events_option(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
int ret = parse_events(evlist, str);
struct parse_events_error err = { .idx = 0, };
int ret = parse_events(evlist, str, &err);
if (ret)
parse_events_print_error(&err, str);
if (ret) {
fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
fprintf(stderr, "Run 'perf list' for a list of valid events\n");
}
return ret;
}
@ -1460,7 +1568,7 @@ int parse_events__is_hardcoded_term(struct parse_events_term *term)
static int new_term(struct parse_events_term **_term, int type_val,
int type_term, char *config,
char *str, u64 num)
char *str, u64 num, int err_term, int err_val)
{
struct parse_events_term *term;
@ -1472,6 +1580,8 @@ static int new_term(struct parse_events_term **_term, int type_val,
term->type_val = type_val;
term->type_term = type_term;
term->config = config;
term->err_term = err_term;
term->err_val = err_val;
switch (type_val) {
case PARSE_EVENTS__TERM_TYPE_NUM:
@ -1490,17 +1600,23 @@ static int new_term(struct parse_events_term **_term, int type_val,
}
int parse_events_term__num(struct parse_events_term **term,
int type_term, char *config, u64 num)
int type_term, char *config, u64 num,
YYLTYPE *loc_term, YYLTYPE *loc_val)
{
return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
config, NULL, num);
config, NULL, num,
loc_term ? loc_term->first_column : 0,
loc_val ? loc_val->first_column : 0);
}
int parse_events_term__str(struct parse_events_term **term,
int type_term, char *config, char *str)
int type_term, char *config, char *str,
YYLTYPE *loc_term, YYLTYPE *loc_val)
{
return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
config, str, 0);
config, str, 0,
loc_term ? loc_term->first_column : 0,
loc_val ? loc_val->first_column : 0);
}
int parse_events_term__sym_hw(struct parse_events_term **term,
@ -1514,18 +1630,20 @@ int parse_events_term__sym_hw(struct parse_events_term **term,
if (config)
return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
PARSE_EVENTS__TERM_TYPE_USER, config,
(char *) sym->symbol, 0);
(char *) sym->symbol, 0, 0, 0);
else
return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
PARSE_EVENTS__TERM_TYPE_USER,
(char *) "event", (char *) sym->symbol, 0);
(char *) "event", (char *) sym->symbol,
0, 0, 0);
}
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term)
{
return new_term(new, term->type_val, term->type_term, term->config,
term->val.str, term->val.num);
term->val.str, term->val.num,
term->err_term, term->err_val);
}
void parse_events__free_terms(struct list_head *terms)
@ -1535,3 +1653,13 @@ void parse_events__free_terms(struct list_head *terms)
list_for_each_entry_safe(term, h, terms, list)
free(term);
}
void parse_events_evlist_error(struct parse_events_evlist *data,
int idx, const char *str)
{
struct parse_events_error *err = data->error;
err->idx = idx;
err->str = strdup(str);
WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
}

View File

@ -12,6 +12,7 @@
struct list_head;
struct perf_evsel;
struct perf_evlist;
struct parse_events_error;
struct option;
@ -29,7 +30,8 @@ const char *event_type(int type);
extern int parse_events_option(const struct option *opt, const char *str,
int unset);
extern int parse_events(struct perf_evlist *evlist, const char *str);
extern int parse_events(struct perf_evlist *evlist, const char *str,
struct parse_events_error *error);
extern int parse_events_terms(struct list_head *terms, const char *str);
extern int parse_filter(const struct option *opt, const char *str, int unset);
@ -72,12 +74,23 @@ struct parse_events_term {
int type_term;
struct list_head list;
bool used;
/* error string indexes for within parsed string */
int err_term;
int err_val;
};
struct parse_events_error {
int idx; /* index in the parsed string */
char *str; /* string to display at the index */
char *help; /* optional help string */
};
struct parse_events_evlist {
struct list_head list;
int idx;
int nr_groups;
struct list_head list;
int idx;
int nr_groups;
struct parse_events_error *error;
};
struct parse_events_terms {
@ -85,10 +98,6 @@ struct parse_events_terms {
};
int parse_events__is_hardcoded_term(struct parse_events_term *term);
int parse_events_term__num(struct parse_events_term **_term,
int type_term, char *config, u64 num);
int parse_events_term__str(struct parse_events_term **_term,
int type_term, char *config, char *str);
int parse_events_term__sym_hw(struct parse_events_term **term,
char *config, unsigned idx);
int parse_events_term__clone(struct parse_events_term **new,
@ -99,21 +108,24 @@ int parse_events__modifier_group(struct list_head *list, char *event_mod);
int parse_events_name(struct list_head *list, char *name);
int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event);
int parse_events_add_numeric(struct list_head *list, int *idx,
int parse_events_add_numeric(struct parse_events_evlist *data,
struct list_head *list,
u32 type, u64 config,
struct list_head *head_config);
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2);
int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type, u64 len);
int parse_events_add_pmu(struct list_head *list, int *idx,
char *pmu , struct list_head *head_config);
int parse_events_add_pmu(struct parse_events_evlist *data,
struct list_head *list, char *name,
struct list_head *head_config);
enum perf_pmu_event_symbol_type
perf_pmu__parse_check(const char *name);
void parse_events__set_leader(char *name, struct list_head *list);
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all);
void parse_events_error(void *data, void *scanner, char const *msg);
void parse_events_evlist_error(struct parse_events_evlist *data,
int idx, const char *str);
void print_events(const char *event_glob, bool name_only);

View File

@ -3,6 +3,8 @@
%option bison-bridge
%option prefix="parse_events_"
%option stack
%option bison-locations
%option yylineno
%{
#include <errno.h>
@ -51,6 +53,18 @@ static int str(yyscan_t scanner, int token)
return token;
}
#define REWIND(__alloc) \
do { \
YYSTYPE *__yylval = parse_events_get_lval(yyscanner); \
char *text = parse_events_get_text(yyscanner); \
\
if (__alloc) \
__yylval->str = strdup(text); \
\
yycolumn -= strlen(text); \
yyless(0); \
} while (0)
static int pmu_str_check(yyscan_t scanner)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
@ -85,6 +99,13 @@ static int term(yyscan_t scanner, int type)
return PE_TERM;
}
#define YY_USER_ACTION \
do { \
yylloc->last_column = yylloc->first_column; \
yylloc->first_column = yycolumn; \
yycolumn += yyleng; \
} while (0);
%}
%x mem
@ -119,6 +140,12 @@ modifier_bp [rwx]{1,3}
if (start_token) {
parse_events_set_extra(NULL, yyscanner);
/*
* The flex parser does not initialize the locations variable
* via the scan_string interface, so we need to do the
* initialization here.
*/
yycolumn = 0;
return start_token;
}
}
@ -127,24 +154,30 @@ modifier_bp [rwx]{1,3}
<event>{
{group} {
BEGIN(INITIAL); yyless(0);
BEGIN(INITIAL);
REWIND(0);
}
{event_pmu} |
{event} {
str(yyscanner, PE_EVENT_NAME);
BEGIN(INITIAL); yyless(0);
BEGIN(INITIAL);
REWIND(1);
return PE_EVENT_NAME;
}
. |
<<EOF>> {
BEGIN(INITIAL); yyless(0);
BEGIN(INITIAL);
REWIND(0);
}
}
<config>{
/*
* Please update formats_error_string any time
* a new static term is added.
*/
config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }

View File

@ -2,6 +2,7 @@
%parse-param {void *_data}
%parse-param {void *scanner}
%lex-param {void* scanner}
%locations
%{
@ -14,8 +15,6 @@
#include "parse-events.h"
#include "parse-events-bison.h"
extern int parse_events_lex (YYSTYPE* lvalp, void* scanner);
#define ABORT_ON(val) \
do { \
if (val) \
@ -208,7 +207,7 @@ PE_NAME '/' event_config '/'
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3));
ABORT_ON(parse_events_add_pmu(data, list, $1, $3));
parse_events__free_terms($3);
$$ = list;
}
@ -219,7 +218,7 @@ PE_NAME '/' '/'
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, NULL));
ABORT_ON(parse_events_add_pmu(data, list, $1, NULL));
$$ = list;
}
|
@ -232,11 +231,11 @@ PE_KERNEL_PMU_EVENT sep_dc
ALLOC_LIST(head);
ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, 1));
$1, 1, &@1, NULL));
list_add_tail(&term->list, head);
ALLOC_LIST(list);
ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head));
ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
parse_events__free_terms(head);
$$ = list;
}
@ -252,7 +251,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
ALLOC_LIST(head);
ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
&pmu_name, 1));
&pmu_name, 1, &@1, NULL));
list_add_tail(&term->list, head);
ALLOC_LIST(list);
@ -275,8 +274,7 @@ value_sym '/' event_config '/'
int config = $1 & 255;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(list, &data->idx,
type, config, $3));
ABORT_ON(parse_events_add_numeric(data, list, type, config, $3));
parse_events__free_terms($3);
$$ = list;
}
@ -289,8 +287,7 @@ value_sym sep_slash_dc
int config = $1 & 255;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(list, &data->idx,
type, config, NULL));
ABORT_ON(parse_events_add_numeric(data, list, type, config, NULL));
$$ = list;
}
@ -389,7 +386,13 @@ PE_NAME ':' PE_NAME
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3));
if (parse_events_add_tracepoint(list, &data->idx, $1, $3)) {
struct parse_events_error *error = data->error;
error->idx = @1.first_column;
error->str = strdup("unknown tracepoint");
return -1;
}
$$ = list;
}
@ -400,7 +403,7 @@ PE_VALUE ':' PE_VALUE
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL));
ABORT_ON(parse_events_add_numeric(data, list, (u32)$1, $3, NULL));
$$ = list;
}
@ -411,8 +414,7 @@ PE_RAW
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(list, &data->idx,
PERF_TYPE_RAW, $1, NULL));
ABORT_ON(parse_events_add_numeric(data, list, PERF_TYPE_RAW, $1, NULL));
$$ = list;
}
@ -450,7 +452,7 @@ PE_NAME '=' PE_NAME
struct parse_events_term *term;
ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $3));
$1, $3, &@1, &@3));
$$ = term;
}
|
@ -459,7 +461,7 @@ PE_NAME '=' PE_VALUE
struct parse_events_term *term;
ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, $3));
$1, $3, &@1, &@3));
$$ = term;
}
|
@ -477,7 +479,7 @@ PE_NAME
struct parse_events_term *term;
ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
$1, 1));
$1, 1, &@1, NULL));
$$ = term;
}
|
@ -494,7 +496,7 @@ PE_TERM '=' PE_NAME
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3));
ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3));
$$ = term;
}
|
@ -502,7 +504,7 @@ PE_TERM '=' PE_VALUE
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3));
ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3, &@1, &@3));
$$ = term;
}
|
@ -510,7 +512,7 @@ PE_TERM
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1));
ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, &@1, NULL));
$$ = term;
}
@ -520,7 +522,9 @@ sep_slash_dc: '/' | ':' |
%%
void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused,
void parse_events_error(YYLTYPE *loc, void *data,
void *scanner __maybe_unused,
char const *msg __maybe_unused)
{
parse_events_evlist_error(data, loc->last_column, "parser error");
}

View File

@ -123,6 +123,10 @@ struct option {
#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
#define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) }
#define OPT_STRING_OPTARG(s, l, v, a, h, d) \
{ .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
.value = check_vtype(v, const char **), (a), .help = (h), \
.flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
#define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
#define OPT_DATE(s, l, v, h) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
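A hedged illustration of declaring an option with the new optional-argument macro (the option name, variable and default value are made up for this example):

static const char *call_graph;

static const struct option options[] = {
	/* "--call-graph" alone uses the default "fp"; "--call-graph=dwarf" overrides it. */
	OPT_STRING_OPTARG(0, "call-graph", &call_graph, "mode",
			  "enable call-graph recording", "fp"),
	OPT_END()
};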

View File

@ -579,6 +579,38 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
return -1;
}
static char *formats_error_string(struct list_head *formats)
{
struct perf_pmu_format *format;
char *err, *str;
static const char *static_terms = "config,config1,config2,name,period,branch_type\n";
unsigned i = 0;
if (!asprintf(&str, "valid terms:"))
return NULL;
/* sysfs exported terms */
list_for_each_entry(format, formats, list) {
char c = i++ ? ',' : ' ';
err = str;
if (!asprintf(&str, "%s%c%s", err, c, format->name))
goto fail;
free(err);
}
/* static terms */
err = str;
if (!asprintf(&str, "%s,%s", err, static_terms))
goto fail;
free(err);
return str;
fail:
free(err);
return NULL;
}
/*
* Setup one of config[12] attr members based on the
* user input data - term parameter.
@ -587,7 +619,7 @@ static int pmu_config_term(struct list_head *formats,
struct perf_event_attr *attr,
struct parse_events_term *term,
struct list_head *head_terms,
bool zero)
bool zero, struct parse_events_error *err)
{
struct perf_pmu_format *format;
__u64 *vp;
@ -611,6 +643,11 @@ static int pmu_config_term(struct list_head *formats,
if (!format) {
if (verbose)
printf("Invalid event/parameter '%s'\n", term->config);
if (err) {
err->idx = term->err_term;
err->str = strdup("unknown term");
err->help = formats_error_string(formats);
}
return -EINVAL;
}
@ -636,9 +673,14 @@ static int pmu_config_term(struct list_head *formats,
val = term->val.num;
else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
if (strcmp(term->val.str, "?")) {
if (verbose)
if (verbose) {
pr_info("Invalid sysfs entry %s=%s\n",
term->config, term->val.str);
}
if (err) {
err->idx = term->err_val;
err->str = strdup("expected numeric value");
}
return -EINVAL;
}
@ -654,12 +696,13 @@ static int pmu_config_term(struct list_head *formats,
int perf_pmu__config_terms(struct list_head *formats,
struct perf_event_attr *attr,
struct list_head *head_terms,
bool zero)
bool zero, struct parse_events_error *err)
{
struct parse_events_term *term;
list_for_each_entry(term, head_terms, list) {
if (pmu_config_term(formats, attr, term, head_terms, zero))
if (pmu_config_term(formats, attr, term, head_terms,
zero, err))
return -EINVAL;
}
@ -672,12 +715,14 @@ int perf_pmu__config_terms(struct list_head *formats,
* 2) pmu format definitions - specified by pmu parameter
*/
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms)
struct list_head *head_terms,
struct parse_events_error *err)
{
bool zero = !!pmu->default_config;
attr->type = pmu->type;
return perf_pmu__config_terms(&pmu->format, attr, head_terms, zero);
return perf_pmu__config_terms(&pmu->format, attr, head_terms,
zero, err);
}
static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
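
As context for the error reporting added above, a hedged sketch of how a caller might surface a struct parse_events_error follows. Only the idx/str/help fields are taken from this series; the helper itself and its output format are hypothetical.

/* Sketch only: print the failing event string, a caret at the reported
 * column, the error message, and any help text (e.g. the valid terms
 * list built by formats_error_string()).  Needs <stdio.h>.
 */
static void sketch_report_parse_error(const char *event_str,
				      struct parse_events_error *err)
{
	fprintf(stderr, "invalid event string:\n%s\n", event_str);
	fprintf(stderr, "%*s^\n", err->idx, "");
	if (err->str)
		fprintf(stderr, "error: %s\n", err->str);
	if (err->help)
		fprintf(stderr, "%s\n", err->help);
}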

View File

@ -4,6 +4,7 @@
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <stdbool.h>
#include "parse-events.h"
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
@ -47,11 +48,12 @@ struct perf_pmu_alias {
struct perf_pmu *perf_pmu__find(const char *name);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms);
struct list_head *head_terms,
struct parse_events_error *error);
int perf_pmu__config_terms(struct list_head *formats,
struct perf_event_attr *attr,
struct list_head *head_terms,
bool zero);
bool zero, struct parse_events_error *error);
int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
struct perf_pmu_info *info);
struct list_head *perf_pmu__alias(struct perf_pmu *pmu,

View File

@ -1077,6 +1077,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
struct perf_probe_point *pp = &pev->point;
char *ptr, *tmp;
char c, nc = 0;
bool file_spec = false;
/*
* <Syntax>
* perf probe [EVENT=]SRC[:LN|;PTN]
@ -1105,6 +1106,23 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
arg = tmp;
}
/*
* Check arg is function or file name and copy it.
*
* We consider arg to be a file spec if and only if it satisfies
* all of the criteria below:
* - it does not include any of "+@%",
* - it includes one of ":;", and
* - it has a period '.' in the name.
*
* Otherwise, we consider arg to be a function specification.
*/
if (!strpbrk(arg, "+@%") && (ptr = strpbrk(arg, ";:")) != NULL) {
/* This is a file spec if it includes a '.' before ; or : */
if (memchr(arg, '.', ptr - arg))
file_spec = true;
}
ptr = strpbrk(arg, ";:+@%");
if (ptr) {
nc = *ptr;
@ -1115,10 +1133,9 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
if (tmp == NULL)
return -ENOMEM;
/* Check arg is function or file and copy it */
if (strchr(tmp, '.')) /* File */
if (file_spec)
pp->file = tmp;
else /* Function */
else
pp->function = tmp;
/* Parse other options */
@ -2129,7 +2146,23 @@ static int show_perf_probe_event(struct perf_probe_event *pev,
return ret;
}
static int __show_perf_probe_events(int fd, bool is_kprobe)
static bool filter_probe_trace_event(struct probe_trace_event *tev,
struct strfilter *filter)
{
char tmp[128];
/* First, check the event name itself */
if (strfilter__compare(filter, tev->event))
return true;
/* Next, check the combination of name and group */
if (e_snprintf(tmp, 128, "%s:%s", tev->group, tev->event) < 0)
return false;
return strfilter__compare(filter, tmp);
}
static int __show_perf_probe_events(int fd, bool is_kprobe,
struct strfilter *filter)
{
int ret = 0;
struct probe_trace_event tev;
@ -2147,12 +2180,15 @@ static int __show_perf_probe_events(int fd, bool is_kprobe)
strlist__for_each(ent, rawlist) {
ret = parse_probe_trace_command(ent->s, &tev);
if (ret >= 0) {
if (!filter_probe_trace_event(&tev, filter))
goto next;
ret = convert_to_perf_probe_event(&tev, &pev,
is_kprobe);
if (ret >= 0)
ret = show_perf_probe_event(&pev,
tev.point.module);
}
next:
clear_perf_probe_event(&pev);
clear_probe_trace_event(&tev);
if (ret < 0)
@ -2164,7 +2200,7 @@ static int __show_perf_probe_events(int fd, bool is_kprobe)
}
/* List up current perf-probe events */
int show_perf_probe_events(void)
int show_perf_probe_events(struct strfilter *filter)
{
int kp_fd, up_fd, ret;
@ -2176,7 +2212,7 @@ int show_perf_probe_events(void)
kp_fd = open_kprobe_events(false);
if (kp_fd >= 0) {
ret = __show_perf_probe_events(kp_fd, true);
ret = __show_perf_probe_events(kp_fd, true, filter);
close(kp_fd);
if (ret < 0)
goto out;
@ -2190,7 +2226,7 @@ int show_perf_probe_events(void)
}
if (up_fd >= 0) {
ret = __show_perf_probe_events(up_fd, false);
ret = __show_perf_probe_events(up_fd, false, filter);
close(up_fd);
}
out:
@ -2265,6 +2301,9 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
{
int i, ret;
if (*base == '.')
base++;
/* Try no suffix */
ret = e_snprintf(buf, len, "%s", base);
if (ret < 0) {
@ -2447,6 +2486,10 @@ static int find_probe_functions(struct map *map, char *name)
#define strdup_or_goto(str, label) \
({ char *__p = strdup(str); if (!__p) goto label; __p; })
void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
struct probe_trace_event *tev __maybe_unused,
struct map *map __maybe_unused) { }
/*
* Find probe function addresses from map.
* Return an error or the number of found probe_trace_event
@ -2553,6 +2596,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
strdup_or_goto(pev->args[i].type,
nomem_out);
}
arch__fix_tev_from_maps(pev, tev, map);
}
out:
@ -2567,6 +2611,8 @@ err_out:
goto out;
}
bool __weak arch__prefers_symtab(void) { return false; }
static int convert_to_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
int max_tevs, const char *target)
@ -2582,6 +2628,12 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
}
}
if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
ret = find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
if (ret > 0)
return ret; /* Found in symbol table */
}
/* Convert perf_probe_event with debuginfo */
ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
if (ret != 0)
@ -2682,40 +2734,39 @@ error:
return ret;
}
static int del_trace_probe_event(int fd, const char *buf,
struct strlist *namelist)
static int del_trace_probe_events(int fd, struct strfilter *filter,
struct strlist *namelist)
{
struct str_node *ent, *n;
int ret = -1;
struct str_node *ent;
const char *p;
int ret = -ENOENT;
if (strpbrk(buf, "*?")) { /* Glob-exp */
strlist__for_each_safe(ent, n, namelist)
if (strglobmatch(ent->s, buf)) {
ret = __del_trace_probe_event(fd, ent);
if (ret < 0)
break;
strlist__remove(namelist, ent);
}
} else {
ent = strlist__find(namelist, buf);
if (ent) {
if (!namelist)
return -ENOENT;
strlist__for_each(ent, namelist) {
p = strchr(ent->s, ':');
if ((p && strfilter__compare(filter, p + 1)) ||
strfilter__compare(filter, ent->s)) {
ret = __del_trace_probe_event(fd, ent);
if (ret >= 0)
strlist__remove(namelist, ent);
if (ret < 0)
break;
}
}
return ret;
}
int del_perf_probe_events(struct strlist *dellist)
int del_perf_probe_events(struct strfilter *filter)
{
int ret = -1, ufd = -1, kfd = -1;
char buf[128];
const char *group, *event;
char *p, *str;
struct str_node *ent;
int ret, ret2, ufd = -1, kfd = -1;
struct strlist *namelist = NULL, *unamelist = NULL;
char *str = strfilter__string(filter);
if (!str)
return -EINVAL;
pr_debug("Delete filter: \'%s\'\n", str);
/* Get current event names */
kfd = open_kprobe_events(true);
@ -2728,48 +2779,21 @@ int del_perf_probe_events(struct strlist *dellist)
if (kfd < 0 && ufd < 0) {
print_both_open_warning(kfd, ufd);
ret = kfd;
goto error;
}
if (namelist == NULL && unamelist == NULL)
ret = del_trace_probe_events(kfd, filter, namelist);
if (ret < 0 && ret != -ENOENT)
goto error;
strlist__for_each(ent, dellist) {
str = strdup(ent->s);
if (str == NULL) {
ret = -ENOMEM;
goto error;
}
pr_debug("Parsing: %s\n", str);
p = strchr(str, ':');
if (p) {
group = str;
*p = '\0';
event = p + 1;
} else {
group = "*";
event = str;
}
ret = e_snprintf(buf, 128, "%s:%s", group, event);
if (ret < 0) {
pr_err("Failed to copy event.");
free(str);
goto error;
}
pr_debug("Group: %s, Event: %s\n", group, event);
if (namelist)
ret = del_trace_probe_event(kfd, buf, namelist);
if (unamelist && ret != 0)
ret = del_trace_probe_event(ufd, buf, unamelist);
if (ret != 0)
pr_info("Info: Event \"%s\" does not exist.\n", buf);
free(str);
ret2 = del_trace_probe_events(ufd, filter, unamelist);
if (ret2 < 0 && ret2 != -ENOENT)
ret = ret2;
else if (ret == -ENOENT && ret2 == -ENOENT) {
pr_debug("\"%s\" does not hit any event.\n", str);
/* Note that this is silently ignored */
ret = 0;
}
error:
@ -2782,6 +2806,7 @@ error:
strlist__delete(unamelist);
close(ufd);
}
free(str);
return ret;
}
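
To make the new filter semantics concrete (the filter is matched first against the bare event name, then against the "group:event" form), here is a hedged sketch; the group/event names and the rule are invented for the example.

/* Sketch only: a probe event "probe:vfs_read" matches the rule
 * "probe:vfs_*" through the second, "group:event" comparison in
 * filter_probe_trace_event().
 */
static void sketch_probe_filter_demo(void)
{
	struct probe_trace_event tev = {
		.group = "probe",
		.event = "vfs_read",
	};
	struct strfilter *filter = strfilter__new("probe:vfs_*", NULL);

	if (!filter)
		return;
	if (filter_probe_trace_event(&tev, filter))
		pr_debug("probe:vfs_read would be listed/deleted\n");
	strfilter__delete(filter);
}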

View File

@ -126,8 +126,8 @@ extern const char *kernel_get_module_path(const char *module);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
int max_probe_points, bool force_add);
extern int del_perf_probe_events(struct strlist *dellist);
extern int show_perf_probe_events(void);
extern int del_perf_probe_events(struct strfilter *filter);
extern int show_perf_probe_events(struct strfilter *filter);
extern int show_line_range(struct line_range *lr, const char *module,
bool user);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
@ -135,6 +135,9 @@ extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
struct strfilter *filter, bool externs);
extern int show_available_funcs(const char *module, struct strfilter *filter,
bool user);
bool arch__prefers_symtab(void);
void arch__fix_tev_from_maps(struct perf_probe_event *pev,
struct probe_trace_event *tev, struct map *map);
/* Maximum index number of event-name postfix */
#define MAX_EVENT_INDEX 1024

View File

@ -74,3 +74,10 @@ void *pstack__pop(struct pstack *pstack)
pstack->entries[pstack->top] = NULL;
return ret;
}
void *pstack__peek(struct pstack *pstack)
{
if (pstack->top == 0)
return NULL;
return pstack->entries[pstack->top - 1];
}
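
A hedged usage sketch for the new pstack__peek() follows: unlike pstack__pop(), it returns the newest entry without removing it, so a caller can inspect the top of the stack non-destructively. The pstack__new()/pstack__delete() calls are assumed from the existing pstack API.

static void sketch_pstack_peek_demo(void *key)
{
	struct pstack *ps = pstack__new(2);	/* assumed constructor */

	if (!ps)
		return;
	pstack__push(ps, key);
	BUG_ON(pstack__peek(ps) != key);	/* still on the stack */
	BUG_ON(pstack__pop(ps) != key);		/* now removed */
	BUG_ON(pstack__peek(ps) != NULL);	/* empty stack yields NULL */
	pstack__delete(ps);
}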

View File

@ -10,5 +10,6 @@ bool pstack__empty(const struct pstack *pstack);
void pstack__remove(struct pstack *pstack, void *key);
void pstack__push(struct pstack *pstack, void *key);
void *pstack__pop(struct pstack *pstack);
void *pstack__peek(struct pstack *pstack);
#endif /* _PERF_PSTACK_ */

View File

@ -20,7 +20,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
if (!evlist)
return -ENOMEM;
if (parse_events(evlist, str))
if (parse_events(evlist, str, NULL))
goto out_delete;
evsel = perf_evlist__first(evlist);
@ -119,7 +119,16 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
evsel->attr.comm_exec = 1;
}
if (evlist->nr_entries > 1) {
if (opts->full_auxtrace) {
/*
* Need to be able to synthesize and parse selected events with
* arbitrary sample types, which requires always being able to
* match the id.
*/
use_sample_identifier = perf_can_sample_identifier();
evlist__for_each(evlist, evsel)
perf_evsel__set_sample_id(evsel, use_sample_identifier);
} else if (evlist->nr_entries > 1) {
struct perf_evsel *first = perf_evlist__first(evlist);
evlist__for_each(evlist, evsel) {
@ -207,7 +216,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
if (!temp_evlist)
return false;
err = parse_events(temp_evlist, str);
err = parse_events(temp_evlist, str, NULL);
if (err)
goto out_delete;

View File

@ -15,12 +15,13 @@
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
static int machines__deliver_event(struct machines *machines,
struct perf_evlist *evlist,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool, u64 file_offset);
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset);
static int perf_session__open(struct perf_session *session)
{
@ -105,8 +106,8 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
return ret;
}
return machines__deliver_event(&session->machines, session->evlist, event->event,
&sample, session->tool, event->file_offset);
return perf_session__deliver_event(session, event->event, &sample,
session->tool, event->file_offset);
}
struct perf_session *perf_session__new(struct perf_data_file *file,
@ -119,6 +120,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
session->repipe = repipe;
session->tool = tool;
INIT_LIST_HEAD(&session->auxtrace_index);
machines__init(&session->machines);
ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
@ -185,6 +187,8 @@ static void perf_session_env__delete(struct perf_session_env *env)
void perf_session__delete(struct perf_session *session)
{
auxtrace__free(session);
auxtrace_index__free(&session->auxtrace_index);
perf_session__destroy_kernel_maps(session);
perf_session__delete_threads(session);
perf_session_env__delete(&session->header.env);
@ -262,6 +266,49 @@ static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *session __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int skipn(int fd, off_t n)
{
char buf[4096];
ssize_t ret;
while (n > 0) {
ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
if (ret <= 0)
return ret;
n -= ret;
}
return 0;
}
static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session
__maybe_unused)
{
dump_printf(": unhandled!\n");
if (perf_data_file__is_pipe(session->file))
skipn(perf_data_file__fd(session->file), event->auxtrace.size);
return event->auxtrace.size;
}
static
int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *session __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
@ -278,6 +325,10 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->exit = process_event_stub;
if (tool->lost == NULL)
tool->lost = perf_event__process_lost;
if (tool->aux == NULL)
tool->aux = perf_event__process_aux;
if (tool->itrace_start == NULL)
tool->itrace_start = perf_event__process_itrace_start;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
@ -298,6 +349,12 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
}
if (tool->id_index == NULL)
tool->id_index = process_id_index_stub;
if (tool->auxtrace_info == NULL)
tool->auxtrace_info = process_event_auxtrace_info_stub;
if (tool->auxtrace == NULL)
tool->auxtrace = process_event_auxtrace_stub;
if (tool->auxtrace_error == NULL)
tool->auxtrace_error = process_event_auxtrace_error_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
@ -390,6 +447,26 @@ static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
swap_sample_id_all(event, &event->read + 1);
}
static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
event->aux.aux_offset = bswap_64(event->aux.aux_offset);
event->aux.aux_size = bswap_64(event->aux.aux_size);
event->aux.flags = bswap_64(event->aux.flags);
if (sample_id_all)
swap_sample_id_all(event, &event->aux + 1);
}
static void perf_event__itrace_start_swap(union perf_event *event,
bool sample_id_all)
{
event->itrace_start.pid = bswap_32(event->itrace_start.pid);
event->itrace_start.tid = bswap_32(event->itrace_start.tid);
if (sample_id_all)
swap_sample_id_all(event, &event->itrace_start + 1);
}
static void perf_event__throttle_swap(union perf_event *event,
bool sample_id_all)
{
@ -449,6 +526,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
attr->branch_sample_type = bswap_64(attr->branch_sample_type);
attr->sample_regs_user = bswap_64(attr->sample_regs_user);
attr->sample_stack_user = bswap_32(attr->sample_stack_user);
attr->aux_watermark = bswap_32(attr->aux_watermark);
swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
@ -478,6 +556,40 @@ static void perf_event__tracing_data_swap(union perf_event *event,
event->tracing_data.size = bswap_32(event->tracing_data.size);
}
static void perf_event__auxtrace_info_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
size_t size;
event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
size = event->header.size;
size -= (void *)&event->auxtrace_info.priv - (void *)event;
mem_bswap_64(event->auxtrace_info.priv, size);
}
static void perf_event__auxtrace_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->auxtrace.size = bswap_64(event->auxtrace.size);
event->auxtrace.offset = bswap_64(event->auxtrace.offset);
event->auxtrace.reference = bswap_64(event->auxtrace.reference);
event->auxtrace.idx = bswap_32(event->auxtrace.idx);
event->auxtrace.tid = bswap_32(event->auxtrace.tid);
event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}
static void perf_event__auxtrace_error_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
}
typedef void (*perf_event__swap_op)(union perf_event *event,
bool sample_id_all);
@ -492,11 +604,16 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
[PERF_RECORD_AUX] = perf_event__aux_swap,
[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
@ -938,12 +1055,34 @@ static int machines__deliver_event(struct machines *machines,
return tool->throttle(tool, event, sample, machine);
case PERF_RECORD_UNTHROTTLE:
return tool->unthrottle(tool, event, sample, machine);
case PERF_RECORD_AUX:
return tool->aux(tool, event, sample, machine);
case PERF_RECORD_ITRACE_START:
return tool->itrace_start(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
}
}
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset)
{
int ret;
ret = auxtrace__process_event(session, event, sample, tool);
if (ret < 0)
return ret;
if (ret > 0)
return 0;
return machines__deliver_event(&session->machines, session->evlist,
event, sample, tool, file_offset);
}
static s64 perf_session__process_user_event(struct perf_session *session,
union perf_event *event,
u64 file_offset)
@ -980,6 +1119,15 @@ static s64 perf_session__process_user_event(struct perf_session *session,
return tool->finished_round(tool, event, oe);
case PERF_RECORD_ID_INDEX:
return tool->id_index(tool, event, session);
case PERF_RECORD_AUXTRACE_INFO:
return tool->auxtrace_info(tool, event, session);
case PERF_RECORD_AUXTRACE:
/* setup for reading amidst mmap */
lseek(fd, file_offset + event->header.size, SEEK_SET);
return tool->auxtrace(tool, event, session);
case PERF_RECORD_AUXTRACE_ERROR:
perf_session__auxtrace_error_inc(session, event);
return tool->auxtrace_error(tool, event, session);
default:
return -EINVAL;
}
@ -1096,8 +1244,8 @@ static s64 perf_session__process_event(struct perf_session *session,
return ret;
}
return machines__deliver_event(&session->machines, evlist, event,
&sample, tool, file_offset);
return perf_session__deliver_event(session, event, &sample, tool,
file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
@ -1168,6 +1316,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
if (oe->nr_unordered_events != 0)
ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
events_stats__auxtrace_error_warn(stats);
}
volatile int session_done;
@ -1256,10 +1406,14 @@ more:
done:
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
goto out_err;
err = auxtrace__flush_events(session, tool);
out_err:
free(buf);
perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
auxtrace__free_events(session);
return err;
}
@ -1402,10 +1556,14 @@ more:
out:
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
goto out_err;
err = auxtrace__flush_events(session, tool);
out_err:
ui_progress__finish();
perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
auxtrace__free_events(session);
session->one_mmap = false;
return err;
}
@ -1488,7 +1646,13 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
size_t ret = fprintf(fp, "Aggregated stats:\n");
size_t ret;
const char *msg = "";
if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
ret = fprintf(fp, "Aggregated stats:%s\n", msg);
ret += events_stats__fprintf(&session->evlist->stats, fp);
return ret;

View File

@ -15,10 +15,16 @@
struct ip_callchain;
struct thread;
struct auxtrace;
struct itrace_synth_opts;
struct perf_session {
struct perf_header header;
struct machines machines;
struct perf_evlist *evlist;
struct auxtrace *auxtrace;
struct itrace_synth_opts *itrace_synth_opts;
struct list_head auxtrace_index;
struct trace_event tevent;
bool repipe;
bool one_mmap;

View File

@ -58,15 +58,16 @@ struct he_stat {
struct hist_entry_diff {
bool computed;
union {
/* PERF_HPP__DELTA */
double period_ratio_delta;
/* PERF_HPP__DELTA */
double period_ratio_delta;
/* PERF_HPP__RATIO */
double period_ratio;
/* PERF_HPP__RATIO */
double period_ratio;
/* HISTC_WEIGHTED_DIFF */
s64 wdiff;
/* HISTC_WEIGHTED_DIFF */
s64 wdiff;
};
};
/**
@ -92,21 +93,28 @@ struct hist_entry {
s32 cpu;
u8 cpumode;
struct hist_entry_diff diff;
/* We are added by hists__add_dummy_entry. */
bool dummy;
/* XXX These two should move to some tree widget lib */
u16 row_offset;
u16 nr_rows;
bool init_have_children;
char level;
u8 filtered;
union {
/*
* Since perf diff only supports the stdio output, TUI
* fields are only accessed from perf report (or perf
* top). So make it a union to reduce memory usage.
*/
struct hist_entry_diff diff;
struct /* for TUI */ {
u16 row_offset;
u16 nr_rows;
bool init_have_children;
bool unfolded;
bool has_children;
};
};
char *srcline;
struct symbol *parent;
unsigned long position;
struct rb_root sorted_chain;
struct branch_info *branch_info;
struct hists *hists;

View File

@ -170,6 +170,46 @@ struct strfilter *strfilter__new(const char *rules, const char **err)
return filter;
}
static int strfilter__append(struct strfilter *filter, bool _or,
const char *rules, const char **err)
{
struct strfilter_node *right, *root;
const char *ep = NULL;
if (!filter || !rules)
return -EINVAL;
right = strfilter_node__new(rules, &ep);
if (!right || *ep != '\0') {
if (err)
*err = ep;
goto error;
}
root = strfilter_node__alloc(_or ? OP_or : OP_and, filter->root, right);
if (!root) {
ep = NULL;
goto error;
}
filter->root = root;
return 0;
error:
strfilter_node__delete(right);
return ep ? -EINVAL : -ENOMEM;
}
int strfilter__or(struct strfilter *filter, const char *rules, const char **err)
{
return strfilter__append(filter, true, rules, err);
}
int strfilter__and(struct strfilter *filter, const char *rules,
const char **err)
{
return strfilter__append(filter, false, rules, err);
}
static bool strfilter_node__compare(struct strfilter_node *node,
const char *str)
{
@ -197,3 +237,70 @@ bool strfilter__compare(struct strfilter *filter, const char *str)
return false;
return strfilter_node__compare(filter->root, str);
}
static int strfilter_node__sprint(struct strfilter_node *node, char *buf);
/* sprint node in parenthesis if needed */
static int strfilter_node__sprint_pt(struct strfilter_node *node, char *buf)
{
int len;
int pt = node->r ? 2 : 0; /* don't need to check node->l */
if (buf && pt)
*buf++ = '(';
len = strfilter_node__sprint(node, buf);
if (len < 0)
return len;
if (buf && pt)
*(buf + len) = ')';
return len + pt;
}
static int strfilter_node__sprint(struct strfilter_node *node, char *buf)
{
int len = 0, rlen;
if (!node || !node->p)
return -EINVAL;
switch (*node->p) {
case '|':
case '&':
len = strfilter_node__sprint_pt(node->l, buf);
if (len < 0)
return len;
case '!':
if (buf) {
*(buf + len++) = *node->p;
buf += len;
} else
len++;
rlen = strfilter_node__sprint_pt(node->r, buf);
if (rlen < 0)
return rlen;
len += rlen;
break;
default:
len = strlen(node->p);
if (buf)
strcpy(buf, node->p);
}
return len;
}
char *strfilter__string(struct strfilter *filter)
{
int len;
char *ret = NULL;
len = strfilter_node__sprint(filter->root, NULL);
if (len < 0)
return NULL;
ret = malloc(len + 1);
if (ret)
strfilter_node__sprint(filter->root, ret);
return ret;
}

View File

@ -28,6 +28,32 @@ struct strfilter {
*/
struct strfilter *strfilter__new(const char *rules, const char **err);
/**
* strfilter__or - Append an additional rule by logical-or
* @filter: Original string filter
* @rules: Filter rule to be appended at left of the root of
* @filter by using logical-or.
* @err: Pointer which points to an error detected in @rules
*
* Parse @rules and join it to the @filter by using logical-or.
* Return 0 if success, or return the error code.
*/
int strfilter__or(struct strfilter *filter,
const char *rules, const char **err);
/**
* strfilter__and - Append an additional rule by logical-and
* @filter: Original string filter
* @rules: Filter rule to be appended at left of the root of
* @filter by using logical-and.
* @err: Pointer which points to an error detected in @rules
*
* Parse @rules and join it to the @filter by using logical-and.
* Return 0 if success, or return the error code.
*/
int strfilter__and(struct strfilter *filter,
const char *rules, const char **err);
/**
* strfilter__compare - compare given string and a string filter
* @filter: String filter
@ -45,4 +71,13 @@ bool strfilter__compare(struct strfilter *filter, const char *str);
*/
void strfilter__delete(struct strfilter *filter);
/**
* strfilter__string - Reconstruct a rule string from filter
* @filter: String filter to reconstruct
*
* Reconstruct a rule string from @filter. This is useful for
* debug messages. Note that the returned string must be freed afterward.
*/
char *strfilter__string(struct strfilter *filter);
#endif
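
Below is a hedged sketch tying the new strfilter helpers together: append a rule with strfilter__or(), test a candidate string, and reconstruct the rule for a debug message. The rule strings are arbitrary examples and error handling is abbreviated.

static void sketch_strfilter_demo(void)
{
	const char *err = NULL;
	struct strfilter *filter = strfilter__new("probe_*", &err);
	char *str;

	if (!filter)
		return;
	if (strfilter__or(filter, "vfs_read", &err))
		goto out;
	if (strfilter__compare(filter, "vfs_read"))
		pr_debug("matched\n");
	str = strfilter__string(filter);	/* roughly "probe_*|vfs_read" */
	if (str) {
		pr_debug("filter: %s\n", str);
		free(str);
	}
out:
	strfilter__delete(filter);
}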

View File

@ -630,6 +630,11 @@ void symsrc__destroy(struct symsrc *ss)
close(ss->fd);
}
bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
}
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
enum dso_binary_type type)
{
@ -678,6 +683,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
}
if (!dso__build_id_equal(dso, build_id)) {
pr_debug("%s: build id mismatch for %s.\n", __func__, name);
dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
goto out_elf_end;
}
@ -711,8 +717,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
".gnu.prelink_undo",
NULL) != NULL);
} else {
ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
ehdr.e_type == ET_REL;
ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
}
ss->name = strdup(name);
@ -771,6 +776,8 @@ static bool want_demangle(bool is_kernel_sym)
return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}
void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }
int dso__load_sym(struct dso *dso, struct map *map,
struct symsrc *syms_ss, struct symsrc *runtime_ss,
symbol_filter_t filter, int kmodule)
@ -935,6 +942,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
(sym.st_value & 1))
--sym.st_value;
arch__elf_sym_adjust(&sym);
if (dso->kernel || kmodule) {
char dso_name[PATH_MAX];

View File

@ -85,8 +85,17 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}
#define SYMBOL_A 0
#define SYMBOL_B 1
int __weak arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
/* Avoid "SyS" kernel syscall aliases */
if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
return SYMBOL_B;
if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
return SYMBOL_B;
return SYMBOL_A;
}
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
@ -134,13 +143,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
else if (na < nb)
return SYMBOL_B;
/* Avoid "SyS" kernel syscall aliases */
if (na >= 3 && !strncmp(syma->name, "SyS", 3))
return SYMBOL_B;
if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
return SYMBOL_B;
return SYMBOL_A;
return arch__choose_best_symbol(syma, symb);
}
void symbols__fixup_duplicate(struct rb_root *symbols)
@ -408,7 +411,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
int cmp;
s = rb_entry(n, struct symbol_name_rb_node, rb_node);
cmp = strcmp(name, s->sym.name);
cmp = arch__compare_symbol_names(name, s->sym.name);
if (cmp < 0)
n = n->rb_left;
@ -426,7 +429,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
struct symbol_name_rb_node *tmp;
tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
if (strcmp(tmp->sym.name, s->sym.name))
if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
break;
s = tmp;

View File

@ -158,8 +158,6 @@ struct ref_reloc_sym {
struct map_symbol {
struct map *map;
struct symbol *sym;
bool unfolded;
bool has_children;
};
struct addr_map_symbol {
@ -303,4 +301,14 @@ int setup_list(struct strlist **list, const char *list_str,
int setup_intlist(struct intlist **list, const char *list_str,
const char *list_name);
#ifdef HAVE_LIBELF_SUPPORT
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
void arch__elf_sym_adjust(GElf_Sym *sym);
#endif
#define SYMBOL_A 0
#define SYMBOL_B 1
int arch__choose_best_symbol(struct symbol *syma, struct symbol *symb);
#endif /* __PERF_SYMBOL */
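
For illustration, a hedged sketch of what an architecture override of the new weak arch__choose_best_symbol() hook could look like, e.g. stripping a leading dot (as ppc64 ABIv1 function descriptors have) before the SyS checks. It is modeled on the default implementation above and is not a verbatim copy of any override in this series.

int arch__choose_best_symbol(struct symbol *syma,
			     struct symbol *symb __maybe_unused)
{
	char *sym = syma->name;

	/* Skip over any initial dot */
	if (*sym == '.')
		sym++;

	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
		return SYMBOL_B;
	if (strlen(sym) >= 10 && !strncmp(sym, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}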

View File

@ -3,6 +3,8 @@
#include <stdbool.h>
#include <linux/types.h>
struct perf_session;
union perf_event;
struct perf_evlist;
@ -29,6 +31,9 @@ typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
struct ordered_events *oe);
typedef s64 (*event_op3)(struct perf_tool *tool, union perf_event *event,
struct perf_session *session);
struct perf_tool {
event_sample sample,
read;
@ -38,13 +43,18 @@ struct perf_tool {
fork,
exit,
lost,
aux,
itrace_start,
throttle,
unthrottle;
event_attr_op attr;
event_op2 tracing_data;
event_oe finished_round;
event_op2 build_id,
id_index;
id_index,
auxtrace_info,
auxtrace_error;
event_op3 auxtrace;
bool ordered_events;
bool ordering_requires_timestamps;
};
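
Finally, a hedged sketch of how a builtin command could wire up the callbacks added to struct perf_tool above. The sample handler is hypothetical, and the perf_event__process_auxtrace*() helpers are assumed to be provided by the auxtrace patches in this series.

static int sketch_process_sample(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_sample *sample __maybe_unused,
				 struct perf_evsel *evsel __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	return 0;	/* hypothetical per-sample handling */
}

static struct perf_tool sketch_tool = {
	.sample		= sketch_process_sample,
	.aux		= perf_event__process_aux,
	.itrace_start	= perf_event__process_itrace_start,
	.auxtrace_info	= perf_event__process_auxtrace_info,	/* assumed */
	.auxtrace	= perf_event__process_auxtrace,		/* assumed */
	.auxtrace_error	= perf_event__process_auxtrace_error,	/* assumed */
	.ordered_events	= true,
};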