Merge tag 'perf-core-for-mingo-5.1-20190321' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/core improvements and fixes from Arnaldo:

BPF:

  Song Liu:

  - Add support for annotating BPF programs, using the PERF_RECORD_BPF_EVENT
    and PERF_RECORD_KSYMBOL recently added to the kernel and plugging
    binutils's libopcodes disassembly of BPF programs with the existing
    annotation interfaces in 'perf annotate', 'perf report' and 'perf top'
    various output formats (--stdio, --stdio2, --tui).

perf list:

  Andi Kleen:

  - Filter metrics when using substring search.

perf record:

  Andi Kleen:

  - Allow limiting the number of perf.data files kept when using --switch-output.

  - Clarify help for --switch-output.

perf report:

  Andi Kleen:

  - Indicate JITed code better.

  - Show all sort keys in help output.

perf script:

  Andi Kleen:

  - Support relative time.

perf stat:

  Andi Kleen:

  - Improve scaling.

General:

  Changbin Du:

  - Fix memory and reference count leaks, mostly in error paths, found
    using gcc's ASan and UBSan.

Vendor events:

  Mamatha Inamdar:

  - Remove P8 HW events which are not supported.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
hifive-unleashed-5.1
Thomas Gleixner 2019-03-22 22:51:21 +01:00
commit d8b5297f6d
78 changed files with 1797 additions and 1031 deletions
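To exercise the BPF annotation support listed in the pull summary above, perf must be built against binutils libopcodes (the new disassembler-four-args feature test added below handles the probing). A minimal session could look like this; the bpf_prog_<tag>_<name> symbol is whatever shows up for a BPF program loaded on the running system:

  $ perf record -a -- sleep 10
  $ perf report                             # BPF programs appear as bpf_prog_<tag>_<name> symbols
  $ perf annotate bpf_prog_<tag>_<name>     # disassembled via libopcodes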

View File

@ -401,41 +401,31 @@ static int do_show(int argc, char **argv)
static int do_dump(int argc, char **argv)
{
unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size;
void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
struct bpf_prog_info_linear *info_linear;
struct bpf_prog_linfo *prog_linfo = NULL;
unsigned long *func_ksyms = NULL;
struct bpf_prog_info info = {};
unsigned int *func_lens = NULL;
enum {DUMP_JITED, DUMP_XLATED} mode;
const char *disasm_opt = NULL;
unsigned int nr_func_ksyms;
unsigned int nr_func_lens;
struct bpf_prog_info *info;
struct dump_data dd = {};
__u32 len = sizeof(info);
void *func_info = NULL;
struct btf *btf = NULL;
unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
char func_sig[1024];
unsigned char *buf;
bool linum = false;
__u32 *member_len;
__u64 *member_ptr;
__u32 member_len;
__u64 arrays;
ssize_t n;
int err;
int fd;
if (is_prefix(*argv, "jited")) {
if (disasm_init())
return -1;
member_len = &info.jited_prog_len;
member_ptr = &info.jited_prog_insns;
mode = DUMP_JITED;
} else if (is_prefix(*argv, "xlated")) {
member_len = &info.xlated_prog_len;
member_ptr = &info.xlated_prog_insns;
mode = DUMP_XLATED;
} else {
p_err("expected 'xlated' or 'jited', got: %s", *argv);
return -1;
@ -474,175 +464,50 @@ static int do_dump(int argc, char **argv)
return -1;
}
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
return -1;
}
if (mode == DUMP_JITED)
arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
else
arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
if (!*member_len) {
p_info("no instructions returned");
close(fd);
return 0;
}
arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
buf_size = *member_len;
buf = malloc(buf_size);
if (!buf) {
p_err("mem alloc failed");
close(fd);
return -1;
}
nr_func_ksyms = info.nr_jited_ksyms;
if (nr_func_ksyms) {
func_ksyms = malloc(nr_func_ksyms * sizeof(__u64));
if (!func_ksyms) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
nr_func_lens = info.nr_jited_func_lens;
if (nr_func_lens) {
func_lens = malloc(nr_func_lens * sizeof(__u32));
if (!func_lens) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
nr_finfo = info.nr_func_info;
finfo_rec_size = info.func_info_rec_size;
if (nr_finfo && finfo_rec_size) {
func_info = malloc(nr_finfo * finfo_rec_size);
if (!func_info) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
linfo_rec_size = info.line_info_rec_size;
if (info.nr_line_info && linfo_rec_size && info.btf_id) {
nr_linfo = info.nr_line_info;
linfo = malloc(nr_linfo * linfo_rec_size);
if (!linfo) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
jited_linfo_rec_size = info.jited_line_info_rec_size;
if (info.nr_jited_line_info &&
jited_linfo_rec_size &&
info.nr_jited_ksyms &&
info.nr_jited_func_lens &&
info.btf_id) {
nr_jited_linfo = info.nr_jited_line_info;
jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
if (!jited_linfo) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
memset(&info, 0, sizeof(info));
*member_ptr = ptr_to_u64(buf);
*member_len = buf_size;
info.jited_ksyms = ptr_to_u64(func_ksyms);
info.nr_jited_ksyms = nr_func_ksyms;
info.jited_func_lens = ptr_to_u64(func_lens);
info.nr_jited_func_lens = nr_func_lens;
info.nr_func_info = nr_finfo;
info.func_info_rec_size = finfo_rec_size;
info.func_info = ptr_to_u64(func_info);
info.nr_line_info = nr_linfo;
info.line_info_rec_size = linfo_rec_size;
info.line_info = ptr_to_u64(linfo);
info.nr_jited_line_info = nr_jited_linfo;
info.jited_line_info_rec_size = jited_linfo_rec_size;
info.jited_line_info = ptr_to_u64(jited_linfo);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
info_linear = bpf_program__get_prog_info_linear(fd, arrays);
close(fd);
if (err) {
if (IS_ERR_OR_NULL(info_linear)) {
p_err("can't get prog info: %s", strerror(errno));
goto err_free;
return -1;
}
if (*member_len > buf_size) {
p_err("too many instructions returned");
goto err_free;
info = &info_linear->info;
if (mode == DUMP_JITED) {
if (info->jited_prog_len == 0) {
p_info("no instructions returned");
goto err_free;
}
buf = (unsigned char *)(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
goto err_free;
}
buf = (unsigned char *)info->xlated_prog_insns;
member_len = info->xlated_prog_len;
}
if (info.nr_jited_ksyms > nr_func_ksyms) {
p_err("too many addresses returned");
goto err_free;
}
if (info.nr_jited_func_lens > nr_func_lens) {
p_err("too many values returned");
goto err_free;
}
if (info.nr_func_info != nr_finfo) {
p_err("incorrect nr_func_info %d vs. expected %d",
info.nr_func_info, nr_finfo);
goto err_free;
}
if (info.func_info_rec_size != finfo_rec_size) {
p_err("incorrect func_info_rec_size %d vs. expected %d",
info.func_info_rec_size, finfo_rec_size);
goto err_free;
}
if (linfo && info.nr_line_info != nr_linfo) {
p_err("incorrect nr_line_info %u vs. expected %u",
info.nr_line_info, nr_linfo);
goto err_free;
}
if (info.line_info_rec_size != linfo_rec_size) {
p_err("incorrect line_info_rec_size %u vs. expected %u",
info.line_info_rec_size, linfo_rec_size);
goto err_free;
}
if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
p_err("incorrect nr_jited_line_info %u vs. expected %u",
info.nr_jited_line_info, nr_jited_linfo);
goto err_free;
}
if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
info.jited_line_info_rec_size, jited_linfo_rec_size);
goto err_free;
}
if ((member_len == &info.jited_prog_len &&
info.jited_prog_insns == 0) ||
(member_len == &info.xlated_prog_len &&
info.xlated_prog_insns == 0)) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
goto err_free;
}
if (info.btf_id && btf__get_from_id(info.btf_id, &btf)) {
if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
p_err("failed to get btf");
goto err_free;
}
if (nr_linfo) {
prog_linfo = bpf_prog_linfo__new(&info);
func_info = (void *)info->func_info;
if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
if (!prog_linfo)
p_info("error in processing bpf_line_info. continue without it.");
}
@ -655,9 +520,9 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
n = write(fd, buf, *member_len);
n = write(fd, buf, member_len);
close(fd);
if (n != *member_len) {
if (n != member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
goto err_free;
@ -665,19 +530,19 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_null(json_wtr);
} else if (member_len == &info.jited_prog_len) {
} else if (mode == DUMP_JITED) {
const char *name = NULL;
if (info.ifindex) {
name = ifindex_to_bfd_params(info.ifindex,
info.netns_dev,
info.netns_ino,
if (info->ifindex) {
name = ifindex_to_bfd_params(info->ifindex,
info->netns_dev,
info->netns_ino,
&disasm_opt);
if (!name)
goto err_free;
}
if (info.nr_jited_func_lens && info.jited_func_lens) {
if (info->nr_jited_func_lens && info->jited_func_lens) {
struct kernel_sym *sym = NULL;
struct bpf_func_info *record;
char sym_name[SYM_MAX_NAME];
@ -685,17 +550,16 @@ static int do_dump(int argc, char **argv)
__u64 *ksyms = NULL;
__u32 *lens;
__u32 i;
if (info.nr_jited_ksyms) {
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
ksyms = (__u64 *) info.jited_ksyms;
ksyms = (__u64 *) info->jited_ksyms;
}
if (json_output)
jsonw_start_array(json_wtr);
lens = (__u32 *) info.jited_func_lens;
for (i = 0; i < info.nr_jited_func_lens; i++) {
lens = (__u32 *) info->jited_func_lens;
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
if (sym)
@ -707,7 +571,7 @@ static int do_dump(int argc, char **argv)
}
if (func_info) {
record = func_info + i * finfo_rec_size;
record = func_info + i * info->func_info_rec_size;
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
@ -744,49 +608,37 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
} else {
disasm_print_insn(buf, *member_len, opcodes, name,
disasm_print_insn(buf, member_len, opcodes, name,
disasm_opt, btf, NULL, 0, 0, false);
}
} else if (visual) {
if (json_output)
jsonw_null(json_wtr);
else
dump_xlated_cfg(buf, *member_len);
dump_xlated_cfg(buf, member_len);
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info.nr_jited_ksyms;
dd.jited_ksyms = (__u64 *) info.jited_ksyms;
dd.nr_jited_ksyms = info->nr_jited_ksyms;
dd.jited_ksyms = (__u64 *) info->jited_ksyms;
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = finfo_rec_size;
dd.finfo_rec_size = info->func_info_rec_size;
dd.prog_linfo = prog_linfo;
if (json_output)
dump_xlated_json(&dd, buf, *member_len, opcodes,
dump_xlated_json(&dd, buf, member_len, opcodes,
linum);
else
dump_xlated_plain(&dd, buf, *member_len, opcodes,
dump_xlated_plain(&dd, buf, member_len, opcodes,
linum);
kernel_syms_destroy(&dd);
}
free(buf);
free(func_ksyms);
free(func_lens);
free(func_info);
free(linfo);
free(jited_linfo);
bpf_prog_linfo__free(prog_linfo);
free(info_linear);
return 0;
err_free:
free(buf);
free(func_ksyms);
free(func_lens);
free(func_info);
free(linfo);
free(jited_linfo);
bpf_prog_linfo__free(prog_linfo);
free(info_linear);
return -1;
}
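The refactored do_dump() above keeps the existing bpftool command-line behaviour while fetching everything through bpf_program__get_prog_info_linear(). A couple of illustrative invocations (the program id 42 is a placeholder):

  $ bpftool prog dump xlated id 42            # translated instructions
  $ bpftool prog dump jited id 42 opcodes     # JITed code, with raw opcodes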

View File

@ -66,7 +66,8 @@ FEATURE_TESTS_BASIC := \
sched_getcpu \
sdt \
setns \
libaio
libaio \
disassembler-four-args
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
@ -118,7 +119,8 @@ FEATURE_DISPLAY ?= \
lzma \
get_cpuid \
bpf \
libaio
libaio \
disassembler-four-args
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not

View File

@ -178,6 +178,10 @@
# include "test-reallocarray.c"
#undef main
#define main main_test_disassembler_four_args
# include "test-disassembler-four-args.c"
#undef main
int main(int argc, char *argv[])
{
main_test_libpython();
@ -219,6 +223,7 @@ int main(int argc, char *argv[])
main_test_setns();
main_test_libaio();
main_test_reallocarray();
main_test_disassembler_four_args();
return 0;
}

View File

@ -112,6 +112,11 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
}
struct bpf_capabilities {
/* v4.14: kernel support for program & map names. */
__u32 name:1;
@ -622,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
bool strict = !(flags & MAPS_RELAX_COMPAT);
int i, map_idx, map_def_sz, nr_maps = 0;
Elf_Scn *scn;
Elf_Data *data;
Elf_Data *data = NULL;
Elf_Data *symbols = obj->efile.symbols;
if (obj->efile.maps_shndx < 0)
@ -2999,3 +3004,249 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
ring_buffer_write_tail(header, data_tail);
return ret;
}
struct bpf_prog_info_array_desc {
int array_offset; /* e.g. offset of jited_prog_insns */
int count_offset; /* e.g. offset of jited_prog_len */
int size_offset; /* > 0: offset of rec size,
* < 0: fix size of -size_offset
*/
};
static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
[BPF_PROG_INFO_JITED_INSNS] = {
offsetof(struct bpf_prog_info, jited_prog_insns),
offsetof(struct bpf_prog_info, jited_prog_len),
-1,
},
[BPF_PROG_INFO_XLATED_INSNS] = {
offsetof(struct bpf_prog_info, xlated_prog_insns),
offsetof(struct bpf_prog_info, xlated_prog_len),
-1,
},
[BPF_PROG_INFO_MAP_IDS] = {
offsetof(struct bpf_prog_info, map_ids),
offsetof(struct bpf_prog_info, nr_map_ids),
-(int)sizeof(__u32),
},
[BPF_PROG_INFO_JITED_KSYMS] = {
offsetof(struct bpf_prog_info, jited_ksyms),
offsetof(struct bpf_prog_info, nr_jited_ksyms),
-(int)sizeof(__u64),
},
[BPF_PROG_INFO_JITED_FUNC_LENS] = {
offsetof(struct bpf_prog_info, jited_func_lens),
offsetof(struct bpf_prog_info, nr_jited_func_lens),
-(int)sizeof(__u32),
},
[BPF_PROG_INFO_FUNC_INFO] = {
offsetof(struct bpf_prog_info, func_info),
offsetof(struct bpf_prog_info, nr_func_info),
offsetof(struct bpf_prog_info, func_info_rec_size),
},
[BPF_PROG_INFO_LINE_INFO] = {
offsetof(struct bpf_prog_info, line_info),
offsetof(struct bpf_prog_info, nr_line_info),
offsetof(struct bpf_prog_info, line_info_rec_size),
},
[BPF_PROG_INFO_JITED_LINE_INFO] = {
offsetof(struct bpf_prog_info, jited_line_info),
offsetof(struct bpf_prog_info, nr_jited_line_info),
offsetof(struct bpf_prog_info, jited_line_info_rec_size),
},
[BPF_PROG_INFO_PROG_TAGS] = {
offsetof(struct bpf_prog_info, prog_tags),
offsetof(struct bpf_prog_info, nr_prog_tags),
-(int)sizeof(__u8) * BPF_TAG_SIZE,
},
};
static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
{
__u32 *array = (__u32 *)info;
if (offset >= 0)
return array[offset / sizeof(__u32)];
return -(int)offset;
}
static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
{
__u64 *array = (__u64 *)info;
if (offset >= 0)
return array[offset / sizeof(__u64)];
return -(int)offset;
}
static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
__u32 val)
{
__u32 *array = (__u32 *)info;
if (offset >= 0)
array[offset / sizeof(__u32)] = val;
}
static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
__u64 val)
{
__u64 *array = (__u64 *)info;
if (offset >= 0)
array[offset / sizeof(__u64)] = val;
}
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
struct bpf_prog_info_linear *info_linear;
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 data_len = 0;
int i, err;
void *ptr;
if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
return ERR_PTR(-EINVAL);
/* step 1: get array dimensions */
err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
if (err) {
pr_debug("can't get prog info: %s", strerror(errno));
return ERR_PTR(-EFAULT);
}
/* step 2: calculate total size of all arrays */
for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
bool include_array = (arrays & (1UL << i)) > 0;
struct bpf_prog_info_array_desc *desc;
__u32 count, size;
desc = bpf_prog_info_array_desc + i;
/* kernel is too old to support this field */
if (info_len < desc->array_offset + sizeof(__u32) ||
info_len < desc->count_offset + sizeof(__u32) ||
(desc->size_offset > 0 && info_len < desc->size_offset))
include_array = false;
if (!include_array) {
arrays &= ~(1UL << i); /* clear the bit */
continue;
}
count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
data_len += count * size;
}
/* step 3: allocate continuous memory */
data_len = roundup(data_len, sizeof(__u64));
info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
if (!info_linear)
return ERR_PTR(-ENOMEM);
/* step 4: fill data to info_linear->info */
info_linear->arrays = arrays;
memset(&info_linear->info, 0, sizeof(info));
ptr = info_linear->data;
for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
struct bpf_prog_info_array_desc *desc;
__u32 count, size;
if ((arrays & (1UL << i)) == 0)
continue;
desc = bpf_prog_info_array_desc + i;
count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->count_offset, count);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->size_offset, size);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset,
ptr_to_u64(ptr));
ptr += count * size;
}
/* step 5: call syscall again to get required arrays */
err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
if (err) {
pr_debug("can't get prog info: %s", strerror(errno));
free(info_linear);
return ERR_PTR(-EFAULT);
}
/* step 6: verify the data */
for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
struct bpf_prog_info_array_desc *desc;
__u32 v1, v2;
if ((arrays & (1UL << i)) == 0)
continue;
desc = bpf_prog_info_array_desc + i;
v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->count_offset);
if (v1 != v2)
pr_warning("%s: mismatch in element count\n", __func__);
v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->size_offset);
if (v1 != v2)
pr_warning("%s: mismatch in rec size\n", __func__);
}
/* step 7: update info_len and data_len */
info_linear->info_len = sizeof(struct bpf_prog_info);
info_linear->data_len = data_len;
return info_linear;
}
void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
int i;
for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
struct bpf_prog_info_array_desc *desc;
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
desc = bpf_prog_info_array_desc + i;
addr = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
offs = addr - ptr_to_u64(info_linear->data);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset, offs);
}
}
void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
int i;
for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
struct bpf_prog_info_array_desc *desc;
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
desc = bpf_prog_info_array_desc + i;
offs = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
addr = offs + ptr_to_u64(info_linear->data);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset, addr);
}
}

View File

@ -378,6 +378,69 @@ LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
enum bpf_prog_type prog_type, __u32 ifindex);
/*
* Get bpf_prog_info in continuous memory
*
* struct bpf_prog_info has multiple arrays. The user has the option to choose
* which arrays to fetch from the kernel. The following APIs provide a uniform
* way to fetch this data. All arrays in bpf_prog_info are stored in a single
* continuous memory region. This makes it easy to store the info in a
* file.
*
* Before writing bpf_prog_info_linear to files, it is necessary to
* translate pointers in bpf_prog_info to offsets. Helper functions
* bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
* are introduced to switch between pointers and offsets.
*
* Examples:
* # To fetch map_ids and prog_tags:
* __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
* (1UL << BPF_PROG_INFO_PROG_TAGS);
* struct bpf_prog_info_linear *info_linear =
* bpf_program__get_prog_info_linear(fd, arrays);
*
* # To save data in file
* bpf_program__bpil_addr_to_offs(info_linear);
* write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
*
* # To read data from file
* read(f, info_linear, <proper_size>);
* bpf_program__bpil_offs_to_addr(info_linear);
*/
enum bpf_prog_info_array {
BPF_PROG_INFO_FIRST_ARRAY = 0,
BPF_PROG_INFO_JITED_INSNS = 0,
BPF_PROG_INFO_XLATED_INSNS,
BPF_PROG_INFO_MAP_IDS,
BPF_PROG_INFO_JITED_KSYMS,
BPF_PROG_INFO_JITED_FUNC_LENS,
BPF_PROG_INFO_FUNC_INFO,
BPF_PROG_INFO_LINE_INFO,
BPF_PROG_INFO_JITED_LINE_INFO,
BPF_PROG_INFO_PROG_TAGS,
BPF_PROG_INFO_LAST_ARRAY,
};
struct bpf_prog_info_linear {
/* size of struct bpf_prog_info, when the tool is compiled */
__u32 info_len;
/* total bytes allocated for data, round up to 8 bytes */
__u32 data_len;
/* which arrays are included in data */
__u64 arrays;
struct bpf_prog_info info;
__u8 data[];
};
LIBBPF_API struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays);
LIBBPF_API void
bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
#ifdef __cplusplus
} /* extern "C" */
#endif

View File

@ -153,4 +153,7 @@ LIBBPF_0.0.2 {
xsk_socket__delete;
xsk_umem__fd;
xsk_socket__fd;
bpf_program__get_prog_info_linear;
bpf_program__bpil_addr_to_offs;
bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;

View File

@ -47,3 +47,27 @@ Those objects are then used in final linking:
NOTE this description is omitting other libraries involved, only
focusing on build framework outcomes
3) Build with ASan or UBSan
==========================
$ cd tools/perf
$ make DESTDIR=/usr
$ make DESTDIR=/usr install
AddressSanitizer (or ASan) is a GCC feature that detects memory corruption bugs
such as buffer overflows and memory leaks.
$ cd tools/perf
$ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
$ ASAN_OPTIONS=log_path=asan.log ./perf record -a
ASan outputs all detected issues into a log file named 'asan.log.<pid>'.
UndefinedBehaviorSanitizer (or UBSan) is a fast undefined behavior detector
supported by GCC. UBSan detects undefined behaviors of programs at runtime.
$ cd tools/perf
$ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=undefined'
$ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
If UBSan detects any problem at runtime, it outputs a “runtime error:” message.

View File

@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
[report]
# Defaults
sort-order = comm,dso,symbol
sort_order = comm,dso,symbol
percent-limit = 0
queue-size = 0
children = true

View File

@ -495,6 +495,10 @@ overhead. You can still switch them on with:
--switch-output --no-no-buildid --no-no-buildid-cache
--switch-max-files=N::
When rotating perf.data with --switch-output, only keep N files.
--dry-run::
Parse options then exit. --dry-run can be used to detect errors in cmdline
options.
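An illustrative use of the new option together with --switch-output (the interval, file count and workload are arbitrary):

  $ perf record -a --switch-output=10s --switch-max-files=3 -- sleep 60

Only the three most recent rotated perf.data.<timestamp> files are kept; older ones are removed as new rotations happen.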

View File

@ -380,6 +380,9 @@ include::itrace.txt[]
Set the maximum number of program blocks to print with brstackasm for
each sample.
--reltime::
Print time stamps relative to trace start.
--per-event-dump::
Create per event files with a "perf.data.EVENT.dump" name instead of
printing to stdout, useful, for instance, for generating flamegraphs.
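For example, to print timestamps counted from the start of the trace rather than absolute time (assuming an existing perf.data):

  $ perf script --reltime

Note that the patch rejects combining --reltime with --time.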

View File

@ -72,9 +72,8 @@ report::
--all-cpus::
system-wide collection from all CPUs (default if no target is specified)
-c::
--scale::
scale/normalize counter values
--no-scale::
Don't scale/normalize counter values
-d::
--detailed::
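With the option inverted, scaling stays enabled by default and is now disabled explicitly, e.g. (the event list and workload are arbitrary):

  $ perf stat --no-scale -e cycles,instructions -- sleep 1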

View File

@ -227,6 +227,8 @@ FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
CFLAGS += -funwind-tables
@ -713,7 +715,7 @@ else
endif
ifeq ($(feature-libbfd), 1)
EXTLIBS += -lbfd
EXTLIBS += -lbfd -lopcodes
else
# we are on a system that requires -liberty and (maybe) -lz
# to link against -lbfd; test each case individually here
@ -724,12 +726,15 @@ else
$(call feature_check,libbfd-liberty-z)
ifeq ($(feature-libbfd-liberty), 1)
EXTLIBS += -lbfd -liberty
EXTLIBS += -lbfd -lopcodes -liberty
FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
else
ifeq ($(feature-libbfd-liberty-z), 1)
EXTLIBS += -lbfd -liberty -lz
EXTLIBS += -lbfd -lopcodes -liberty -lz
FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
endif
endif
$(call feature_check,disassembler-four-args)
endif
ifdef NO_DEMANGLE
@ -808,6 +813,10 @@ ifdef HAVE_KVM_STAT_SUPPORT
CFLAGS += -DHAVE_KVM_STAT_SUPPORT
endif
ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)

View File

@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
int ret;
int ret = 0;
if (!noaffinity)
pthread_attr_init(&thread_attr);

View File

@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
int ret, events = EPOLLIN;
int ret = 0, events = EPOLLIN;
if (oneshot)
events |= EPOLLONESHOT;

View File

@ -119,7 +119,7 @@ int cmd_list(int argc, const char **argv)
details_flag);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
metricgroup__print(true, true, NULL, raw_dump, details_flag);
metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
}
}
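With the search string now passed down to metricgroup__print(), a substring query filters metrics and metric groups as well as events, e.g. (the substring is arbitrary):

  $ perf list cache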

View File

@ -62,6 +62,9 @@ struct switch_output {
unsigned long time;
const char *str;
bool set;
char **filenames;
int num_files;
int cur_file;
};
struct record {
@ -892,6 +895,7 @@ record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
int fd, err;
char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
@ -912,7 +916,7 @@ record__switch_output(struct record *rec, bool at_exit)
fd = perf_data__switch(data, timestamp,
rec->session->header.data_offset,
at_exit);
at_exit, &new_filename);
if (fd >= 0 && !at_exit) {
rec->bytes_written = 0;
rec->session->header.data_size = 0;
@ -922,6 +926,21 @@ record__switch_output(struct record *rec, bool at_exit)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
data->path, timestamp);
if (rec->switch_output.num_files) {
int n = rec->switch_output.cur_file + 1;
if (n >= rec->switch_output.num_files)
n = 0;
rec->switch_output.cur_file = n;
if (rec->switch_output.filenames[n]) {
remove(rec->switch_output.filenames[n]);
free(rec->switch_output.filenames[n]);
}
rec->switch_output.filenames[n] = new_filename;
} else {
free(new_filename);
}
/* Output tracking events */
if (!at_exit) {
record__synthesize(rec, false);
@ -1095,7 +1114,7 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
machine, opts);
if (err < 0)
pr_warning("Couldn't synthesize bpf events.\n");
@ -1118,6 +1137,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
struct perf_evlist *sb_evlist = NULL;
int fd;
atexit(record__sig_exit);
@ -1218,6 +1238,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
if (!opts->no_bpf_event)
bpf_event__add_sb_event(&sb_evlist, &session->header.env);
if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
err = record__synthesize(rec, false);
if (err < 0)
goto out_child;
@ -1468,6 +1496,9 @@ out_child:
out_delete_session:
perf_session__delete(session);
if (!opts->no_bpf_event)
perf_evlist__stop_sb_thread(sb_evlist);
return status;
}
@ -1872,7 +1903,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@ -1970,9 +2001,11 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
"Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
&record.switch_output.set, "signal,size,time",
"Switch output when receive SIGUSR2 or cross size,time threshold",
&record.switch_output.set, "signal or size[BKMG] or time[smhd]",
"Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
"Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
@ -2059,6 +2092,13 @@ int cmd_record(int argc, const char **argv)
alarm(rec->switch_output.time);
}
if (rec->switch_output.num_files) {
rec->switch_output.filenames = calloc(sizeof(char *),
rec->switch_output.num_files);
if (!rec->switch_output.filenames)
return -EINVAL;
}
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().

View File

@ -1083,10 +1083,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
" Please refer the man page for the complete list."),
sort_help("sort by key(s):")),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
"output field(s): overhead, period, sample plus all of sort keys"),
sort_help("output field(s): overhead period sample ")),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,

View File

@ -53,6 +53,8 @@
static char const *script_name;
static char const *generate_script_lang;
static bool reltime;
static u64 initial_time;
static bool debug_mode;
static u64 last_timestamp;
static u64 nr_unordered;
@ -686,7 +688,13 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
}
if (PRINT_FIELD(TIME)) {
nsecs = sample->time;
u64 t = sample->time;
if (reltime) {
if (!initial_time)
initial_time = sample->time;
t = sample->time - initial_time;
}
nsecs = t;
secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
@ -694,7 +702,7 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
else {
char sample_time[32];
timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
printed += fprintf(fp, "%12s: ", sample_time);
}
}
@ -3413,6 +3421,7 @@ int cmd_script(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@ -3487,6 +3496,11 @@ int cmd_script(int argc, const char **argv)
}
}
if (script.time_str && reltime) {
fprintf(stderr, "Don't combine --reltime with --time\n");
return -1;
}
if (itrace_synth_opts.callchain &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;

View File

@ -718,7 +718,8 @@ static struct option stat_options[] = {
"system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
OPT_BOOLEAN(0, "scale", &stat_config.scale,
"Use --no-scale to disable counter scaling for multiplexing"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &stat_config.run_count,

View File

@ -1189,30 +1189,26 @@ static int __cmd_top(struct perf_top *top)
pthread_t thread, thread_process;
int ret;
top->session = perf_session__new(NULL, false, NULL);
if (top->session == NULL)
return -1;
if (!top->annotation_opts.objdump_path) {
ret = perf_env__lookup_objdump(&top->session->header.env,
&top->annotation_opts.objdump_path);
if (ret)
goto out_delete;
return ret;
}
ret = callchain_param__setup_sample_type(&callchain_param);
if (ret)
goto out_delete;
return ret;
if (perf_session__register_idle_thread(top->session) < 0)
goto out_delete;
return ret;
if (top->nr_threads_synthesize > 1)
perf_set_multithreaded();
init_process_thread(top);
ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process,
ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
&top->session->machines.host,
&top->record_opts);
if (ret < 0)
@ -1227,13 +1223,18 @@ static int __cmd_top(struct perf_top *top)
if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
if (ret < 0)
goto out_err_cpu_topo;
if (ret < 0) {
char errbuf[BUFSIZ];
const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
ui__error("Could not read the CPU topology map: %s\n", err);
return ret;
}
}
ret = perf_top__start_counters(top);
if (ret)
goto out_delete;
return ret;
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
@ -1252,7 +1253,7 @@ static int __cmd_top(struct perf_top *top)
ret = -1;
if (pthread_create(&thread_process, NULL, process_thread, top)) {
ui__error("Could not create process thread.\n");
goto out_delete;
return ret;
}
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
@ -1296,19 +1297,7 @@ out_join:
out_join_thread:
pthread_cond_signal(&top->qe.cond);
pthread_join(thread_process, NULL);
out_delete:
perf_session__delete(top->session);
top->session = NULL;
return ret;
out_err_cpu_topo: {
char errbuf[BUFSIZ];
const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
ui__error("Could not read the CPU topology map: %s\n", err);
goto out_delete;
}
}
static int
@ -1480,6 +1469,7 @@ int cmd_top(int argc, const char **argv)
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
@ -1511,6 +1501,7 @@ int cmd_top(int argc, const char **argv)
"number of thread to run event synthesize"),
OPT_END()
};
struct perf_evlist *sb_evlist = NULL;
const char * const top_usage[] = {
"perf top [<options>]",
NULL
@ -1628,8 +1619,9 @@ int cmd_top(int argc, const char **argv)
annotation_config__init();
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (symbol__init(NULL) < 0)
return -1;
status = symbol__init(NULL);
if (status < 0)
goto out_delete_evlist;
sort__setup_elide(stdout);
@ -1639,10 +1631,28 @@ int cmd_top(int argc, const char **argv)
signal(SIGWINCH, winch_sig);
}
top.session = perf_session__new(NULL, false, NULL);
if (top.session == NULL) {
status = -1;
goto out_delete_evlist;
}
if (!top.record_opts.no_bpf_event)
bpf_event__add_sb_event(&sb_evlist, &perf_env);
if (perf_evlist__start_sb_thread(sb_evlist, target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
status = __cmd_top(&top);
if (!opts->no_bpf_event)
perf_evlist__stop_sb_thread(sb_evlist);
out_delete_evlist:
perf_evlist__delete(top.evlist);
perf_session__delete(top.session);
return status;
}

View File

@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
use_pager = 1;
commit_pager_choice();
perf_env__init(&perf_env);
perf_env__set_cmdline(&perf_env, argc, argv);
status = p->fn(argc, argv);
perf_config__exit();

View File

@ -66,7 +66,7 @@ struct record_opts {
bool ignore_missing_thread;
bool strict_freq;
bool sample_id;
bool bpf_event;
bool no_bpf_event;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;

View File

@ -347,18 +347,6 @@
"BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
"PublicDescription": ""
},
{,
"EventCode": "0x517082",
"EventName": "PM_CO_DISP_FAIL",
"BriefDescription": "CO dispatch failed due to all CO machines being busy",
"PublicDescription": ""
},
{,
"EventCode": "0x527084",
"EventName": "PM_CO_TM_SC_FOOTPRINT",
"BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
"PublicDescription": ""
},
{,
"EventCode": "0x3608a",
"EventName": "PM_CO_USAGE",
@ -1577,36 +1565,12 @@
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a instruction side request",
"PublicDescription": ""
},
{,
"EventCode": "0x617082",
"EventName": "PM_ISIDE_DISP",
"BriefDescription": "All i-side dispatch attempts",
"PublicDescription": ""
},
{,
"EventCode": "0x627084",
"EventName": "PM_ISIDE_DISP_FAIL",
"BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
"PublicDescription": ""
},
{,
"EventCode": "0x627086",
"EventName": "PM_ISIDE_DISP_FAIL_OTHER",
"BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
"PublicDescription": ""
},
{,
"EventCode": "0x4608e",
"EventName": "PM_ISIDE_L2MEMACC",
"BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
"PublicDescription": ""
},
{,
"EventCode": "0x44608e",
"EventName": "PM_ISIDE_MRU_TOUCH",
"BriefDescription": "Iside L2 MRU touch",
"PublicDescription": ""
},
{,
"EventCode": "0x30ac",
"EventName": "PM_ISU_REF_FX0",
@ -1733,222 +1697,36 @@
"BriefDescription": "Instruction Demand sectors wriittent into IL1",
"PublicDescription": ""
},
{,
"EventCode": "0x417080",
"EventName": "PM_L2_CASTOUT_MOD",
"BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
"PublicDescription": ""
},
{,
"EventCode": "0x417082",
"EventName": "PM_L2_CASTOUT_SHR",
"BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
"PublicDescription": ""
},
{,
"EventCode": "0x27084",
"EventName": "PM_L2_CHIP_PUMP",
"BriefDescription": "RC requests that were local on chip pump attempts",
"PublicDescription": ""
},
{,
"EventCode": "0x427086",
"EventName": "PM_L2_DC_INV",
"BriefDescription": "Dcache invalidates from L2",
"PublicDescription": ""
},
{,
"EventCode": "0x44608c",
"EventName": "PM_L2_DISP_ALL_L2MISS",
"BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
"PublicDescription": ""
},
{,
"EventCode": "0x27086",
"EventName": "PM_L2_GROUP_PUMP",
"BriefDescription": "RC requests that were on Node Pump attempts",
"PublicDescription": ""
},
{,
"EventCode": "0x626084",
"EventName": "PM_L2_GRP_GUESS_CORRECT",
"BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
"PublicDescription": ""
},
{,
"EventCode": "0x626086",
"EventName": "PM_L2_GRP_GUESS_WRONG",
"BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
"PublicDescription": ""
},
{,
"EventCode": "0x427084",
"EventName": "PM_L2_IC_INV",
"BriefDescription": "Icache Invalidates from L2",
"PublicDescription": ""
},
{,
"EventCode": "0x436088",
"EventName": "PM_L2_INST",
"BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
"PublicDescription": ""
},
{,
"EventCode": "0x43608a",
"EventName": "PM_L2_INST_MISS",
"BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
"PublicDescription": ""
},
{,
"EventCode": "0x416080",
"EventName": "PM_L2_LD",
"BriefDescription": "All successful D-side Load dispatches for this thread",
"PublicDescription": ""
},
{,
"EventCode": "0x437088",
"EventName": "PM_L2_LD_DISP",
"BriefDescription": "All successful load dispatches",
"PublicDescription": ""
},
{,
"EventCode": "0x43708a",
"EventName": "PM_L2_LD_HIT",
"BriefDescription": "All successful load dispatches that were L2 hits",
"PublicDescription": ""
},
{,
"EventCode": "0x426084",
"EventName": "PM_L2_LD_MISS",
"BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
"PublicDescription": ""
},
{,
"EventCode": "0x616080",
"EventName": "PM_L2_LOC_GUESS_CORRECT",
"BriefDescription": "L2 guess loc and guess was correct (ie data local)",
"PublicDescription": ""
},
{,
"EventCode": "0x616082",
"EventName": "PM_L2_LOC_GUESS_WRONG",
"BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
"PublicDescription": ""
},
{,
"EventCode": "0x516080",
"EventName": "PM_L2_RCLD_DISP",
"BriefDescription": "L2 RC load dispatch attempt",
"PublicDescription": ""
},
{,
"EventCode": "0x516082",
"EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
"BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
"PublicDescription": ""
},
{,
"EventCode": "0x526084",
"EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
"BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
"PublicDescription": ""
},
{,
"EventCode": "0x536088",
"EventName": "PM_L2_RCST_DISP",
"BriefDescription": "L2 RC store dispatch attempt",
"PublicDescription": ""
},
{,
"EventCode": "0x53608a",
"EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
"BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
"PublicDescription": ""
},
{,
"EventCode": "0x54608c",
"EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
"BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
"PublicDescription": ""
},
{,
"EventCode": "0x537088",
"EventName": "PM_L2_RC_ST_DONE",
"BriefDescription": "RC did st to line that was Tx or Sx",
"PublicDescription": ""
},
{,
"EventCode": "0x63708a",
"EventName": "PM_L2_RTY_LD",
"BriefDescription": "RC retries on PB for any load from core",
"PublicDescription": ""
},
{,
"EventCode": "0x3708a",
"EventName": "PM_L2_RTY_ST",
"BriefDescription": "RC retries on PB for any store from core",
"PublicDescription": ""
},
{,
"EventCode": "0x54708c",
"EventName": "PM_L2_SN_M_RD_DONE",
"BriefDescription": "SNP dispatched for a read and was M",
"PublicDescription": ""
},
{,
"EventCode": "0x54708e",
"EventName": "PM_L2_SN_M_WR_DONE",
"BriefDescription": "SNP dispatched for a write and was M",
"PublicDescription": ""
},
{,
"EventCode": "0x53708a",
"EventName": "PM_L2_SN_SX_I_DONE",
"BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
"PublicDescription": ""
},
{,
"EventCode": "0x17080",
"EventName": "PM_L2_ST",
"BriefDescription": "All successful D-side store dispatches for this thread",
"PublicDescription": ""
},
{,
"EventCode": "0x44708c",
"EventName": "PM_L2_ST_DISP",
"BriefDescription": "All successful store dispatches",
"PublicDescription": ""
},
{,
"EventCode": "0x44708e",
"EventName": "PM_L2_ST_HIT",
"BriefDescription": "All successful store dispatches that were L2Hits",
"PublicDescription": ""
},
{,
"EventCode": "0x17082",
"EventName": "PM_L2_ST_MISS",
"BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
"PublicDescription": ""
},
{,
"EventCode": "0x636088",
"EventName": "PM_L2_SYS_GUESS_CORRECT",
"BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
"PublicDescription": ""
},
{,
"EventCode": "0x63608a",
"EventName": "PM_L2_SYS_GUESS_WRONG",
"BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
"PublicDescription": ""
},
{,
"EventCode": "0x617080",
"EventName": "PM_L2_SYS_PUMP",
"BriefDescription": "RC requests that were system pump attempts",
"PublicDescription": ""
},
{,
"EventCode": "0x1e05e",
"EventName": "PM_L2_TM_REQ_ABORT",
@ -1961,36 +1739,12 @@
"BriefDescription": "TM marked store abort",
"PublicDescription": ""
},
{,
"EventCode": "0x23808a",
"EventName": "PM_L3_CINJ",
"BriefDescription": "l3 ci of cache inject",
"PublicDescription": ""
},
{,
"EventCode": "0x128084",
"EventName": "PM_L3_CI_HIT",
"BriefDescription": "L3 Castins Hit (total count",
"PublicDescription": ""
},
{,
"EventCode": "0x128086",
"EventName": "PM_L3_CI_MISS",
"BriefDescription": "L3 castins miss (total count",
"PublicDescription": ""
},
{,
"EventCode": "0x819082",
"EventName": "PM_L3_CI_USAGE",
"BriefDescription": "rotating sample of 16 CI or CO actives",
"PublicDescription": ""
},
{,
"EventCode": "0x438088",
"EventName": "PM_L3_CO",
"BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
"PublicDescription": ""
},
{,
"EventCode": "0x83908b",
"EventName": "PM_L3_CO0_ALLOC",
@ -2009,120 +1763,18 @@
"BriefDescription": "L3 CO to L3.1 OR of port 0 and 1 ( lossy)",
"PublicDescription": ""
},
{,
"EventCode": "0x238088",
"EventName": "PM_L3_CO_LCO",
"BriefDescription": "Total L3 castouts occurred on LCO",
"PublicDescription": ""
},
{,
"EventCode": "0x28084",
"EventName": "PM_L3_CO_MEM",
"BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
"PublicDescription": ""
},
{,
"EventCode": "0xb19082",
"EventName": "PM_L3_GRP_GUESS_CORRECT",
"BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
"PublicDescription": ""
},
{,
"EventCode": "0xb3908a",
"EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
"BriefDescription": "Initial scope=group but data from local node. Predition too high",
"PublicDescription": ""
},
{,
"EventCode": "0xb39088",
"EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
"BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
"PublicDescription": ""
},
{,
"EventCode": "0x218080",
"EventName": "PM_L3_HIT",
"BriefDescription": "L3 Hits",
"PublicDescription": ""
},
{,
"EventCode": "0x138088",
"EventName": "PM_L3_L2_CO_HIT",
"BriefDescription": "L2 castout hits",
"PublicDescription": ""
},
{,
"EventCode": "0x13808a",
"EventName": "PM_L3_L2_CO_MISS",
"BriefDescription": "L2 castout miss",
"PublicDescription": ""
},
{,
"EventCode": "0x14808c",
"EventName": "PM_L3_LAT_CI_HIT",
"BriefDescription": "L3 Lateral Castins Hit",
"PublicDescription": ""
},
{,
"EventCode": "0x14808e",
"EventName": "PM_L3_LAT_CI_MISS",
"BriefDescription": "L3 Lateral Castins Miss",
"PublicDescription": ""
},
{,
"EventCode": "0x228084",
"EventName": "PM_L3_LD_HIT",
"BriefDescription": "L3 demand LD Hits",
"PublicDescription": ""
},
{,
"EventCode": "0x228086",
"EventName": "PM_L3_LD_MISS",
"BriefDescription": "L3 demand LD Miss",
"PublicDescription": ""
},
{,
"EventCode": "0x1e052",
"EventName": "PM_L3_LD_PREF",
"BriefDescription": "L3 Load Prefetches",
"PublicDescription": ""
},
{,
"EventCode": "0xb19080",
"EventName": "PM_L3_LOC_GUESS_CORRECT",
"BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
"PublicDescription": ""
},
{,
"EventCode": "0xb29086",
"EventName": "PM_L3_LOC_GUESS_WRONG",
"BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
"PublicDescription": ""
},
{,
"EventCode": "0x218082",
"EventName": "PM_L3_MISS",
"BriefDescription": "L3 Misses",
"PublicDescription": ""
},
{,
"EventCode": "0x54808c",
"EventName": "PM_L3_P0_CO_L31",
"BriefDescription": "l3 CO to L3.1 (lco) port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x538088",
"EventName": "PM_L3_P0_CO_MEM",
"BriefDescription": "l3 CO to memory port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x929084",
"EventName": "PM_L3_P0_CO_RTY",
"BriefDescription": "L3 CO received retry port 0",
"PublicDescription": ""
},
{,
"EventCode": "0xa29084",
"EventName": "PM_L3_P0_GRP_PUMP",
@ -2147,120 +1799,6 @@
"BriefDescription": "L3 LCO received retry port 0",
"PublicDescription": ""
},
{,
"EventCode": "0xa19080",
"EventName": "PM_L3_P0_NODE_PUMP",
"BriefDescription": "L3 pf sent with nodal scope port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x919080",
"EventName": "PM_L3_P0_PF_RTY",
"BriefDescription": "L3 PF received retry port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x939088",
"EventName": "PM_L3_P0_SN_HIT",
"BriefDescription": "L3 snoop hit port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x118080",
"EventName": "PM_L3_P0_SN_INV",
"BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
"PublicDescription": ""
},
{,
"EventCode": "0x94908c",
"EventName": "PM_L3_P0_SN_MISS",
"BriefDescription": "L3 snoop miss port 0",
"PublicDescription": ""
},
{,
"EventCode": "0xa39088",
"EventName": "PM_L3_P0_SYS_PUMP",
"BriefDescription": "L3 pf sent with sys scope port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x54808e",
"EventName": "PM_L3_P1_CO_L31",
"BriefDescription": "l3 CO to L3.1 (lco) port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x53808a",
"EventName": "PM_L3_P1_CO_MEM",
"BriefDescription": "l3 CO to memory port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x929086",
"EventName": "PM_L3_P1_CO_RTY",
"BriefDescription": "L3 CO received retry port 1",
"PublicDescription": ""
},
{,
"EventCode": "0xa29086",
"EventName": "PM_L3_P1_GRP_PUMP",
"BriefDescription": "L3 pf sent with grp scope port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x528086",
"EventName": "PM_L3_P1_LCO_DATA",
"BriefDescription": "lco sent with data port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x518082",
"EventName": "PM_L3_P1_LCO_NO_DATA",
"BriefDescription": "dataless l3 lco sent port 1",
"PublicDescription": ""
},
{,
"EventCode": "0xa4908e",
"EventName": "PM_L3_P1_LCO_RTY",
"BriefDescription": "L3 LCO received retry port 1",
"PublicDescription": ""
},
{,
"EventCode": "0xa19082",
"EventName": "PM_L3_P1_NODE_PUMP",
"BriefDescription": "L3 pf sent with nodal scope port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x919082",
"EventName": "PM_L3_P1_PF_RTY",
"BriefDescription": "L3 PF received retry port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x93908a",
"EventName": "PM_L3_P1_SN_HIT",
"BriefDescription": "L3 snoop hit port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x118082",
"EventName": "PM_L3_P1_SN_INV",
"BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
"PublicDescription": ""
},
{,
"EventCode": "0x94908e",
"EventName": "PM_L3_P1_SN_MISS",
"BriefDescription": "L3 snoop miss port 1",
"PublicDescription": ""
},
{,
"EventCode": "0xa3908a",
"EventName": "PM_L3_P1_SYS_PUMP",
"BriefDescription": "L3 pf sent with sys scope port 1",
"PublicDescription": ""
},
{,
"EventCode": "0x84908d",
"EventName": "PM_L3_PF0_ALLOC",
@ -2273,12 +1811,6 @@
"BriefDescription": "lifetime, sample of PF machine 0 valid",
"PublicDescription": ""
},
{,
"EventCode": "0x428084",
"EventName": "PM_L3_PF_HIT_L3",
"BriefDescription": "l3 pf hit in l3",
"PublicDescription": ""
},
{,
"EventCode": "0x18080",
"EventName": "PM_L3_PF_MISS_L3",
@ -2369,42 +1901,12 @@
"BriefDescription": "Data stream touchto L3",
"PublicDescription": ""
},
{,
"EventCode": "0xb29084",
"EventName": "PM_L3_SYS_GUESS_CORRECT",
"BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
"PublicDescription": ""
},
{,
"EventCode": "0xb4908c",
"EventName": "PM_L3_SYS_GUESS_WRONG",
"BriefDescription": "Initial scope=system but data from local or near. Predction too high",
"PublicDescription": ""
},
{,
"EventCode": "0x24808e",
"EventName": "PM_L3_TRANS_PF",
"BriefDescription": "L3 Transient prefetch",
"PublicDescription": ""
},
{,
"EventCode": "0x18081",
"EventName": "PM_L3_WI0_ALLOC",
"BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
"PublicDescription": "0.0"
},
{,
"EventCode": "0x418080",
"EventName": "PM_L3_WI0_BUSY",
"BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
"PublicDescription": ""
},
{,
"EventCode": "0x418082",
"EventName": "PM_L3_WI_USAGE",
"BriefDescription": "rotating sample of 8 WI actives",
"PublicDescription": ""
},
{,
"EventCode": "0xc080",
"EventName": "PM_LD_REF_L1_LSU0",
@ -3311,12 +2813,6 @@
"BriefDescription": "Dispatch time non favored tbegin",
"PublicDescription": ""
},
{,
"EventCode": "0x328084",
"EventName": "PM_NON_TM_RST_SC",
"BriefDescription": "non tm snp rst tm sc",
"PublicDescription": ""
},
{,
"EventCode": "0x2001a",
"EventName": "PM_NTCG_ALL_FIN",
@ -3419,24 +2915,6 @@
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 RC machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
"PublicDescription": ""
},
{,
"EventCode": "0x34808e",
"EventName": "PM_RD_CLEARING_SC",
"BriefDescription": "rd clearing sc",
"PublicDescription": ""
},
{,
"EventCode": "0x34808c",
"EventName": "PM_RD_FORMING_SC",
"BriefDescription": "rd forming sc",
"PublicDescription": ""
},
{,
"EventCode": "0x428086",
"EventName": "PM_RD_HIT_PF",
"BriefDescription": "rd machine hit l3 pf machine",
"PublicDescription": ""
},
{,
"EventCode": "0x20004",
"EventName": "PM_REAL_SRQ_FULL",
@ -3503,18 +2981,6 @@
"BriefDescription": "TLBIE snoop",
"PublicDescription": "TLBIE snoopSnoop TLBIE"
},
{,
"EventCode": "0x338088",
"EventName": "PM_SNP_TM_HIT_M",
"BriefDescription": "snp tm st hit m mu",
"PublicDescription": ""
},
{,
"EventCode": "0x33808a",
"EventName": "PM_SNP_TM_HIT_T",
"BriefDescription": "snp tm_st_hit t tn te",
"PublicDescription": ""
},
{,
"EventCode": "0x4608c",
"EventName": "PM_SN_USAGE",
@ -3533,12 +2999,6 @@
"BriefDescription": "STCX executed reported at sent to nest",
"PublicDescription": "STCX executed reported at sent to nest42"
},
{,
"EventCode": "0x717080",
"EventName": "PM_ST_CAUSED_FAIL",
"BriefDescription": "Non TM St caused any thread to fail",
"PublicDescription": ""
},
{,
"EventCode": "0x3090",
"EventName": "PM_SWAP_CANCEL",
@ -3623,18 +3083,6 @@
"BriefDescription": "Tm any tbegin",
"PublicDescription": ""
},
{,
"EventCode": "0x318082",
"EventName": "PM_TM_CAM_OVERFLOW",
"BriefDescription": "l3 tm cam overflow during L2 co of SC",
"PublicDescription": ""
},
{,
"EventCode": "0x74708c",
"EventName": "PM_TM_CAP_OVERFLOW",
"BriefDescription": "TM Footprint Capactiy Overflow",
"PublicDescription": ""
},
{,
"EventCode": "0x20ba",
"EventName": "PM_TM_END_ALL",
@ -3689,48 +3137,6 @@
"BriefDescription": "Transactional conflict from LSU, whatever gets reported to texas",
"PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
},
{,
"EventCode": "0x727086",
"EventName": "PM_TM_FAV_CAUSED_FAIL",
"BriefDescription": "TM Load (fav) caused another thread to fail",
"PublicDescription": ""
},
{,
"EventCode": "0x717082",
"EventName": "PM_TM_LD_CAUSED_FAIL",
"BriefDescription": "Non TM Ld caused any thread to fail",
"PublicDescription": ""
},
{,
"EventCode": "0x727084",
"EventName": "PM_TM_LD_CONF",
"BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
"PublicDescription": ""
},
{,
"EventCode": "0x328086",
"EventName": "PM_TM_RST_SC",
"BriefDescription": "tm snp rst tm sc",
"PublicDescription": ""
},
{,
"EventCode": "0x318080",
"EventName": "PM_TM_SC_CO",
"BriefDescription": "l3 castout tm Sc line",
"PublicDescription": ""
},
{,
"EventCode": "0x73708a",
"EventName": "PM_TM_ST_CAUSED_FAIL",
"BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
"PublicDescription": ""
},
{,
"EventCode": "0x737088",
"EventName": "PM_TM_ST_CONF",
"BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
"PublicDescription": ""
},
{,
"EventCode": "0x20bc",
"EventName": "PM_TM_TBEGIN",

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -C 0 kill >/dev/null 2>&1
args = --no-bpf-event -C 0 kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = kill >/dev/null 2>&1
args = --no-bpf-event kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -b kill >/dev/null 2>&1
args = --no-bpf-event -b kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -j any kill >/dev/null 2>&1
args = --no-bpf-event -j any kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -j any_call kill >/dev/null 2>&1
args = --no-bpf-event -j any_call kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -j any_ret kill >/dev/null 2>&1
args = --no-bpf-event -j any_ret kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -j hv kill >/dev/null 2>&1
args = --no-bpf-event -j hv kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -j ind_call kill >/dev/null 2>&1
args = --no-bpf-event -j ind_call kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -j k kill >/dev/null 2>&1
args = --no-bpf-event -j k kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -j u kill >/dev/null 2>&1
args = --no-bpf-event -j u kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -c 123 kill >/dev/null 2>&1
args = --no-bpf-event -c 123 kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -d kill >/dev/null 2>&1
args = --no-bpf-event -d kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -F 100 kill >/dev/null 2>&1
args = --no-bpf-event -F 100 kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -g kill >/dev/null 2>&1
args = --no-bpf-event -g kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = --call-graph dwarf -- kill >/dev/null 2>&1
args = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = --call-graph fp kill >/dev/null 2>&1
args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = --group -e cycles,instructions kill >/dev/null 2>&1
args = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
ret = 1
[event-1:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -e '{cycles,instructions}' kill >/dev/null 2>&1
args = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = --no-buffering kill >/dev/null 2>&1
args = --no-bpf-event --no-buffering kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -i kill >/dev/null 2>&1
args = --no-bpf-event -i kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -n kill >/dev/null 2>&1
args = --no-bpf-event -n kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -c 100 -P kill >/dev/null 2>&1
args = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -1,6 +1,6 @@
[config]
command = record
args = -R kill >/dev/null 2>&1
args = --no-bpf-event -R kill >/dev/null 2>&1
ret = 1
[event:base-record]

View File

@ -18,7 +18,7 @@ static void testcase(void)
int i;
for (i = 0; i < NR_ITERS; i++) {
char proc_name[10];
char proc_name[15];
snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
prctl(PR_SET_NAME, proc_name);

View File

@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
ret = -1;
perf_evsel__delete(evsel);
return ret;
}

View File

@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
const char *p;
const char **other;
double val;
int ret;
int i, ret;
struct parse_ctx ctx;
int num_other;
@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
TEST_ASSERT_VAL("find other", other[3] == NULL);
for (i = 0; i < num_other; i++)
free((void *)other[i]);
free((void *)other);
return 0;

View File

@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
goto out_thread_map_delete;
goto out_cpu_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
@ -119,6 +119,8 @@ out_close_fd:
perf_evsel__close_fd(evsel);
out_evsel_delete:
perf_evsel__delete(evsel);
out_cpu_map_delete:
cpu_map__put(cpus);
out_thread_map_delete:
thread_map__put(threads);
return err;

View File

@ -10,6 +10,10 @@
#include <errno.h>
#include <inttypes.h>
#include <libgen.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
@ -24,6 +28,7 @@
#include "annotate.h"
#include "evsel.h"
#include "evlist.h"
#include "bpf-event.h"
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
@ -31,6 +36,7 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <bpf/libbpf.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
@ -1615,6 +1621,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@ -1674,6 +1683,156 @@ fallback:
return 0;
}
#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
#define PACKAGE "perf"
#include <bfd.h>
#include <dis-asm.h>
static int symbol__disassemble_bpf(struct symbol *sym,
struct annotate_args *args)
{
struct annotation *notes = symbol__annotation(sym);
struct annotation_options *opts = args->options;
struct bpf_prog_info_linear *info_linear;
struct bpf_prog_linfo *prog_linfo = NULL;
struct bpf_prog_info_node *info_node;
int len = sym->end - sym->start;
disassembler_ftype disassemble;
struct map *map = args->ms.map;
struct disassemble_info info;
struct dso *dso = map->dso;
int pc = 0, count, sub_id;
struct btf *btf = NULL;
char tpath[PATH_MAX];
size_t buf_size;
int nr_skip = 0;
int ret = -1;
char *buf;
bfd *bfdf;
FILE *s;
if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
return -1;
pr_debug("%s: handling sym %s addr %lx len %lx\n", __func__,
sym->name, sym->start, sym->end - sym->start);
memset(tpath, 0, sizeof(tpath));
perf_exe(tpath, sizeof(tpath));
bfdf = bfd_openr(tpath, NULL);
assert(bfdf);
assert(bfd_check_format(bfdf, bfd_object));
s = open_memstream(&buf, &buf_size);
if (!s)
goto out;
init_disassemble_info(&info, s,
(fprintf_ftype) fprintf);
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
dso->bpf_prog.id);
if (!info_node)
goto out;
info_linear = info_node->info_linear;
sub_id = dso->bpf_prog.sub_id;
info.buffer = (void *)(info_linear->info.jited_prog_insns);
info.buffer_length = info_linear->info.jited_prog_len;
if (info_linear->info.nr_line_info)
prog_linfo = bpf_prog_linfo__new(&info_linear->info);
if (info_linear->info.btf_id) {
struct btf_node *node;
node = perf_env__find_btf(dso->bpf_prog.env,
info_linear->info.btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
node->data_size);
}
disassemble_init_for_target(&info);
#ifdef DISASM_FOUR_ARGS_SIGNATURE
disassemble = disassembler(info.arch,
bfd_big_endian(bfdf),
info.mach,
bfdf);
#else
disassemble = disassembler(bfdf);
#endif
assert(disassemble);
fflush(s);
do {
const struct bpf_line_info *linfo = NULL;
struct disasm_line *dl;
size_t prev_buf_size;
const char *srcline;
u64 addr;
addr = pc + ((u64 *)(info_linear->info.jited_ksyms))[sub_id];
count = disassemble(pc, &info);
if (prog_linfo)
linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
addr, sub_id,
nr_skip);
if (linfo && btf) {
srcline = btf__name_by_offset(btf, linfo->line_off);
nr_skip++;
} else
srcline = NULL;
fprintf(s, "\n");
prev_buf_size = buf_size;
fflush(s);
if (!opts->hide_src_code && srcline) {
args->offset = -1;
args->line = strdup(srcline);
args->line_nr = 0;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl) {
annotation_line__add(&dl->al,
&notes->src->source);
}
}
args->offset = pc;
args->line = buf + prev_buf_size;
args->line_nr = 0;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl)
annotation_line__add(&dl->al, &notes->src->source);
pc += count;
} while (count > 0 && pc < len);
ret = 0;
out:
free(prog_linfo);
free(btf);
fclose(s);
bfd_close(bfdf);
return ret;
}
#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
struct annotate_args *args __maybe_unused)
{
return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
}
#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
@ -1701,7 +1860,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
if (dso__is_kcore(dso)) {
if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
return symbol__disassemble_bpf(sym, args);
} else if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;

View File

@ -369,6 +369,7 @@ enum symbol_disassemble_errno {
__SYMBOL_ANNOTATE_ERRNO__START = -10000,
SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
__SYMBOL_ANNOTATE_ERRNO__END,
};

View File

@ -3,11 +3,17 @@
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@ -21,15 +27,122 @@ static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
return ret;
}
static int machine__process_bpf_event_load(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct bpf_prog_info_linear *info_linear;
struct bpf_prog_info_node *info_node;
struct perf_env *env = machine->env;
int id = event->bpf_event.id;
unsigned int i;
/* perf-record, no need to handle bpf-event */
if (env == NULL)
return 0;
info_node = perf_env__find_bpf_prog_info(env, id);
if (!info_node)
return 0;
info_linear = info_node->info_linear;
for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
u64 addr = addrs[i];
struct map *map;
map = map_groups__find(&machine->kmaps, addr);
if (map) {
map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
map->dso->bpf_prog.id = id;
map->dso->bpf_prog.sub_id = i;
map->dso->bpf_prog.env = env;
}
}
return 0;
}
int machine__process_bpf_event(struct machine *machine __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_bpf_event(event, stdout);
switch (event->bpf_event.type) {
case PERF_BPF_EVENT_PROG_LOAD:
return machine__process_bpf_event_load(machine, event, sample);
case PERF_BPF_EVENT_PROG_UNLOAD:
/*
* Do not free bpf_prog_info and btf of the program here,
* as annotation still need them. They will be freed at
* the end of the session.
*/
break;
default:
pr_debug("unexpected bpf_event type of %d\n",
event->bpf_event.type);
break;
}
return 0;
}
static int perf_env__fetch_btf(struct perf_env *env,
u32 btf_id,
struct btf *btf)
{
struct btf_node *node;
u32 data_size;
const void *data;
data = btf__get_raw_data(btf, &data_size);
node = malloc(data_size + sizeof(struct btf_node));
if (!node)
return -1;
node->id = btf_id;
node->data_size = data_size;
memcpy(node->data, data, data_size);
perf_env__insert_btf(env, node);
return 0;
}
static int synthesize_bpf_prog_name(char *buf, int size,
struct bpf_prog_info *info,
struct btf *btf,
u32 sub_id)
{
u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
void *func_infos = (void *)(uintptr_t)(info->func_info);
u32 sub_prog_cnt = info->nr_jited_ksyms;
const struct bpf_func_info *finfo;
const char *short_name = NULL;
const struct btf_type *t;
int name_len;
name_len = snprintf(buf, size, "bpf_prog_");
name_len += snprintf_hex(buf + name_len, size - name_len,
prog_tags[sub_id], BPF_TAG_SIZE);
if (btf) {
finfo = func_infos + sub_id * info->func_info_rec_size;
t = btf__type_by_id(btf, finfo->type_id);
short_name = btf__name_by_offset(btf, t->name_off);
} else if (sub_id == 0 && sub_prog_cnt == 1) {
/* no subprog */
if (info->name[0])
short_name = info->name;
} else
short_name = "F";
if (short_name)
name_len += snprintf(buf + name_len, size - name_len,
"_%s", short_name);
return name_len;
}
/*
* Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
* program. One PERF_RECORD_BPF_EVENT is generated for the program. And
@ -40,7 +153,7 @@ int machine__process_bpf_event(struct machine *machine __maybe_unused,
* -1 for failures;
* -2 for lack of kernel support.
*/
static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
int fd,
@ -49,102 +162,71 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
{
struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
struct bpf_event *bpf_event = &event->bpf_event;
u32 sub_prog_cnt, i, func_info_rec_size = 0;
u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
struct bpf_prog_info info = { .type = 0, };
u32 info_len = sizeof(info);
void *func_infos = NULL;
u64 *prog_addrs = NULL;
struct bpf_prog_info_linear *info_linear;
struct perf_tool *tool = session->tool;
struct bpf_prog_info_node *info_node;
struct bpf_prog_info *info;
struct btf *btf = NULL;
u32 *prog_lens = NULL;
bool has_btf = false;
char errbuf[512];
struct perf_env *env;
u32 sub_prog_cnt, i;
int err = 0;
u64 arrays;
/* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
/*
* for perf-record and perf-report use header.env;
* otherwise, use global perf_env.
*/
env = session->data ? &session->header.env : &perf_env;
if (err) {
pr_debug("%s: failed to get BPF program info: %s, aborting\n",
__func__, str_error_r(errno, errbuf, sizeof(errbuf)));
arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
info_linear = bpf_program__get_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
info_linear = NULL;
pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
return -1;
}
if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
pr_debug("%s: the kernel is too old, aborting\n", __func__);
return -2;
}
info = &info_linear->info;
/* number of ksyms, func_lengths, and tags should match */
sub_prog_cnt = info.nr_jited_ksyms;
if (sub_prog_cnt != info.nr_prog_tags ||
sub_prog_cnt != info.nr_jited_func_lens)
sub_prog_cnt = info->nr_jited_ksyms;
if (sub_prog_cnt != info->nr_prog_tags ||
sub_prog_cnt != info->nr_jited_func_lens)
return -1;
/* check BTF func info support */
if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
/* btf func info number should be same as sub_prog_cnt */
if (sub_prog_cnt != info.nr_func_info) {
if (sub_prog_cnt != info->nr_func_info) {
pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
return -1;
err = -1;
goto out;
}
if (btf__get_from_id(info.btf_id, &btf)) {
pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
return -1;
if (btf__get_from_id(info->btf_id, &btf)) {
pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
err = -1;
btf = NULL;
goto out;
}
func_info_rec_size = info.func_info_rec_size;
func_infos = calloc(sub_prog_cnt, func_info_rec_size);
if (!func_infos) {
pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
return -1;
}
has_btf = true;
}
/*
* We need address, length, and tag for each sub program.
* Allocate memory and call bpf_obj_get_info_by_fd() again
*/
prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
if (!prog_addrs) {
pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
goto out;
}
prog_lens = calloc(sub_prog_cnt, sizeof(u32));
if (!prog_lens) {
pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
goto out;
}
prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
if (!prog_tags) {
pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
goto out;
}
memset(&info, 0, sizeof(info));
info.nr_jited_ksyms = sub_prog_cnt;
info.nr_jited_func_lens = sub_prog_cnt;
info.nr_prog_tags = sub_prog_cnt;
info.jited_ksyms = ptr_to_u64(prog_addrs);
info.jited_func_lens = ptr_to_u64(prog_lens);
info.prog_tags = ptr_to_u64(prog_tags);
info_len = sizeof(info);
if (has_btf) {
info.nr_func_info = sub_prog_cnt;
info.func_info_rec_size = func_info_rec_size;
info.func_info = ptr_to_u64(func_infos);
}
err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
if (err) {
pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
goto out;
perf_env__fetch_btf(env, info->btf_id, btf);
}
/* Synthesize PERF_RECORD_KSYMBOL */
for (i = 0; i < sub_prog_cnt; i++) {
const struct bpf_func_info *finfo;
const char *short_name = NULL;
const struct btf_type *t;
__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
int name_len;
*ksymbol_event = (struct ksymbol_event){
@ -157,26 +239,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
.flags = 0,
};
name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
"bpf_prog_");
name_len += snprintf_hex(ksymbol_event->name + name_len,
KSYM_NAME_LEN - name_len,
prog_tags[i], BPF_TAG_SIZE);
if (has_btf) {
finfo = func_infos + i * info.func_info_rec_size;
t = btf__type_by_id(btf, finfo->type_id);
short_name = btf__name_by_offset(btf, t->name_off);
} else if (i == 0 && sub_prog_cnt == 1) {
/* no subprog */
if (info.name[0])
short_name = info.name;
} else
short_name = "F";
if (short_name)
name_len += snprintf(ksymbol_event->name + name_len,
KSYM_NAME_LEN - name_len,
"_%s", short_name);
name_len = synthesize_bpf_prog_name(ksymbol_event->name,
KSYM_NAME_LEN, info, btf, i);
ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
sizeof(u64));
@ -186,8 +251,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
machine, process);
}
/* Synthesize PERF_RECORD_BPF_EVENT */
if (opts->bpf_event) {
if (!opts->no_bpf_event) {
/* Synthesize PERF_RECORD_BPF_EVENT */
*bpf_event = (struct bpf_event){
.header = {
.type = PERF_RECORD_BPF_EVENT,
@ -195,25 +260,38 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
},
.type = PERF_BPF_EVENT_PROG_LOAD,
.flags = 0,
.id = info.id,
.id = info->id,
};
memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
/* save bpf_prog_info to env */
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (!info_node) {
err = -1;
goto out;
}
info_node->info_linear = info_linear;
perf_env__insert_bpf_prog_info(env, info_node);
info_linear = NULL;
/*
* process after saving bpf_prog_info to env, so that
* required information is ready for look up
*/
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
out:
free(prog_tags);
free(prog_lens);
free(prog_addrs);
free(func_infos);
free(info_linear);
free(btf);
return err ? -1 : 0;
}
int perf_event__synthesize_bpf_events(struct perf_tool *tool,
int perf_event__synthesize_bpf_events(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
struct record_opts *opts)
@ -247,7 +325,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
continue;
}
err = perf_event__synthesize_one_bpf_prog(tool, process,
err = perf_event__synthesize_one_bpf_prog(session, process,
machine, fd,
event, opts);
close(fd);
@ -261,3 +339,142 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
free(event);
return err;
}
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
struct bpf_prog_info_linear *info_linear;
struct bpf_prog_info_node *info_node;
struct btf *btf = NULL;
u64 arrays;
u32 btf_id;
int fd;
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0)
return;
arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
info_linear = bpf_program__get_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
goto out;
}
btf_id = info_linear->info.btf_id;
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (info_node) {
info_node->info_linear = info_linear;
perf_env__insert_bpf_prog_info(env, info_node);
} else
free(info_linear);
if (btf_id == 0)
goto out;
if (btf__get_from_id(btf_id, &btf)) {
pr_debug("%s: failed to get BTF of id %u, aborting\n",
__func__, btf_id);
goto out;
}
perf_env__fetch_btf(env, btf_id, btf);
out:
free(btf);
close(fd);
}
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
struct perf_env *env = data;
if (event->header.type != PERF_RECORD_BPF_EVENT)
return -1;
switch (event->bpf_event.type) {
case PERF_BPF_EVENT_PROG_LOAD:
perf_env__add_bpf_info(env, event->bpf_event.id);
case PERF_BPF_EVENT_PROG_UNLOAD:
/*
* Do not free bpf_prog_info and btf of the program here,
* as annotation still need them. They will be freed at
* the end of the session.
*/
break;
default:
pr_debug("unexpected bpf_event type of %d\n",
event->bpf_event.type);
break;
}
return 0;
}
int bpf_event__add_sb_event(struct perf_evlist **evlist,
struct perf_env *env)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.sample_id_all = 1,
.watermark = 1,
.bpf_event = 1,
.size = sizeof(attr), /* to capture ABI version */
};
/*
* Older gcc versions don't support designated initializers, like above,
* for unnamed union members, such as the following:
*/
attr.wakeup_watermark = 1;
return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
struct perf_env *env,
FILE *fp)
{
__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
char name[KSYM_NAME_LEN];
struct btf *btf = NULL;
u32 sub_prog_cnt, i;
sub_prog_cnt = info->nr_jited_ksyms;
if (sub_prog_cnt != info->nr_prog_tags ||
sub_prog_cnt != info->nr_jited_func_lens)
return;
if (info->btf_id) {
struct btf_node *node;
node = perf_env__find_btf(env, info->btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
node->data_size);
}
if (sub_prog_cnt == 1) {
synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
info->id, name, prog_addrs[0], prog_lens[0]);
return;
}
fprintf(fp, "# bpf_prog_info %u:\n", info->id);
for (i = 0; i < sub_prog_cnt; i++) {
synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
i, name, prog_addrs[i], prog_lens[i]);
}
}

View File

@ -3,22 +3,45 @@
#define __PERF_BPF_EVENT_H
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <api/fd/array.h>
#include "event.h"
#include <stdio.h>
struct machine;
union perf_event;
struct perf_env;
struct perf_sample;
struct perf_tool;
struct record_opts;
struct evlist;
struct target;
struct bpf_prog_info_node {
struct bpf_prog_info_linear *info_linear;
struct rb_node rb_node;
};
struct btf_node {
struct rb_node rb_node;
u32 id;
u32 data_size;
char data[];
};
#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
int perf_event__synthesize_bpf_events(struct perf_tool *tool,
int perf_event__synthesize_bpf_events(struct perf_session *session,
perf_event__handler_t process,
struct machine *machine,
struct record_opts *opts);
int bpf_event__add_sb_event(struct perf_evlist **evlist,
struct perf_env *env);
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
struct perf_env *env,
FILE *fp);
#else
static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
union perf_event *event __maybe_unused,
@ -27,12 +50,25 @@ static inline int machine__process_bpf_event(struct machine *machine __maybe_unu
return 0;
}
static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused,
struct record_opts *opts __maybe_unused)
{
return 0;
}
static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
struct perf_env *env __maybe_unused)
{
return 0;
}
static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
struct perf_env *env __maybe_unused,
FILE *fp __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT
#endif
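A minimal caller sketch for the side-band interfaces declared above (not part of this commit; the wrapper function, parameters and includes are assumed). bpf_event__add_sb_event() registers a dummy software event with attr.bpf_event set against a perf_env, and perf_evlist__start_sb_thread()/perf_evlist__stop_sb_thread() from evlist.h run the polling thread that feeds bpf_event__sb_cb():
/*
 * Sketch only: how a tool is expected to wire up the BPF side-band event.
 * Assumes "bpf-event.h", "evlist.h", "env.h" and "target.h" are included.
 */
static int example_start_bpf_side_band(struct perf_env *env,
					struct target *target,
					struct perf_evlist **sb_evlist)
{
	if (bpf_event__add_sb_event(sb_evlist, env))
		return -1;	/* could not add the dummy event */
	return perf_evlist__start_sb_thread(*sb_evlist, target);
}
/* ... and on the tool's exit path: perf_evlist__stop_sb_thread(*sb_evlist); */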

View File

@ -185,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
return bf;
}
/* The caller is responsible to free the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
char *linkname;

View File

@ -633,11 +633,10 @@ static int collect_config(const char *var, const char *value,
}
ret = set_value(item, value);
return ret;
out_free:
free(key);
return -1;
return ret;
}
int perf_config_set__collect(struct perf_config_set *set, const char *file_name,

View File

@ -361,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit)
size_t pos, bool at_exit,
char **new_filepath)
{
char *new_filepath;
int ret;
if (check_pipe(data))
@ -371,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
if (perf_data__is_read(data))
return -EINVAL;
if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0)
if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
if (rename(data->path, new_filepath))
pr_warning("Failed to rename %s to %s\n", data->path, new_filepath);
if (rename(data->path, *new_filepath))
pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
if (!at_exit) {
close(data->file.fd);
@ -396,7 +396,6 @@ int perf_data__switch(struct perf_data *data,
}
ret = data->file.fd;
out:
free(new_filepath);
return ret;
}

View File

@ -70,7 +70,7 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
*/
int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit);
size_t pos, bool at_exit, char **new_filepath);
int perf_data__create_dir(struct perf_data *data, int nr);
int perf_data__open_dir(struct perf_data *data);
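A short caller sketch for the reworked perf_data__switch() prototype above (the postfix value and debug message are illustrative, not from this commit). Since the .c change no longer frees the generated path, the name of the switched-to file is handed back through new_filepath and is owned by the caller:
	char *new_filepath = NULL;
	int fd = perf_data__switch(data, "1553293881", 0, true, &new_filepath);

	if (fd >= 0 && new_filepath)
		pr_debug("output switched to %s\n", new_filepath);
	free(new_filepath);	/* caller owns the string now */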

View File

@ -184,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
break;
@ -1141,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
static void dso__set_basename(struct dso *dso)
{
/*
* basename() may modify path buffer, so we must pass
* a copy.
*/
char *base, *lname = strdup(dso->long_name);
char *base, *lname;
int tid;
if (!lname)
return;
if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
if (asprintf(&base, "[JIT] tid %d", tid) < 0)
return;
} else {
/*
* basename() may modify path buffer, so we must pass
* a copy.
*/
lname = strdup(dso->long_name);
if (!lname)
return;
/*
* basename() may return a pointer to internal
* storage which is reused in subsequent calls
* so copy the result.
*/
base = strdup(basename(lname));
/*
* basename() may return a pointer to internal
* storage which is reused in subsequent calls
* so copy the result.
*/
base = strdup(basename(lname));
free(lname);
free(lname);
if (!base)
return;
dso__set_short_name(dso, base, true);
if (!base)
return;
}
dso__set_short_name(dso, base, true);
}
int dso__name_len(const struct dso *dso)
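Illustration of the effect of the dso__set_basename() change above (the tid is assumed):
	/* long_name "/tmp/perf-1234.map"                      */
	/* short name before: "perf-1234.map" (plain basename) */
	/* short name now:    "[JIT] tid 1234"                 */
which is what makes JITed code easier to spot in perf report.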

View File

@ -14,6 +14,7 @@
struct machine;
struct map;
struct perf_env;
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
@ -35,6 +36,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__KCORE,
DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__BPF_PROG_INFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@ -189,6 +191,12 @@ struct dso {
u64 debug_frame_offset;
u64 eh_frame_hdr_offset;
} data;
/* bpf prog information */
struct {
u32 id;
u32 sub_id;
struct perf_env *env;
} bpf_prog;
union { /* Tool specific area */
void *priv;

View File

@ -3,15 +3,163 @@
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
struct perf_env perf_env;
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node)
{
__u32 prog_id = info_node->info_linear->info.id;
struct bpf_prog_info_node *node;
struct rb_node *parent = NULL;
struct rb_node **p;
down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.infos.rb_node;
while (*p != NULL) {
parent = *p;
node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
if (prog_id < node->info_linear->info.id) {
p = &(*p)->rb_left;
} else if (prog_id > node->info_linear->info.id) {
p = &(*p)->rb_right;
} else {
pr_debug("duplicated bpf prog info %u\n", prog_id);
goto out;
}
}
rb_link_node(&info_node->rb_node, parent, p);
rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
env->bpf_progs.infos_cnt++;
out:
up_write(&env->bpf_progs.lock);
}
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id)
{
struct bpf_prog_info_node *node = NULL;
struct rb_node *n;
down_read(&env->bpf_progs.lock);
n = env->bpf_progs.infos.rb_node;
while (n) {
node = rb_entry(n, struct bpf_prog_info_node, rb_node);
if (prog_id < node->info_linear->info.id)
n = n->rb_left;
else if (prog_id > node->info_linear->info.id)
n = n->rb_right;
else
break;
}
up_read(&env->bpf_progs.lock);
return node;
}
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
struct rb_node *parent = NULL;
__u32 btf_id = btf_node->id;
struct btf_node *node;
struct rb_node **p;
down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.btfs.rb_node;
while (*p != NULL) {
parent = *p;
node = rb_entry(parent, struct btf_node, rb_node);
if (btf_id < node->id) {
p = &(*p)->rb_left;
} else if (btf_id > node->id) {
p = &(*p)->rb_right;
} else {
pr_debug("duplicated btf %u\n", btf_id);
goto out;
}
}
rb_link_node(&btf_node->rb_node, parent, p);
rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
env->bpf_progs.btfs_cnt++;
out:
up_write(&env->bpf_progs.lock);
}
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
struct btf_node *node = NULL;
struct rb_node *n;
down_read(&env->bpf_progs.lock);
n = env->bpf_progs.btfs.rb_node;
while (n) {
node = rb_entry(n, struct btf_node, rb_node);
if (btf_id < node->id)
n = n->rb_left;
else if (btf_id > node->id)
n = n->rb_right;
else
break;
}
up_read(&env->bpf_progs.lock);
return node;
}
/* purge data in bpf_progs.infos tree */
static void perf_env__purge_bpf(struct perf_env *env)
{
struct rb_root *root;
struct rb_node *next;
down_write(&env->bpf_progs.lock);
root = &env->bpf_progs.infos;
next = rb_first(root);
while (next) {
struct bpf_prog_info_node *node;
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
rb_erase(&node->rb_node, root);
free(node);
}
env->bpf_progs.infos_cnt = 0;
root = &env->bpf_progs.btfs;
next = rb_first(root);
while (next) {
struct btf_node *node;
node = rb_entry(next, struct btf_node, rb_node);
next = rb_next(&node->rb_node);
rb_erase(&node->rb_node, root);
free(node);
}
env->bpf_progs.btfs_cnt = 0;
up_write(&env->bpf_progs.lock);
}
void perf_env__exit(struct perf_env *env)
{
int i;
perf_env__purge_bpf(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
@ -38,6 +186,13 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->memory_nodes);
}
void perf_env__init(struct perf_env *env)
{
env->bpf_progs.infos = RB_ROOT;
env->bpf_progs.btfs = RB_ROOT;
init_rwsem(&env->bpf_progs.lock);
}
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;

View File

@ -3,7 +3,9 @@
#define __PERF_ENV_H
#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"
struct cpu_topology_map {
int socket_id;
@ -64,8 +66,23 @@ struct perf_env {
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
u64 clockid_res_ns;
/*
* bpf_info_lock protects bpf rbtrees. This is needed because the
* trees are accessed by different threads in perf-top
*/
struct {
struct rw_semaphore lock;
struct rb_root infos;
u32 infos_cnt;
struct rb_root btfs;
u32 btfs_cnt;
} bpf_progs;
};
struct bpf_prog_info_node;
struct btf_node;
extern struct perf_env perf_env;
void perf_env__exit(struct perf_env *env);
@ -80,4 +97,11 @@ const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
void perf_env__init(struct perf_env *env);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
#endif /* __PERF_ENV_H */

View File

@ -19,6 +19,7 @@
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <signal.h>
#include <unistd.h>
@ -1856,3 +1857,121 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
}
return leader;
}
int perf_evlist__add_sb_event(struct perf_evlist **evlist,
struct perf_event_attr *attr,
perf_evsel__sb_cb_t cb,
void *data)
{
struct perf_evsel *evsel;
bool new_evlist = (*evlist) == NULL;
if (*evlist == NULL)
*evlist = perf_evlist__new();
if (*evlist == NULL)
return -1;
if (!attr->sample_id_all) {
pr_warning("enabling sample_id_all for all side band events\n");
attr->sample_id_all = 1;
}
evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
if (!evsel)
goto out_err;
evsel->side_band.cb = cb;
evsel->side_band.data = data;
perf_evlist__add(*evlist, evsel);
return 0;
out_err:
if (new_evlist) {
perf_evlist__delete(*evlist);
*evlist = NULL;
}
return -1;
}
static void *perf_evlist__poll_thread(void *arg)
{
struct perf_evlist *evlist = arg;
bool draining = false;
int i;
while (draining || !(evlist->thread.done)) {
if (draining)
draining = false;
else if (evlist->thread.done)
draining = true;
if (!draining)
perf_evlist__poll(evlist, 1000);
for (i = 0; i < evlist->nr_mmaps; i++) {
struct perf_mmap *map = &evlist->mmap[i];
union perf_event *event;
if (perf_mmap__read_init(map))
continue;
while ((event = perf_mmap__read_event(map)) != NULL) {
struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
if (evsel && evsel->side_band.cb)
evsel->side_band.cb(event, evsel->side_band.data);
else
pr_warning("cannot locate proper evsel for the side band event\n");
perf_mmap__consume(map);
}
perf_mmap__read_done(map);
}
}
return NULL;
}
int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
struct target *target)
{
struct perf_evsel *counter;
if (!evlist)
return 0;
if (perf_evlist__create_maps(evlist, target))
goto out_delete_evlist;
evlist__for_each_entry(evlist, counter) {
if (perf_evsel__open(counter, evlist->cpus,
evlist->threads) < 0)
goto out_delete_evlist;
}
if (perf_evlist__mmap(evlist, UINT_MAX))
goto out_delete_evlist;
evlist__for_each_entry(evlist, counter) {
if (perf_evsel__enable(counter))
goto out_delete_evlist;
}
evlist->thread.done = 0;
if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
goto out_delete_evlist;
return 0;
out_delete_evlist:
perf_evlist__delete(evlist);
evlist = NULL;
return -1;
}
void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
{
if (!evlist)
return;
evlist->thread.done = 1;
pthread_join(evlist->thread.th, NULL);
perf_evlist__delete(evlist);
}

View File

@ -54,6 +54,10 @@ struct perf_evlist {
struct perf_sample *sample);
u64 first_sample_time;
u64 last_sample_time;
struct {
pthread_t th;
volatile int done;
} thread;
};
struct perf_evsel_str_handler {
@ -87,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
int perf_evlist__add_dummy(struct perf_evlist *evlist);
int perf_evlist__add_sb_event(struct perf_evlist **evlist,
struct perf_event_attr *attr,
perf_evsel__sb_cb_t cb,
void *data);
int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
struct target *target);
void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);

View File

@ -1036,7 +1036,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
attr->ksymbol = track && !perf_missing_features.ksymbol;
attr->bpf_event = track && opts->bpf_event &&
attr->bpf_event = track && !opts->no_bpf_event &&
!perf_missing_features.bpf_event;
if (opts->record_namespaces)
@ -1292,6 +1292,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
assert(evsel->evlist == NULL);
perf_evsel__free_counts(evsel);
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
@ -1342,10 +1343,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
count->val = 0;
} else if (count->run < count->ena) {
scaled = 1;
count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
count->val = (u64)((double) count->val * count->ena / count->run);
}
} else
count->ena = count->run = 0;
}
if (pscaled)
*pscaled = scaled;
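A worked example of the scale-up above, with assumed numbers: a multiplexed counter that read val = 1000 while it was scheduled for run = 50000 ns out of ena = 100000 ns of enabled time is estimated as val * ena / run = 1000 * 100000 / 50000 = 2000, with scaled set to 1; dropping the former + 0.5 means the integer conversion now truncates instead of rounding to nearest.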

View File

@ -73,6 +73,8 @@ struct perf_evsel_config_term {
struct perf_stat_evsel;
typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
/** struct perf_evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@ -151,6 +153,10 @@ struct perf_evsel {
bool collect_stat;
bool weak_group;
const char *pmu_name;
struct {
perf_evsel__sb_cb_t *cb;
void *data;
} side_band;
};
union u64_swap {

View File

@ -18,6 +18,7 @@
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#include <bpf/libbpf.h>
#include "evlist.h"
#include "evsel.h"
@ -40,6 +41,7 @@
#include "time-utils.h"
#include "units.h"
#include "cputopo.h"
#include "bpf-event.h"
#include "sane_ctype.h"
@ -876,6 +878,89 @@ static int write_dir_format(struct feat_fd *ff,
return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
int ret;
down_read(&env->bpf_progs.lock);
ret = do_write(ff, &env->bpf_progs.infos_cnt,
sizeof(env->bpf_progs.infos_cnt));
if (ret < 0)
goto out;
root = &env->bpf_progs.infos;
next = rb_first(root);
while (next) {
struct bpf_prog_info_node *node;
size_t len;
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
len = sizeof(struct bpf_prog_info_linear) +
node->info_linear->data_len;
/* before writing to file, translate address to offset */
bpf_program__bpil_addr_to_offs(node->info_linear);
ret = do_write(ff, node->info_linear, len);
/*
* translate back to address even when do_write() fails,
* so that this function never changes the data.
*/
bpf_program__bpil_offs_to_addr(node->info_linear);
if (ret < 0)
goto out;
}
out:
up_read(&env->bpf_progs.lock);
return ret;
}
#else // HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
{
return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
static int write_bpf_btf(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
int ret;
down_read(&env->bpf_progs.lock);
ret = do_write(ff, &env->bpf_progs.btfs_cnt,
sizeof(env->bpf_progs.btfs_cnt));
if (ret < 0)
goto out;
root = &env->bpf_progs.btfs;
next = rb_first(root);
while (next) {
struct btf_node *node;
node = rb_entry(next, struct btf_node, rb_node);
next = rb_next(&node->rb_node);
ret = do_write(ff, &node->id,
sizeof(u32) * 2 + node->data_size);
if (ret < 0)
goto out;
}
out:
up_read(&env->bpf_progs.lock);
return ret;
}
static int cpu_cache_level__sort(const void *a, const void *b)
{
struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@ -1367,6 +1452,52 @@ static void print_dir_format(struct feat_fd *ff, FILE *fp)
fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
down_read(&env->bpf_progs.lock);
root = &env->bpf_progs.infos;
next = rb_first(root);
while (next) {
struct bpf_prog_info_node *node;
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
bpf_event__print_bpf_prog_info(&node->info_linear->info,
env, fp);
}
up_read(&env->bpf_progs.lock);
}
static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
down_read(&env->bpf_progs.lock);
root = &env->bpf_progs.btfs;
next = rb_first(root);
while (next) {
struct btf_node *node;
node = rb_entry(next, struct btf_node, rb_node);
next = rb_next(&node->rb_node);
fprintf(fp, "# btf info of id %u\n", node->id);
}
up_read(&env->bpf_progs.lock);
}
static void free_event_desc(struct perf_evsel *events)
{
struct perf_evsel *evsel;
@ -2414,6 +2545,124 @@ static int process_dir_format(struct feat_fd *ff,
return do_read_u64(ff, &data->dir.version);
}
#ifdef HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
struct bpf_prog_info_linear *info_linear;
struct bpf_prog_info_node *info_node;
struct perf_env *env = &ff->ph->env;
u32 count, i;
int err = -1;
if (ff->ph->needs_swap) {
pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
return 0;
}
if (do_read_u32(ff, &count))
return -1;
down_write(&env->bpf_progs.lock);
for (i = 0; i < count; ++i) {
u32 info_len, data_len;
info_linear = NULL;
info_node = NULL;
if (do_read_u32(ff, &info_len))
goto out;
if (do_read_u32(ff, &data_len))
goto out;
if (info_len > sizeof(struct bpf_prog_info)) {
pr_warning("detected invalid bpf_prog_info\n");
goto out;
}
info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
data_len);
if (!info_linear)
goto out;
info_linear->info_len = sizeof(struct bpf_prog_info);
info_linear->data_len = data_len;
if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
goto out;
if (__do_read(ff, &info_linear->info, info_len))
goto out;
if (info_len < sizeof(struct bpf_prog_info))
memset(((void *)(&info_linear->info)) + info_len, 0,
sizeof(struct bpf_prog_info) - info_len);
if (__do_read(ff, info_linear->data, data_len))
goto out;
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (!info_node)
goto out;
/* after reading from file, translate offset to address */
bpf_program__bpil_offs_to_addr(info_linear);
info_node->info_linear = info_linear;
perf_env__insert_bpf_prog_info(env, info_node);
}
return 0;
out:
free(info_linear);
free(info_node);
up_write(&env->bpf_progs.lock);
return err;
}
#else // HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
{
struct perf_env *env = &ff->ph->env;
u32 count, i;
if (ff->ph->needs_swap) {
pr_warning("interpreting btf from systems with endianity is not yet supported\n");
return 0;
}
if (do_read_u32(ff, &count))
return -1;
down_write(&env->bpf_progs.lock);
for (i = 0; i < count; ++i) {
struct btf_node *node;
u32 id, data_size;
if (do_read_u32(ff, &id))
return -1;
if (do_read_u32(ff, &data_size))
return -1;
node = malloc(sizeof(struct btf_node) + data_size);
if (!node)
return -1;
node->id = id;
node->data_size = data_size;
if (__do_read(ff, node->data, data_size)) {
free(node);
return -1;
}
perf_env__insert_btf(env, node);
}
up_write(&env->bpf_progs.lock);
return 0;
}
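For reference, a compact sketch of the HEADER_BPF_BTF section layout that write_bpf_btf() emits and the loop above parses (the field names are descriptive only):
	/*
	 * u32 count;               -- env->bpf_progs.btfs_cnt
	 * then 'count' records of:
	 *   u32 id;                -- btf_node->id
	 *   u32 data_size;         -- btf_node->data_size
	 *   u8  data[data_size];   -- raw BTF blob
	 */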
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@ -2474,7 +2723,9 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(SAMPLE_TIME, sample_time, false),
FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
FEAT_OPR(CLOCKID, clockid, false),
FEAT_OPN(DIR_FORMAT, dir_format, false)
FEAT_OPN(DIR_FORMAT, dir_format, false),
FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
FEAT_OPR(BPF_BTF, bpf_btf, false),
};
struct header_print_data {

View File

@ -40,6 +40,8 @@ enum {
HEADER_MEM_TOPOLOGY,
HEADER_CLOCKID,
HEADER_DIR_FORMAT,
HEADER_BPF_PROG_INFO,
HEADER_BPF_BTF,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};

View File

@ -1111,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
if (err)
if (err) {
map__put(alm);
return err;
}
err = iter->ops->prepare_entry(iter, al);
if (err)

View File

@ -577,10 +577,25 @@ static void __maps__purge(struct maps *maps)
}
}
static void __maps__purge_names(struct maps *maps)
{
struct rb_root *root = &maps->names;
struct rb_node *next = rb_first(root);
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node_name);
next = rb_next(&pos->rb_node_name);
rb_erase_init(&pos->rb_node_name, root);
map__put(pos);
}
}
static void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
__maps__purge(maps);
__maps__purge_names(maps);
up_write(&maps->lock);
}
@ -917,6 +932,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
{
rb_erase_init(&map->rb_node, &maps->entries);
map__put(map);
rb_erase_init(&map->rb_node_name, &maps->names);
map__put(map);
}
void maps__remove(struct maps *maps, struct map *map)

View File

@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
"FINAL",
"ROUND",
"HALF ",
"TOP ",
"TIME ",
};
int err;
bool show_progress = false;

View File

@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config)
perf_evsel__delete(evsel);
}
thread_map__put(tmap);
return ret;
}
@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
printf(" %-50s [%s]\n", buf, "SDT event");
free(buf);
}
free(path);
} else
printf(" %-50s [%s]\n", nd->s, "SDT event");
if (nd2) {

View File

@ -132,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
ordered_events__init(&session->ordered_events,
ordered_events__deliver_event, NULL);
perf_env__init(&session->header.env);
if (data) {
if (perf_data__open(data))
goto out_delete;

View File

@ -13,6 +13,7 @@
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
@ -3107,3 +3108,54 @@ void reset_output_field(void)
reset_dimensions();
perf_hpp__reset_output_field(&perf_hpp_list);
}
#define INDENT (3*8 + 1)
static void add_key(struct strbuf *sb, const char *str, int *llen)
{
if (*llen >= 75) {
strbuf_addstr(sb, "\n\t\t\t ");
*llen = INDENT;
}
strbuf_addf(sb, " %s", str);
*llen += strlen(str) + 1;
}
static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
int *llen)
{
int i;
for (i = 0; i < n; i++)
add_key(sb, s[i].name, llen);
}
static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
int *llen)
{
int i;
for (i = 0; i < n; i++)
add_key(sb, s[i].name, llen);
}
const char *sort_help(const char *prefix)
{
struct strbuf sb;
char *s;
int len = strlen(prefix) + INDENT;
strbuf_init(&sb, 300);
strbuf_addstr(&sb, prefix);
add_hpp_sort_string(&sb, hpp_sort_dimensions,
ARRAY_SIZE(hpp_sort_dimensions), &len);
add_sort_string(&sb, common_sort_dimensions,
ARRAY_SIZE(common_sort_dimensions), &len);
add_sort_string(&sb, bstack_sort_dimensions,
ARRAY_SIZE(bstack_sort_dimensions), &len);
add_sort_string(&sb, memory_sort_dimensions,
ARRAY_SIZE(memory_sort_dimensions), &len);
s = strbuf_detach(&sb, NULL);
strbuf_release(&sb);
return s;
}

View File

@ -296,6 +296,8 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
const char *sort_help(const char *prefix);
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
bool is_strict_order(const char *order);
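A usage sketch for sort_help() declared above (the prefix text is illustrative): it returns a heap-allocated string that starts with the given prefix and lists every hpp, common, branch-stack and memory sort key, wrapped near 75 columns, ready to be used as an option description:
	const char *help = sort_help("sort by key(s): ");
	fprintf(stderr, "%s\n", help);	/* prefix followed by all supported sort keys */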

View File

@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
break;
case AGGR_GLOBAL:
aggr->val += count->val;
if (config->scale) {
aggr->ena += count->ena;
aggr->run += count->run;
}
aggr->ena += count->ena;
aggr->run += count->run;
case AGGR_UNSET:
default:
break;
@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
struct perf_evsel *leader = evsel->leader;
if (config->scale) {
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
}
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
/*
* The event is part of non trivial group, let's enable

View File

@ -1455,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
return true;
case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return false;