
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Thomas Gleixner:
 "Mostly updates to the perf tool plus two fixes to the kernel core code:

   - Handle tracepoint filters correctly for inherited events (Peter
     Zijlstra)

   - Prevent a deadlock in perf_lock_task_context (Paul McKenney)

   - Add missing newlines to some pr_err() calls (Arnaldo Carvalho de
     Melo)

   - Print full source file paths when using 'perf annotate --print-line
     --full-paths' (Michael Petlan)

   - Fix 'perf probe -d' when just one out of uprobes and kprobes is
     enabled (Wang Nan)

   - Add compiler.h to list.h to fix 'make perf-tar-src-pkg' generated
     tarballs, i.e. out of tree building (Arnaldo Carvalho de Melo)

   - Add the llvm-src-base.c and llvm-src-kbuild.c files, generated by
     the 'perf test' LLVM entries, when running it in-tree, to
     .gitignore (Yunlong Song)

   - libbpf error reporting improvements, using a strerror interface to
     more precisely tell the user about problems with the provided
     scriptlet, be it in C or as a ready made object file (Wang Nan)

   - Do not be case sensitive when searching for matching 'perf test'
     entries (Arnaldo Carvalho de Melo)

   - Inform the user about objdump failures in 'perf annotate' (Andi
     Kleen)

   - Improve the LLVM 'perf test' entry, introduce new ones for BPF
     and kbuild tests to check the environment used by clang to compile
     .c scriptlets (Wang Nan)"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
  perf/x86/intel/rapl: Remove the unused RAPL_EVENT_DESC() macro
  tools include: Add compiler.h to list.h
  perf probe: Verify parameters in two functions
  perf session: Add missing newlines to some pr_err() calls
  perf annotate: Support full source file paths for srcline fix
  perf test: Add llvm-src-base.c and llvm-src-kbuild.c to .gitignore
  perf: Fix inherited events vs. tracepoint filters
  perf: Disable IRQs across RCU RS CS that acquires scheduler lock
  perf test: Do not be case sensitive when searching for matching tests
  perf test: Add 'perf test BPF'
  perf test: Enhance the LLVM tests: add kbuild test
  perf test: Enhance the LLVM test: update basic BPF test program
  perf bpf: Improve BPF related error messages
  perf tools: Make fetch_kernel_version() publicly available
  bpf tools: Add new API bpf_object__get_kversion()
  bpf tools: Improve libbpf error reporting
  perf probe: Cleanup find_perf_probe_point_from_map to reduce redundancy
  perf annotate: Inform the user about objdump failures in --stdio
  perf stat: Make stat options global
  perf sched latency: Fix thread pid reuse issue
  ...
Linus Torvalds 2015-11-15 09:36:24 -08:00
commit 0ca9b67606
37 changed files with 949 additions and 294 deletions


@ -107,12 +107,6 @@ static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
static struct kobj_attribute format_attr_##_var = \
__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
#define RAPL_EVENT_DESC(_name, _config) \
{ \
.attr = __ATTR(_name, 0444, rapl_event_show, NULL), \
.config = _config, \
}
#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
#define RAPL_EVENT_ATTR_STR(_name, v, str) \


@ -1050,13 +1050,13 @@ retry:
/*
* One of the few rules of preemptible RCU is that one cannot do
* rcu_read_unlock() while holding a scheduler (or nested) lock when
* part of the read side critical section was preemptible -- see
* part of the read side critical section was irqs-enabled -- see
* rcu_read_unlock_special().
*
* Since ctx->lock nests under rq->lock we must ensure the entire read
* side critical section is non-preemptible.
* side critical section has interrupts disabled.
*/
preempt_disable();
local_irq_save(*flags);
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
@ -1070,21 +1070,22 @@ retry:
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
raw_spin_lock_irqsave(&ctx->lock, *flags);
raw_spin_lock(&ctx->lock);
if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
raw_spin_unlock(&ctx->lock);
rcu_read_unlock();
preempt_enable();
local_irq_restore(*flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
raw_spin_unlock(&ctx->lock);
ctx = NULL;
}
}
rcu_read_unlock();
preempt_enable();
if (!ctx)
local_irq_restore(*flags);
return ctx;
}
@ -6913,6 +6914,10 @@ static int perf_tp_filter_match(struct perf_event *event,
{
void *record = data->raw->data;
/* only top level events have filters set */
if (event->parent)
event = event->parent;
if (likely(!event->filter) || filter_match_preds(event->filter, record))
return 1;
return 0;
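
The hunk above relies on the property stated in its comment: only top-level events carry a tracepoint filter, so an inherited (child) event must delegate matching to its parent. Below is a minimal user-space sketch of that delegation pattern; the struct event, filter_match() and the strstr()-based matching are illustrative stand-ins, not the kernel's perf_event or filter_match_preds().

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the kernel's perf_event and filter machinery. */
struct event {
	struct event *parent;   /* NULL for a top-level event */
	const char *filter;     /* only ever set on top-level events */
};

static bool filter_match(struct event *event, const char *record)
{
	/* Mirror the fix above: inherited events use the parent's filter. */
	if (event->parent)
		event = event->parent;
	if (!event->filter)
		return true;            /* no filter set: everything matches */
	return strstr(record, event->filter) != NULL;
}

int main(void)
{
	struct event parent = { .parent = NULL, .filter = "comm=bash" };
	struct event child  = { .parent = &parent, .filter = NULL };

	/* Without the delegation the child would see no filter and match both. */
	printf("%d\n", filter_match(&child, "comm=bash pid=42")); /* prints 1 */
	printf("%d\n", filter_match(&child, "comm=vim pid=43"));  /* prints 0 */
	return 0;
}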


@ -1,3 +1,4 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>


@ -1,2 +1,2 @@
libbpf_version.h
FEATURE-DUMP
FEATURE-DUMP.libbpf


@ -180,7 +180,7 @@ config-clean:
clean:
$(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
$(RM) LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf


@ -61,6 +61,60 @@ void libbpf_set_print(libbpf_print_fn_t warn,
__pr_debug = debug;
}
#define STRERR_BUFSIZE 128
#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
static const char *libbpf_strerror_table[NR_ERRNO] = {
[ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
[ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
[ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
[ERRCODE_OFFSET(ENDIAN)] = "Endian missmatch",
[ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
[ERRCODE_OFFSET(RELOC)] = "Relocation failed",
[ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
[ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
[ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
};
int libbpf_strerror(int err, char *buf, size_t size)
{
if (!buf || !size)
return -1;
err = err > 0 ? err : -err;
if (err < __LIBBPF_ERRNO__START) {
int ret;
ret = strerror_r(err, buf, size);
buf[size - 1] = '\0';
return ret;
}
if (err < __LIBBPF_ERRNO__END) {
const char *msg;
msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
snprintf(buf, size, "%s", msg);
buf[size - 1] = '\0';
return 0;
}
snprintf(buf, size, "Unknown libbpf error %d", err);
buf[size - 1] = '\0';
return -1;
}
#define CHECK_ERR(action, err, out) do { \
err = action; \
if (err) \
goto out; \
} while(0)
/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
@ -258,7 +312,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
if (!obj) {
pr_warning("alloc memory failed for %s\n", path);
return NULL;
return ERR_PTR(-ENOMEM);
}
strcpy(obj->path, path);
@ -305,7 +359,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
if (obj_elf_valid(obj)) {
pr_warning("elf init: internal error\n");
return -EEXIST;
return -LIBBPF_ERRNO__LIBELF;
}
if (obj->efile.obj_buf_sz > 0) {
@ -331,14 +385,14 @@ static int bpf_object__elf_init(struct bpf_object *obj)
if (!obj->efile.elf) {
pr_warning("failed to open %s as ELF file\n",
obj->path);
err = -EINVAL;
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
pr_warning("failed to get EHDR from %s\n",
obj->path);
err = -EINVAL;
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
ep = &obj->efile.ehdr;
@ -346,7 +400,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) {
pr_warning("%s is not an eBPF object file\n",
obj->path);
err = -EINVAL;
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
@ -374,14 +428,14 @@ bpf_object__check_endianness(struct bpf_object *obj)
goto mismatch;
break;
default:
return -EINVAL;
return -LIBBPF_ERRNO__ENDIAN;
}
return 0;
mismatch:
pr_warning("Error: endianness mismatch.\n");
return -EINVAL;
return -LIBBPF_ERRNO__ENDIAN;
}
static int
@ -402,7 +456,7 @@ bpf_object__init_kversion(struct bpf_object *obj,
if (size != sizeof(kver)) {
pr_warning("invalid kver section in %s\n", obj->path);
return -EINVAL;
return -LIBBPF_ERRNO__FORMAT;
}
memcpy(&kver, data, sizeof(kver));
obj->kern_version = kver;
@ -444,7 +498,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
pr_warning("failed to get e_shstrndx from %s\n",
obj->path);
return -EINVAL;
return -LIBBPF_ERRNO__FORMAT;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
@ -456,7 +510,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warning("failed to get section header from %s\n",
obj->path);
err = -EINVAL;
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
@ -464,7 +518,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!name) {
pr_warning("failed to get section name from %s\n",
obj->path);
err = -EINVAL;
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
@ -472,7 +526,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!data) {
pr_warning("failed to get section data from %s(%s)\n",
name, obj->path);
err = -EINVAL;
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
@ -495,7 +549,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
obj->path);
err = -EEXIST;
err = -LIBBPF_ERRNO__FORMAT;
} else
obj->efile.symbols = data;
} else if ((sh.sh_type == SHT_PROGBITS) &&
@ -504,7 +558,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
err = bpf_object__add_program(obj, data->d_buf,
data->d_size, name, idx);
if (err) {
char errmsg[128];
char errmsg[STRERR_BUFSIZE];
strerror_r(-err, errmsg, sizeof(errmsg));
pr_warning("failed to alloc program %s (%s): %s",
name, obj->path, errmsg);
@ -576,7 +631,7 @@ bpf_program__collect_reloc(struct bpf_program *prog,
if (!gelf_getrel(data, i, &rel)) {
pr_warning("relocation: failed to get %d reloc\n", i);
return -EINVAL;
return -LIBBPF_ERRNO__FORMAT;
}
insn_idx = rel.r_offset / sizeof(struct bpf_insn);
@ -587,20 +642,20 @@ bpf_program__collect_reloc(struct bpf_program *prog,
&sym)) {
pr_warning("relocation: symbol %"PRIx64" not found\n",
GELF_R_SYM(rel.r_info));
return -EINVAL;
return -LIBBPF_ERRNO__FORMAT;
}
if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
insn_idx, insns[insn_idx].code);
return -EINVAL;
return -LIBBPF_ERRNO__RELOC;
}
map_idx = sym.st_value / sizeof(struct bpf_map_def);
if (map_idx >= nr_maps) {
pr_warning("bpf relocation: map_idx %d large than %d\n",
(int)map_idx, (int)nr_maps - 1);
return -EINVAL;
return -LIBBPF_ERRNO__RELOC;
}
prog->reloc_desc[i].insn_idx = insn_idx;
@ -683,7 +738,7 @@ bpf_program__relocate(struct bpf_program *prog, int *map_fds)
if (insn_idx >= (int)prog->insns_cnt) {
pr_warning("relocation out of range: '%s'\n",
prog->section_name);
return -ERANGE;
return -LIBBPF_ERRNO__RELOC;
}
insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
insns[insn_idx].imm = map_fds[map_idx];
@ -721,7 +776,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
if (!obj_elf_valid(obj)) {
pr_warning("Internal error: elf object is closed\n");
return -EINVAL;
return -LIBBPF_ERRNO__INTERNAL;
}
for (i = 0; i < obj->efile.nr_reloc; i++) {
@ -734,21 +789,21 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
if (shdr->sh_type != SHT_REL) {
pr_warning("internal error at %d\n", __LINE__);
return -EINVAL;
return -LIBBPF_ERRNO__INTERNAL;
}
prog = bpf_object__find_prog_by_idx(obj, idx);
if (!prog) {
pr_warning("relocation failed: no %d section\n",
idx);
return -ENOENT;
return -LIBBPF_ERRNO__RELOC;
}
err = bpf_program__collect_reloc(prog, nr_maps,
shdr, data,
obj->efile.symbols);
if (err)
return -EINVAL;
return err;
}
return 0;
}
@ -777,13 +832,23 @@ load_program(struct bpf_insn *insns, int insns_cnt,
goto out;
}
ret = -EINVAL;
ret = -LIBBPF_ERRNO__LOAD;
pr_warning("load bpf program failed: %s\n", strerror(errno));
if (log_buf) {
if (log_buf && log_buf[0] != '\0') {
ret = -LIBBPF_ERRNO__VERIFY;
pr_warning("-- BEGIN DUMP LOG ---\n");
pr_warning("\n%s\n", log_buf);
pr_warning("-- END LOG --\n");
} else {
if (insns_cnt >= BPF_MAXINSNS) {
pr_warning("Program too large (%d insns), at most %d insns\n",
insns_cnt, BPF_MAXINSNS);
ret = -LIBBPF_ERRNO__PROG2BIG;
} else if (log_buf) {
pr_warning("log buffer is empty\n");
ret = -LIBBPF_ERRNO__KVER;
}
}
out:
@ -831,7 +896,7 @@ static int bpf_object__validate(struct bpf_object *obj)
if (obj->kern_version == 0) {
pr_warning("%s doesn't provide kernel version\n",
obj->path);
return -EINVAL;
return -LIBBPF_ERRNO__KVERSION;
}
return 0;
}
@ -840,32 +905,28 @@ static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
{
struct bpf_object *obj;
int err;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warning("failed to init libelf for %s\n", path);
return NULL;
return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
}
obj = bpf_object__new(path, obj_buf, obj_buf_sz);
if (!obj)
return NULL;
if (IS_ERR(obj))
return obj;
if (bpf_object__elf_init(obj))
goto out;
if (bpf_object__check_endianness(obj))
goto out;
if (bpf_object__elf_collect(obj))
goto out;
if (bpf_object__collect_reloc(obj))
goto out;
if (bpf_object__validate(obj))
goto out;
CHECK_ERR(bpf_object__elf_init(obj), err, out);
CHECK_ERR(bpf_object__check_endianness(obj), err, out);
CHECK_ERR(bpf_object__elf_collect(obj), err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
CHECK_ERR(bpf_object__validate(obj), err, out);
bpf_object__elf_finish(obj);
return obj;
out:
bpf_object__close(obj);
return NULL;
return ERR_PTR(err);
}
struct bpf_object *bpf_object__open(const char *path)
@ -922,6 +983,8 @@ int bpf_object__unload(struct bpf_object *obj)
int bpf_object__load(struct bpf_object *obj)
{
int err;
if (!obj)
return -EINVAL;
@ -931,18 +994,16 @@ int bpf_object__load(struct bpf_object *obj)
}
obj->loaded = true;
if (bpf_object__create_maps(obj))
goto out;
if (bpf_object__relocate(obj))
goto out;
if (bpf_object__load_progs(obj))
goto out;
CHECK_ERR(bpf_object__create_maps(obj), err, out);
CHECK_ERR(bpf_object__relocate(obj), err, out);
CHECK_ERR(bpf_object__load_progs(obj), err, out);
return 0;
out:
bpf_object__unload(obj);
pr_warning("failed to load object '%s'\n", obj->path);
return -EINVAL;
return err;
}
void bpf_object__close(struct bpf_object *obj)
@ -990,10 +1051,18 @@ const char *
bpf_object__get_name(struct bpf_object *obj)
{
if (!obj)
return NULL;
return ERR_PTR(-EINVAL);
return obj->path;
}
unsigned int
bpf_object__get_kversion(struct bpf_object *obj)
{
if (!obj)
return 0;
return obj->kern_version;
}
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
@ -1034,16 +1103,16 @@ int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
return 0;
}
const char *bpf_program__title(struct bpf_program *prog, bool dup)
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
const char *title;
title = prog->section_name;
if (dup) {
if (needs_copy) {
title = strdup(title);
if (!title) {
pr_warning("failed to strdup program title\n");
return NULL;
return ERR_PTR(-ENOMEM);
}
}


@ -10,6 +10,26 @@
#include <stdio.h>
#include <stdbool.h>
#include <linux/err.h>
enum libbpf_errno {
__LIBBPF_ERRNO__START = 4000,
/* Something wrong in libelf */
LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
LIBBPF_ERRNO__FORMAT, /* BPF object format invalid */
LIBBPF_ERRNO__KVERSION, /* Incorrect or no 'version' section */
LIBBPF_ERRNO__ENDIAN, /* Endian missmatch */
LIBBPF_ERRNO__INTERNAL, /* Internal error in libbpf */
LIBBPF_ERRNO__RELOC, /* Relocation failed */
LIBBPF_ERRNO__LOAD, /* Load program failure for unknown reason */
LIBBPF_ERRNO__VERIFY, /* Kernel verifier blocks program loading */
LIBBPF_ERRNO__PROG2BIG, /* Program too big */
LIBBPF_ERRNO__KVER, /* Incorrect kernel version */
__LIBBPF_ERRNO__END,
};
int libbpf_strerror(int err, char *buf, size_t size);
/*
* In include/linux/compiler-gcc.h, __printf is defined. However
@ -36,6 +56,7 @@ void bpf_object__close(struct bpf_object *object);
int bpf_object__load(struct bpf_object *obj);
int bpf_object__unload(struct bpf_object *obj);
const char *bpf_object__get_name(struct bpf_object *obj);
unsigned int bpf_object__get_kversion(struct bpf_object *obj);
struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
@ -63,7 +84,7 @@ int bpf_program__set_private(struct bpf_program *prog, void *priv,
int bpf_program__get_private(struct bpf_program *prog,
void **ppriv);
const char *bpf_program__title(struct bpf_program *prog, bool dup);
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
int bpf_program__fd(struct bpf_program *prog);
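
With these changes libbpf reports failures as ERR_PTR()-encoded codes (as the bpf-loader and parse-events hunks further down also show), and libbpf_strerror() turns both ordinary errnos and the new LIBBPF_ERRNO__* codes into text. A hedged sketch of how a caller might use the interface, assuming the tools/lib and tools/include headers are on the include path:

#include <linux/err.h>   /* IS_ERR(), PTR_ERR(), as used throughout libbpf */
#include <bpf/libbpf.h>
#include <stdio.h>

static int open_and_load(const char *path)
{
	char errbuf[128];
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open(path);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		/* Handles plain errnos and LIBBPF_ERRNO__* codes alike. */
		libbpf_strerror(err, errbuf, sizeof(errbuf));
		fprintf(stderr, "failed to open %s: %s\n", path, errbuf);
		return err;
	}

	printf("%s was built for kernel version 0x%x\n",
	       path, bpf_object__get_kversion(obj));

	err = bpf_object__load(obj);
	if (err) {
		libbpf_strerror(err, errbuf, sizeof(errbuf));
		fprintf(stderr, "failed to load %s: %s\n", path, errbuf);
	}

	bpf_object__close(obj);
	return err;
}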


@ -62,7 +62,6 @@ OPTIONS
--verbose=::
Verbosity level.
-i::
--no-inherit::
Child tasks do not inherit counters.


@ -78,7 +78,7 @@ clean:
# The build-test target is not really parallel, don't print the jobs info:
#
build-test:
@$(MAKE) -f tests/make --no-print-directory
@$(MAKE) SHUF=1 -f tests/make --no-print-directory
#
# All other targets get passed through:


@ -1203,12 +1203,13 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->thread == r->thread)
return 0;
if (l->thread->tid < r->thread->tid)
return -1;
if (l->thread->tid > r->thread->tid)
return 1;
return 0;
return (int)(l->thread - r->thread);
}
static int avg_cmp(struct work_atoms *l, struct work_atoms *r)


@ -122,6 +122,9 @@ static bool forever = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
static aggr_get_id_t aggr_get_id;
static bool append_file;
static const char *output_name;
static int output_fd;
static volatile int done = 0;
@ -513,15 +516,6 @@ static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
if (csv_output || stat_config.interval)
return;
if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
fprintf(output, " # %8.3f CPUs utilized ",
avg / avg_stats(&walltime_nsecs_stats));
else
fprintf(output, " ");
}
static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
@ -529,7 +523,6 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
FILE *output = stat_config.output;
double sc = evsel->scale;
const char *fmt;
int cpu = cpu_map__id_to_cpu(id);
if (csv_output) {
fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
@ -542,9 +535,6 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
aggr_printout(evsel, id, nr);
if (stat_config.aggr_mode == AGGR_GLOBAL)
cpu = 0;
fprintf(output, fmt, avg, csv_sep);
if (evsel->unit)
@ -556,12 +546,24 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}
if (csv_output || stat_config.interval)
return;
static void printout(int id, int nr, struct perf_evsel *counter, double uval)
{
int cpu = cpu_map__id_to_cpu(id);
perf_stat__print_shadow_stats(output, evsel, avg, cpu,
stat_config.aggr_mode);
if (stat_config.aggr_mode == AGGR_GLOBAL)
cpu = 0;
if (nsec_counter(counter))
nsec_printout(id, nr, counter, uval);
else
abs_printout(id, nr, counter, uval);
if (!csv_output && !stat_config.interval)
perf_stat__print_shadow_stats(stat_config.output, counter,
uval, cpu,
stat_config.aggr_mode);
}
static void print_aggr(char *prefix)
@ -617,12 +619,7 @@ static void print_aggr(char *prefix)
continue;
}
uval = val * counter->scale;
if (nsec_counter(counter))
nsec_printout(id, nr, counter, uval);
else
abs_printout(id, nr, counter, uval);
printout(id, nr, counter, uval);
if (!csv_output)
print_noise(counter, 1.0);
@ -653,11 +650,7 @@ static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
if (nsec_counter(counter))
nsec_printout(thread, 0, counter, uval);
else
abs_printout(thread, 0, counter, uval);
printout(thread, 0, counter, uval);
if (!csv_output)
print_noise(counter, 1.0);
@ -707,11 +700,7 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
}
uval = avg * counter->scale;
if (nsec_counter(counter))
nsec_printout(-1, 0, counter, uval);
else
abs_printout(-1, 0, counter, uval);
printout(-1, 0, counter, uval);
print_noise(counter, avg);
@ -764,12 +753,7 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
}
uval = val * counter->scale;
if (nsec_counter(counter))
nsec_printout(cpu, 0, counter, uval);
else
abs_printout(cpu, 0, counter, uval);
printout(cpu, 0, counter, uval);
if (!csv_output)
print_noise(counter, 1.0);
print_running(run, ena);
@ -946,6 +930,67 @@ static int stat__set_big_num(const struct option *opt __maybe_unused,
return 0;
}
static const struct option stat_options[] = {
OPT_BOOLEAN('T', "transaction", &transaction_run,
"hardware transaction statistics"),
OPT_CALLBACK('e', "event", &evsel_list, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_CALLBACK(0, "filter", &evsel_list, "filter",
"event filter", parse_filter),
OPT_BOOLEAN('i', "no-inherit", &no_inherit,
"child tasks do not inherit counters"),
OPT_STRING('p', "pid", &target.pid, "pid",
"stat events on existing process id"),
OPT_STRING('t', "tid", &target.tid, "tid",
"stat events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100, forever: 0)"),
OPT_BOOLEAN('n', "null", &null_run,
"null run - dont start any counters"),
OPT_INCR('d', "detailed", &detailed_run,
"detailed run - start a lot of events"),
OPT_BOOLEAN('S', "sync", &sync_run,
"call sync() before starting a run"),
OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators",
stat__set_big_num),
OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
"list of cpus to monitor in system-wide"),
OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_STRING('x', "field-separator", &csv_sep, "separator",
"print counts with custom separator"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
"monitor event in cgroup name only", parse_cgroups),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
OPT_INTEGER(0, "log-fd", &output_fd,
"log output to fd, instead of stderr"),
OPT_STRING(0, "pre", &pre_cmd, "command",
"command to run prior to the measured command"),
OPT_STRING(0, "post", &post_cmd, "command",
"command to run after to the measured command"),
OPT_UINTEGER('I', "interval-print", &stat_config.interval,
"print counts at regular interval in ms (>= 10)"),
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
"aggregate counts per thread", AGGR_THREAD),
OPT_UINTEGER('D', "delay", &initial_delay,
"ms to wait before starting measurement after program start"),
OPT_END()
};
static int perf_stat__get_socket(struct cpu_map *map, int cpu)
{
return cpu_map__get_socket(map, cpu, NULL);
@ -1193,69 +1238,6 @@ static int add_default_attributes(void)
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
bool append_file = false;
int output_fd = 0;
const char *output_name = NULL;
const struct option options[] = {
OPT_BOOLEAN('T', "transaction", &transaction_run,
"hardware transaction statistics"),
OPT_CALLBACK('e', "event", &evsel_list, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
OPT_CALLBACK(0, "filter", &evsel_list, "filter",
"event filter", parse_filter),
OPT_BOOLEAN('i', "no-inherit", &no_inherit,
"child tasks do not inherit counters"),
OPT_STRING('p', "pid", &target.pid, "pid",
"stat events on existing process id"),
OPT_STRING('t', "tid", &target.tid, "tid",
"stat events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100, forever: 0)"),
OPT_BOOLEAN('n', "null", &null_run,
"null run - dont start any counters"),
OPT_INCR('d', "detailed", &detailed_run,
"detailed run - start a lot of events"),
OPT_BOOLEAN('S', "sync", &sync_run,
"call sync() before starting a run"),
OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators",
stat__set_big_num),
OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
"list of cpus to monitor in system-wide"),
OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_STRING('x', "field-separator", &csv_sep, "separator",
"print counts with custom separator"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
"monitor event in cgroup name only", parse_cgroups),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
OPT_INTEGER(0, "log-fd", &output_fd,
"log output to fd, instead of stderr"),
OPT_STRING(0, "pre", &pre_cmd, "command",
"command to run prior to the measured command"),
OPT_STRING(0, "post", &post_cmd, "command",
"command to run after to the measured command"),
OPT_UINTEGER('I', "interval-print", &stat_config.interval,
"print counts at regular interval in ms (>= 10)"),
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
"aggregate counts per thread", AGGR_THREAD),
OPT_UINTEGER('D', "delay", &initial_delay,
"ms to wait before starting measurement after program start"),
OPT_END()
};
const char * const stat_usage[] = {
"perf stat [<options>] [<command>]",
NULL
@ -1271,7 +1253,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (evsel_list == NULL)
return -ENOMEM;
argc = parse_options(argc, argv, options, stat_usage,
argc = parse_options(argc, argv, stat_options, stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
interval = stat_config.interval;
@ -1281,14 +1263,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (output_name && output_fd) {
fprintf(stderr, "cannot use both --output and --log-fd\n");
parse_options_usage(stat_usage, options, "o", 1);
parse_options_usage(NULL, options, "log-fd", 0);
parse_options_usage(stat_usage, stat_options, "o", 1);
parse_options_usage(NULL, stat_options, "log-fd", 0);
goto out;
}
if (output_fd < 0) {
fprintf(stderr, "argument to --log-fd must be a > 0\n");
parse_options_usage(stat_usage, options, "log-fd", 0);
parse_options_usage(stat_usage, stat_options, "log-fd", 0);
goto out;
}
@ -1328,8 +1310,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
/* User explicitly passed -B? */
if (big_num_opt == 1) {
fprintf(stderr, "-B option not supported with -x\n");
parse_options_usage(stat_usage, options, "B", 1);
parse_options_usage(NULL, options, "x", 1);
parse_options_usage(stat_usage, stat_options, "B", 1);
parse_options_usage(NULL, stat_options, "x", 1);
goto out;
} else /* Nope, so disable big number formatting */
big_num = false;
@ -1337,11 +1319,11 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
big_num = false;
if (!argc && target__none(&target))
usage_with_options(stat_usage, options);
usage_with_options(stat_usage, stat_options);
if (run_count < 0) {
pr_err("Run count must be a positive number\n");
parse_options_usage(stat_usage, options, "r", 1);
parse_options_usage(stat_usage, stat_options, "r", 1);
goto out;
} else if (run_count == 0) {
forever = true;
@ -1351,8 +1333,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
fprintf(stderr, "The --per-thread option is only available "
"when monitoring via -p -t options.\n");
parse_options_usage(NULL, options, "p", 1);
parse_options_usage(NULL, options, "t", 1);
parse_options_usage(NULL, stat_options, "p", 1);
parse_options_usage(NULL, stat_options, "t", 1);
goto out;
}
@ -1366,9 +1348,9 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n");
parse_options_usage(stat_usage, options, "G", 1);
parse_options_usage(NULL, options, "A", 1);
parse_options_usage(NULL, options, "a", 1);
parse_options_usage(stat_usage, stat_options, "G", 1);
parse_options_usage(NULL, stat_options, "A", 1);
parse_options_usage(NULL, stat_options, "a", 1);
goto out;
}
@ -1380,12 +1362,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (perf_evlist__create_maps(evsel_list, &target) < 0) {
if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
parse_options_usage(stat_usage, options, "p", 1);
parse_options_usage(NULL, options, "t", 1);
parse_options_usage(stat_usage, stat_options, "p", 1);
parse_options_usage(NULL, stat_options, "t", 1);
} else if (target__has_cpu(&target)) {
perror("failed to parse CPUs map");
parse_options_usage(stat_usage, options, "C", 1);
parse_options_usage(NULL, options, "a", 1);
parse_options_usage(stat_usage, stat_options, "C", 1);
parse_options_usage(NULL, stat_options, "a", 1);
}
goto out;
}
@ -1400,7 +1382,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (interval && interval < 100) {
if (interval < 10) {
pr_err("print interval must be >= 10ms\n");
parse_options_usage(stat_usage, options, "I", 1);
parse_options_usage(stat_usage, stat_options, "I", 1);
goto out;
} else
pr_warning("print interval < 100ms. "

tools/perf/tests/.gitignore 100644

@ -0,0 +1,2 @@
llvm-src-base.c
llvm-src-kbuild.c


@ -31,9 +31,24 @@ perf-y += sample-parsing.o
perf-y += parse-no-sample-id-all.o
perf-y += kmod-path.o
perf-y += thread-map.o
perf-y += llvm.o
perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o
perf-y += bpf.o
perf-y += topology.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c
$(call rule_mkdir)
$(Q)echo '#include <tests/llvm.h>' > $@
$(Q)echo 'const char test_llvm__bpf_base_prog[] =' >> $@
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
$(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c
$(call rule_mkdir)
$(Q)echo '#include <tests/llvm.h>' > $@
$(Q)echo 'const char test_llvm__bpf_test_kbuild_prog[] =' >> $@
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
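
These two rules embed the BPF test scriptlets into the perf binary as C string constants: sed quote-escapes each source line and wraps it in "...\n", between an #include/declaration header and a closing semicolon. The generated $(OUTPUT)tests/llvm-src-base.c would therefore look roughly like this (abbreviated):

#include <tests/llvm.h>
const char test_llvm__bpf_base_prog[] =
"/*\n"
" * bpf-script-example.c\n"
" * Test basic LLVM building\n"
" */\n"
"#ifndef LINUX_VERSION_CODE\n"
"# error Need LINUX_VERSION_CODE\n"
/* ... one quoted line per remaining line of tests/bpf-script-example.c ... */
;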


@ -171,6 +171,5 @@ int test__attr(void)
!lstat(path_perf, &st))
return run_dir(path_dir, path_perf);
fprintf(stderr, " (omitted)");
return 0;
return TEST_SKIP;
}


@ -1,3 +1,7 @@
/*
* bpf-script-example.c
* Test basic LLVM building
*/
#ifndef LINUX_VERSION_CODE
# error Need LINUX_VERSION_CODE
# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'


@ -0,0 +1,21 @@
/*
* bpf-script-test-kbuild.c
* Test include from kernel header
*/
#ifndef LINUX_VERSION_CODE
# error Need LINUX_VERSION_CODE
# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
#endif
#define SEC(NAME) __attribute__((section(NAME), used))
#include <uapi/linux/fs.h>
#include <uapi/asm/ptrace.h>
SEC("func=vfs_llseek")
int bpf_func__vfs_llseek(void *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;


@ -0,0 +1,209 @@
#include <stdio.h>
#include <sys/epoll.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
#define NR_ITERS 111
#ifdef HAVE_LIBBPF_SUPPORT
static int epoll_pwait_loop(void)
{
int i;
/* Should fail NR_ITERS times */
for (i = 0; i < NR_ITERS; i++)
epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
return 0;
}
static struct {
enum test_llvm__testcase prog_id;
const char *desc;
const char *name;
const char *msg_compile_fail;
const char *msg_load_fail;
int (*target_func)(void);
int expect_result;
} bpf_testcase_table[] = {
{
LLVM_TESTCASE_BASE,
"Test basic BPF filtering",
"[basic_bpf_test]",
"fix 'perf test LLVM' first",
"load bpf object failed",
&epoll_pwait_loop,
(NR_ITERS + 1) / 2,
},
};
static int do_test(struct bpf_object *obj, int (*func)(void),
int expect)
{
struct record_opts opts = {
.target = {
.uid = UINT_MAX,
.uses_mmap = true,
},
.freq = 0,
.mmap_pages = 256,
.default_interval = 1,
};
char pid[16];
char sbuf[STRERR_BUFSIZE];
struct perf_evlist *evlist;
int i, ret = TEST_FAIL, err = 0, count = 0;
struct parse_events_evlist parse_evlist;
struct parse_events_error parse_error;
bzero(&parse_error, sizeof(parse_error));
bzero(&parse_evlist, sizeof(parse_evlist));
parse_evlist.error = &parse_error;
INIT_LIST_HEAD(&parse_evlist.list);
err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
if (err || list_empty(&parse_evlist.list)) {
pr_debug("Failed to add events selected by BPF\n");
if (!err)
return TEST_FAIL;
}
snprintf(pid, sizeof(pid), "%d", getpid());
pid[sizeof(pid) - 1] = '\0';
opts.target.tid = opts.target.pid = pid;
/* Instead of perf_evlist__new_default, don't add default events */
evlist = perf_evlist__new();
if (!evlist) {
pr_debug("No ehough memory to create evlist\n");
return TEST_FAIL;
}
err = perf_evlist__create_maps(evlist, &opts.target);
if (err < 0) {
pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_evlist;
}
perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
evlist->nr_groups = parse_evlist.nr_groups;
perf_evlist__config(evlist, &opts);
err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
perf_evlist__enable(evlist);
(*func)();
perf_evlist__disable(evlist);
for (i = 0; i < evlist->nr_mmaps; i++) {
union perf_event *event;
while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
const u32 type = event->header.type;
if (type == PERF_RECORD_SAMPLE)
count ++;
}
}
if (count != expect)
pr_debug("BPF filter result incorrect\n");
ret = TEST_OK;
out_delete_evlist:
perf_evlist__delete(evlist);
return ret;
}
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
struct bpf_object *obj;
obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
if (IS_ERR(obj)) {
pr_debug("Compile BPF program failed.\n");
return NULL;
}
return obj;
}
static int __test__bpf(int index)
{
int ret;
void *obj_buf;
size_t obj_buf_sz;
struct bpf_object *obj;
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
bpf_testcase_table[index].prog_id,
true);
if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
pr_debug("Unable to get BPF object, %s\n",
bpf_testcase_table[index].msg_compile_fail);
if (index == 0)
return TEST_SKIP;
else
return TEST_FAIL;
}
obj = prepare_bpf(obj_buf, obj_buf_sz,
bpf_testcase_table[index].name);
if (!obj) {
ret = TEST_FAIL;
goto out;
}
ret = do_test(obj,
bpf_testcase_table[index].target_func,
bpf_testcase_table[index].expect_result);
out:
bpf__clear();
return ret;
}
int test__bpf(void)
{
unsigned int i;
int err;
if (geteuid() != 0) {
pr_debug("Only root can run BPF test\n");
return TEST_SKIP;
}
for (i = 0; i < ARRAY_SIZE(bpf_testcase_table); i++) {
err = __test__bpf(i);
if (err != TEST_OK)
return err;
}
return TEST_OK;
}
#else
int test__bpf(void)
{
pr_debug("Skip BPF test because BPF support is not compiled\n");
return TEST_SKIP;
}
#endif


@ -165,6 +165,10 @@ static struct test generic_tests[] = {
.desc = "Test topology in session",
.func = test_session_topology,
},
{
.desc = "Test BPF filter",
.func = test__bpf,
},
{
.func = NULL,
},
@ -192,7 +196,7 @@ static bool perf_test__matches(struct test *test, int curr, int argc, const char
continue;
}
if (strstr(test->desc, argv[i]))
if (strcasestr(test->desc, argv[i]))
return true;
}


@ -613,16 +613,16 @@ int test__code_reading(void)
case TEST_CODE_READING_OK:
return 0;
case TEST_CODE_READING_NO_VMLINUX:
fprintf(stderr, " (no vmlinux)");
pr_debug("no vmlinux\n");
return 0;
case TEST_CODE_READING_NO_KCORE:
fprintf(stderr, " (no kcore)");
pr_debug("no kcore\n");
return 0;
case TEST_CODE_READING_NO_ACCESS:
fprintf(stderr, " (no access)");
pr_debug("no access\n");
return 0;
case TEST_CODE_READING_NO_KERNEL_OBJ:
fprintf(stderr, " (no kernel obj)");
pr_debug("no kernel obj\n");
return 0;
default:
return -1;


@ -90,8 +90,8 @@ int test__keep_tracking(void)
evsel->attr.enable_on_exec = 0;
if (perf_evlist__open(evlist) < 0) {
fprintf(stderr, " (not supported)");
err = 0;
pr_debug("Unable to open dummy and cycles event\n");
err = TEST_SKIP;
goto out_err;
}


@ -2,6 +2,7 @@
#include <bpf/libbpf.h>
#include <util/llvm-utils.h>
#include <util/cache.h>
#include "llvm.h"
#include "tests.h"
#include "debug.h"
@ -11,42 +12,58 @@ static int perf_config_cb(const char *var, const char *val,
return perf_default_config(var, val, arg);
}
/*
* Randomly give it a "version" section since we don't really load it
* into kernel
*/
static const char test_bpf_prog[] =
"__attribute__((section(\"do_fork\"), used)) "
"int fork(void *ctx) {return 0;} "
"char _license[] __attribute__((section(\"license\"), used)) = \"GPL\";"
"int _version __attribute__((section(\"version\"), used)) = 0x40100;";
#ifdef HAVE_LIBBPF_SUPPORT
static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
{
struct bpf_object *obj;
obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, NULL);
if (!obj)
return -1;
if (IS_ERR(obj))
return TEST_FAIL;
bpf_object__close(obj);
return 0;
return TEST_OK;
}
#else
static int test__bpf_parsing(void *obj_buf __maybe_unused,
size_t obj_buf_sz __maybe_unused)
{
fprintf(stderr, " (skip bpf parsing)");
return 0;
pr_debug("Skip bpf parsing\n");
return TEST_OK;
}
#endif
int test__llvm(void)
static struct {
const char *source;
const char *desc;
} bpf_source_table[__LLVM_TESTCASE_MAX] = {
[LLVM_TESTCASE_BASE] = {
.source = test_llvm__bpf_base_prog,
.desc = "Basic BPF llvm compiling test",
},
[LLVM_TESTCASE_KBUILD] = {
.source = test_llvm__bpf_test_kbuild_prog,
.desc = "Test kbuild searching",
},
};
int
test_llvm__fetch_bpf_obj(void **p_obj_buf,
size_t *p_obj_buf_sz,
enum test_llvm__testcase index,
bool force)
{
char *tmpl_new, *clang_opt_new;
void *obj_buf;
size_t obj_buf_sz;
int err, old_verbose;
const char *source;
const char *desc;
const char *tmpl_old, *clang_opt_old;
char *tmpl_new = NULL, *clang_opt_new = NULL;
int err, old_verbose, ret = TEST_FAIL;
if (index >= __LLVM_TESTCASE_MAX)
return TEST_FAIL;
source = bpf_source_table[index].source;
desc = bpf_source_table[index].desc;
perf_config(perf_config_cb, NULL);
@ -54,45 +71,100 @@ int test__llvm(void)
* Skip this test if user's .perfconfig doesn't set [llvm] section
* and clang is not found in $PATH, and this is not perf test -v
*/
if (verbose == 0 && !llvm_param.user_set_param && llvm__search_clang()) {
fprintf(stderr, " (no clang, try 'perf test -v LLVM')");
if (!force && (verbose == 0 &&
!llvm_param.user_set_param &&
llvm__search_clang())) {
pr_debug("No clang and no verbosive, skip this test\n");
return TEST_SKIP;
}
old_verbose = verbose;
/*
* llvm is verbosity when error. Suppress all error output if
* not 'perf test -v'.
*/
old_verbose = verbose;
if (verbose == 0)
verbose = -1;
*p_obj_buf = NULL;
*p_obj_buf_sz = 0;
if (!llvm_param.clang_bpf_cmd_template)
return -1;
goto out;
if (!llvm_param.clang_opt)
llvm_param.clang_opt = strdup("");
err = asprintf(&tmpl_new, "echo '%s' | %s", test_bpf_prog,
llvm_param.clang_bpf_cmd_template);
err = asprintf(&tmpl_new, "echo '%s' | %s%s", source,
llvm_param.clang_bpf_cmd_template,
old_verbose ? "" : " 2>/dev/null");
if (err < 0)
return -1;
goto out;
err = asprintf(&clang_opt_new, "-xc %s", llvm_param.clang_opt);
if (err < 0)
return -1;
goto out;
tmpl_old = llvm_param.clang_bpf_cmd_template;
llvm_param.clang_bpf_cmd_template = tmpl_new;
clang_opt_old = llvm_param.clang_opt;
llvm_param.clang_opt = clang_opt_new;
err = llvm__compile_bpf("-", &obj_buf, &obj_buf_sz);
err = llvm__compile_bpf("-", p_obj_buf, p_obj_buf_sz);
llvm_param.clang_bpf_cmd_template = tmpl_old;
llvm_param.clang_opt = clang_opt_old;
verbose = old_verbose;
if (err) {
if (!verbose)
fprintf(stderr, " (use -v to see error message)");
return -1;
}
if (err)
goto out;
err = test__bpf_parsing(obj_buf, obj_buf_sz);
free(obj_buf);
return err;
ret = TEST_OK;
out:
free(tmpl_new);
free(clang_opt_new);
if (ret != TEST_OK)
pr_debug("Failed to compile test case: '%s'\n", desc);
return ret;
}
int test__llvm(void)
{
enum test_llvm__testcase i;
for (i = 0; i < __LLVM_TESTCASE_MAX; i++) {
int ret;
void *obj_buf = NULL;
size_t obj_buf_sz = 0;
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
i, false);
if (ret == TEST_OK) {
ret = test__bpf_parsing(obj_buf, obj_buf_sz);
if (ret != TEST_OK)
pr_debug("Failed to parse test case '%s'\n",
bpf_source_table[i].desc);
}
free(obj_buf);
switch (ret) {
case TEST_SKIP:
return TEST_SKIP;
case TEST_OK:
break;
default:
/*
* Test 0 is the basic LLVM test. If test 0
* fail, the basic LLVM support not functional
* so the whole test should fail. If other test
* case fail, it can be fixed by adjusting
* config so don't report error.
*/
if (i == 0)
return TEST_FAIL;
else
return TEST_SKIP;
}
}
return TEST_OK;
}


@ -0,0 +1,18 @@
#ifndef PERF_TEST_LLVM_H
#define PERF_TEST_LLVM_H
#include <stddef.h> /* for size_t */
#include <stdbool.h> /* for bool */
extern const char test_llvm__bpf_base_prog[];
extern const char test_llvm__bpf_test_kbuild_prog[];
enum test_llvm__testcase {
LLVM_TESTCASE_BASE,
LLVM_TESTCASE_KBUILD,
__LLVM_TESTCASE_MAX,
};
int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz,
enum test_llvm__testcase index, bool force);
#endif


@ -221,6 +221,11 @@ test_O = $(if $(test_$1),$(test_$1),$(test_default_O))
all:
ifdef SHUF
run := $(shell shuf -e $(run))
run_O := $(shell shuf -e $(run_O))
endif
ifdef DEBUG
d := $(info run $(run))
d := $(info run_O $(run_O))


@ -366,7 +366,7 @@ int test__switch_tracking(void)
/* Third event */
if (!perf_evlist__can_select_event(evlist, sched_switch)) {
fprintf(stderr, " (no sched_switch)");
pr_debug("No sched_switch\n");
err = 0;
goto out;
}
@ -442,7 +442,7 @@ int test__switch_tracking(void)
}
if (perf_evlist__open(evlist) < 0) {
fprintf(stderr, " (not supported)");
pr_debug("Not supported\n");
err = 0;
goto out;
}


@ -66,6 +66,7 @@ int test__fdarray__add(void);
int test__kmod_path__parse(void);
int test__thread_map(void);
int test__llvm(void);
int test__bpf(void);
int test_session_topology(void);
#if defined(__arm__) || defined(__aarch64__)


@ -1084,6 +1084,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
struct kcore_extract kce;
bool delete_extract = false;
int lineno = 0;
int nline;
if (filename)
symbol__join_symfs(symfs_filename, filename);
@ -1179,6 +1180,9 @@ fallback:
ret = decompress_to_file(m.ext, symfs_filename, fd);
if (ret)
pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
free(m.ext);
close(fd);
@ -1204,13 +1208,25 @@ fallback:
pr_debug("Executing: %s\n", command);
file = popen(command, "r");
if (!file)
if (!file) {
pr_err("Failure running %s\n", command);
/*
* If we were using debug info should retry with
* original binary.
*/
goto out_remove_tmp;
}
while (!feof(file))
nline = 0;
while (!feof(file)) {
if (symbol__parse_objdump_line(sym, map, file, privsize,
&lineno) < 0)
break;
nline++;
}
if (nline == 0)
pr_err("No output from %s\n", command);
/*
* kallsyms does not have symbol sizes so there may a nop at the end.
@ -1604,6 +1620,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
len = symbol__size(sym);
if (print_lines) {
srcline_full_filename = full_paths;
symbol__get_source_line(sym, map, evsel, &source_line, len);
print_summary(&source_line, dso->long_name);
}


@ -26,18 +26,40 @@ static int libbpf_##name(const char *fmt, ...) \
return ret; \
}
DEFINE_PRINT_FN(warning, 0)
DEFINE_PRINT_FN(info, 0)
DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)
struct bpf_prog_priv {
struct perf_probe_event pev;
};
static bool libbpf_initialized;
struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
struct bpf_object *obj;
if (!libbpf_initialized) {
libbpf_set_print(libbpf_warning,
libbpf_info,
libbpf_debug);
libbpf_initialized = true;
}
obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
if (IS_ERR(obj)) {
pr_debug("bpf: failed to load buffer\n");
return ERR_PTR(-EINVAL);
}
return obj;
}
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
struct bpf_object *obj;
static bool libbpf_initialized;
if (!libbpf_initialized) {
libbpf_set_print(libbpf_warning,
@ -53,15 +75,15 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
if (err)
return ERR_PTR(err);
return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
free(obj_buf);
} else
obj = bpf_object__open(filename);
if (!obj) {
if (IS_ERR(obj)) {
pr_debug("bpf: failed to load %s\n", filename);
return ERR_PTR(-EINVAL);
return obj;
}
return obj;
@ -96,9 +118,9 @@ config_bpf_program(struct bpf_program *prog)
int err;
config_str = bpf_program__title(prog, false);
if (!config_str) {
if (IS_ERR(config_str)) {
pr_debug("bpf: unable to get title for program\n");
return -EINVAL;
return PTR_ERR(config_str);
}
priv = calloc(sizeof(*priv), 1);
@ -113,14 +135,14 @@ config_bpf_program(struct bpf_program *prog)
if (err < 0) {
pr_debug("bpf: '%s' is not a valid config string\n",
config_str);
err = -EINVAL;
err = -BPF_LOADER_ERRNO__CONFIG;
goto errout;
}
if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
config_str, PERF_BPF_PROBE_GROUP);
err = -EINVAL;
err = -BPF_LOADER_ERRNO__GROUP;
goto errout;
} else if (!pev->group)
pev->group = strdup(PERF_BPF_PROBE_GROUP);
@ -132,9 +154,9 @@ config_bpf_program(struct bpf_program *prog)
}
if (!pev->event) {
pr_debug("bpf: '%s': event name is missing\n",
pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
config_str);
err = -EINVAL;
err = -BPF_LOADER_ERRNO__EVENTNAME;
goto errout;
}
pr_debug("bpf: config '%s' is ok\n", config_str);
@ -285,7 +307,7 @@ int bpf__foreach_tev(struct bpf_object *obj,
(void **)&priv);
if (err || !priv) {
pr_debug("bpf: failed to get private field\n");
return -EINVAL;
return -BPF_LOADER_ERRNO__INTERNAL;
}
pev = &priv->pev;
@ -308,13 +330,57 @@ int bpf__foreach_tev(struct bpf_object *obj,
return 0;
}
#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
static const char *bpf_loader_strerror_table[NR_ERRNO] = {
[ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
[ERRCODE_OFFSET(GROUP)] = "Invalid group name",
[ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
[ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
[ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
};
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
char sbuf[STRERR_BUFSIZE];
const char *msg;
if (!buf || !size)
return -1;
err = err > 0 ? err : -err;
if (err >= __LIBBPF_ERRNO__START)
return libbpf_strerror(err, buf, size);
if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
snprintf(buf, size, "%s", msg);
buf[size - 1] = '\0';
return 0;
}
if (err >= __BPF_LOADER_ERRNO__END)
snprintf(buf, size, "Unknown bpf loader error %d", err);
else
snprintf(buf, size, "%s",
strerror_r(err, sbuf, sizeof(sbuf)));
buf[size - 1] = '\0';
return -1;
}
#define bpf__strerror_head(err, buf, size) \
char sbuf[STRERR_BUFSIZE], *emsg;\
if (!size)\
return 0;\
if (err < 0)\
err = -err;\
emsg = strerror_r(err, sbuf, sizeof(sbuf));\
bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
emsg = sbuf;\
switch (err) {\
default:\
scnprintf(buf, size, "%s", emsg);\
@ -330,23 +396,62 @@ int bpf__foreach_tev(struct bpf_object *obj,
}\
buf[size - 1] = '\0';
int bpf__strerror_prepare_load(const char *filename, bool source,
int err, char *buf, size_t size)
{
size_t n;
int ret;
n = snprintf(buf, size, "Failed to load %s%s: ",
filename, source ? " from source" : "");
if (n >= size) {
buf[size - 1] = '\0';
return 0;
}
buf += n;
size -= n;
ret = bpf_loader_strerror(err, buf, size);
buf[size - 1] = '\0';
return ret;
}
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
int err, char *buf, size_t size)
{
bpf__strerror_head(err, buf, size);
bpf__strerror_entry(EEXIST, "Probe point exist. Try use 'perf probe -d \"*\"'");
bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0\n");
bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file\n");
bpf__strerror_entry(EACCES, "You need to be root");
bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
bpf__strerror_end(buf, size);
return 0;
}
int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
int bpf__strerror_load(struct bpf_object *obj,
int err, char *buf, size_t size)
{
bpf__strerror_head(err, buf, size);
bpf__strerror_entry(EINVAL, "%s: Are you root and runing a CONFIG_BPF_SYSCALL kernel?",
emsg)
case LIBBPF_ERRNO__KVER: {
unsigned int obj_kver = bpf_object__get_kversion(obj);
unsigned int real_kver;
if (fetch_kernel_version(&real_kver, NULL, 0)) {
scnprintf(buf, size, "Unable to fetch kernel version");
break;
}
if (obj_kver != real_kver) {
scnprintf(buf, size,
"'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
KVER_PARAM(obj_kver),
KVER_PARAM(real_kver));
break;
}
scnprintf(buf, size, "Failed to load program for unknown reason");
break;
}
bpf__strerror_end(buf, size);
return 0;
}


@ -8,9 +8,21 @@
#include <linux/compiler.h>
#include <linux/err.h>
#include <string.h>
#include <bpf/libbpf.h>
#include "probe-event.h"
#include "debug.h"
enum bpf_loader_errno {
__BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100,
/* Invalid config string */
BPF_LOADER_ERRNO__CONFIG = __BPF_LOADER_ERRNO__START,
BPF_LOADER_ERRNO__GROUP, /* Invalid group name */
BPF_LOADER_ERRNO__EVENTNAME, /* Event name is missing */
BPF_LOADER_ERRNO__INTERNAL, /* BPF loader internal error */
BPF_LOADER_ERRNO__COMPILE, /* Error when compiling BPF scriptlet */
__BPF_LOADER_ERRNO__END,
};
struct bpf_object;
#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
@ -19,6 +31,11 @@ typedef int (*bpf_prog_iter_callback_t)(struct probe_trace_event *tev,
#ifdef HAVE_LIBBPF_SUPPORT
struct bpf_object *bpf__prepare_load(const char *filename, bool source);
int bpf__strerror_prepare_load(const char *filename, bool source,
int err, char *buf, size_t size);
struct bpf_object *bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz,
const char *name);
void bpf__clear(void);
@ -41,6 +58,13 @@ bpf__prepare_load(const char *filename __maybe_unused,
return ERR_PTR(-ENOTSUP);
}
static inline struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf __maybe_unused,
size_t obj_buf_sz __maybe_unused)
{
return ERR_PTR(-ENOTSUP);
}
static inline void bpf__clear(void) { }
static inline int bpf__probe(struct bpf_object *obj __maybe_unused) { return 0;}
@ -67,6 +91,15 @@ __bpf_strerror(char *buf, size_t size)
return 0;
}
static inline
int bpf__strerror_prepare_load(const char *filename __maybe_unused,
bool source __maybe_unused,
int err __maybe_unused,
char *buf, size_t size)
{
return __bpf_strerror(buf, size);
}
static inline int
bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
int err __maybe_unused,


@ -4,17 +4,18 @@
*/
#include <stdio.h>
#include <sys/utsname.h>
#include "util.h"
#include "debug.h"
#include "llvm-utils.h"
#include "cache.h"
#define CLANG_BPF_CMD_DEFAULT_TEMPLATE \
"$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS " \
"$KERNEL_INC_OPTIONS -Wno-unused-value " \
"-Wno-pointer-sign -working-directory " \
"$WORKING_DIR -c \"$CLANG_SOURCE\" -target bpf -O2 -o -"
"$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
"-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \
"$CLANG_OPTIONS $KERNEL_INC_OPTIONS " \
"-Wno-unused-value -Wno-pointer-sign " \
"-working-directory $WORKING_DIR " \
"-c \"$CLANG_SOURCE\" -target bpf -O2 -o -"
struct llvm_param llvm_param = {
.clang_path = "clang",
@ -214,18 +215,19 @@ static int detect_kbuild_dir(char **kbuild_dir)
const char *suffix_dir = "";
char *autoconf_path;
struct utsname utsname;
int err;
if (!test_dir) {
err = uname(&utsname);
if (err) {
pr_warning("uname failed: %s\n", strerror(errno));
return -EINVAL;
}
/* _UTSNAME_LENGTH is 65 */
char release[128];
test_dir = utsname.release;
err = fetch_kernel_version(NULL, release,
sizeof(release));
if (err)
return -EINVAL;
test_dir = release;
prefix_dir = "/lib/modules/";
suffix_dir = "/build";
}
@ -326,13 +328,15 @@ get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
int llvm__compile_bpf(const char *path, void **p_obj_buf,
size_t *p_obj_buf_sz)
{
int err;
char clang_path[PATH_MAX];
const char *clang_opt = llvm_param.clang_opt;
const char *template = llvm_param.clang_bpf_cmd_template;
char *kbuild_dir = NULL, *kbuild_include_opts = NULL;
void *obj_buf = NULL;
size_t obj_buf_sz;
void *obj_buf = NULL;
int err, nr_cpus_avail;
unsigned int kernel_version;
char linux_version_code_str[64];
const char *clang_opt = llvm_param.clang_opt;
char clang_path[PATH_MAX], nr_cpus_avail_str[64];
char *kbuild_dir = NULL, *kbuild_include_opts = NULL;
const char *template = llvm_param.clang_bpf_cmd_template;
if (!template)
template = CLANG_BPF_CMD_DEFAULT_TEMPLATE;
@ -354,6 +358,24 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
*/
get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF);
if (nr_cpus_avail <= 0) {
pr_err(
"WARNING:\tunable to get available CPUs in this system: %s\n"
" \tUse 128 instead.\n", strerror(errno));
nr_cpus_avail = 128;
}
snprintf(nr_cpus_avail_str, sizeof(nr_cpus_avail_str), "%d",
nr_cpus_avail);
if (fetch_kernel_version(&kernel_version, NULL, 0))
kernel_version = 0;
snprintf(linux_version_code_str, sizeof(linux_version_code_str),
"0x%x", kernel_version);
force_set_env("NR_CPUS", nr_cpus_avail_str);
force_set_env("LINUX_VERSION_CODE", linux_version_code_str);
force_set_env("CLANG_EXEC", clang_path);
force_set_env("CLANG_OPTIONS", clang_opt);
force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts);


@ -644,6 +644,12 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
return printed;
}
static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
__maps__insert(&mg->maps[map->type], map);
map->groups = mg;
}
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
struct rb_root *root;
@ -682,7 +688,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
}
before->end = map->start;
__maps__insert(maps, before);
__map_groups__insert(pos->groups, before);
if (verbose >= 2)
map__fprintf(before, fp);
}
@ -696,7 +702,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
}
after->start = map->end;
__maps__insert(maps, after);
__map_groups__insert(pos->groups, after);
if (verbose >= 2)
map__fprintf(after, fp);
}


@ -632,19 +632,20 @@ int parse_events_load_bpf(struct parse_events_evlist *data,
struct bpf_object *obj;
obj = bpf__prepare_load(bpf_file_name, source);
if (IS_ERR(obj) || !obj) {
if (IS_ERR(obj)) {
char errbuf[BUFSIZ];
int err;
err = obj ? PTR_ERR(obj) : -EINVAL;
err = PTR_ERR(obj);
if (err == -ENOTSUP)
snprintf(errbuf, sizeof(errbuf),
"BPF support is not compiled");
else
snprintf(errbuf, sizeof(errbuf),
"BPF object file '%s' is invalid",
bpf_file_name);
bpf__strerror_prepare_load(bpf_file_name,
source,
-err, errbuf,
sizeof(errbuf));
data->error->help = strdup("(add -v to see detail)");
data->error->str = strdup(errbuf);


@ -1895,9 +1895,8 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
sym = map__find_symbol(map, addr, NULL);
} else {
if (tp->symbol && !addr) {
ret = kernel_get_symbol_address_by_name(tp->symbol,
&addr, true, false);
if (ret < 0)
if (kernel_get_symbol_address_by_name(tp->symbol,
&addr, true, false) < 0)
goto out;
}
if (addr) {
@ -1905,6 +1904,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
sym = __find_kernel_function(addr, &map);
}
}
if (!sym)
goto out;


@ -138,6 +138,9 @@ struct strlist *probe_file__get_rawlist(int fd)
char *p;
struct strlist *sl;
if (fd < 0)
return NULL;
sl = strlist__new(NULL, NULL);
fp = fdopen(dup(fd), "r");
@ -271,6 +274,9 @@ int probe_file__get_events(int fd, struct strfilter *filter,
const char *p;
int ret = -ENOENT;
if (!plist)
return -EINVAL;
namelist = __probe_file__get_namelist(fd, true);
if (!namelist)
return -ENOENT;


@ -29,7 +29,7 @@ static int perf_session__open(struct perf_session *session)
struct perf_data_file *file = session->file;
if (perf_session__read_header(session) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)");
pr_err("incompatible file format (rerun with -v to learn more)\n");
return -1;
}
@ -37,17 +37,17 @@ static int perf_session__open(struct perf_session *session)
return 0;
if (!perf_evlist__valid_sample_type(session->evlist)) {
pr_err("non matching sample_type");
pr_err("non matching sample_type\n");
return -1;
}
if (!perf_evlist__valid_sample_id_all(session->evlist)) {
pr_err("non matching sample_id_all");
pr_err("non matching sample_id_all\n");
return -1;
}
if (!perf_evlist__valid_read_format(session->evlist)) {
pr_err("non matching read_format");
pr_err("non matching read_format\n");
return -1;
}


@ -413,6 +413,11 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
ratio = total / avg;
fprintf(out, " # %8.0f cycles / elision ", ratio);
} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
fprintf(out, " # %8.3f CPUs utilized ", avg / ratio);
else
fprintf(out, " ");
} else if (runtime_nsecs_stats[cpu].n != 0) {
char unit = 'M';


@ -3,6 +3,7 @@
#include "debug.h"
#include <api/fs/fs.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
@ -665,3 +666,32 @@ bool find_process(const char *name)
closedir(dir);
return ret ? false : true;
}
int
fetch_kernel_version(unsigned int *puint, char *str,
size_t str_size)
{
struct utsname utsname;
int version, patchlevel, sublevel, err;
if (uname(&utsname))
return -1;
if (str && str_size) {
strncpy(str, utsname.release, str_size);
str[str_size - 1] = '\0';
}
err = sscanf(utsname.release, "%d.%d.%d",
&version, &patchlevel, &sublevel);
if (err != 3) {
pr_debug("Unablt to get kernel version from uname '%s'\n",
utsname.release);
return -1;
}
if (puint)
*puint = (version << 16) + (patchlevel << 8) + sublevel;
return 0;
}


@ -350,4 +350,12 @@ static inline char *asprintf_expr_not_in_ints(const char *var, size_t nints, int
int get_stack_size(const char *str, unsigned long *_size);
int fetch_kernel_version(unsigned int *puint,
char *str, size_t str_sz);
#define KVER_VERSION(x) (((x) >> 16) & 0xff)
#define KVER_PATCHLEVEL(x) (((x) >> 8) & 0xff)
#define KVER_SUBLEVEL(x) ((x) & 0xff)
#define KVER_FMT "%d.%d.%d"
#define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
#endif /* GIT_COMPAT_UTIL_H */
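
fetch_kernel_version() packs the running kernel's x.y.z into the same (x << 16) + (y << 8) + z form that LINUX_VERSION_CODE uses, and the KVER_* macros above unpack it again for messages such as bpf__strerror_load()'s version-mismatch report. A standalone sketch of the round trip; the macros are copied from the hunk above, and the 4.2.0 value matches the -DLINUX_VERSION_CODE=0x40200 example in the BPF scriptlets:

#include <stdio.h>

#define KVER_VERSION(x)		(((x) >> 16) & 0xff)
#define KVER_PATCHLEVEL(x)	(((x) >> 8) & 0xff)
#define KVER_SUBLEVEL(x)	((x) & 0xff)
#define KVER_FMT	"%d.%d.%d"
#define KVER_PARAM(x)	KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)

int main(void)
{
	/* What fetch_kernel_version() would compute for a 4.2.0 kernel. */
	unsigned int kver = (4 << 16) + (2 << 8) + 0;

	printf("0x%x -> " KVER_FMT "\n", kver, KVER_PARAM(kver)); /* 0x40200 -> 4.2.0 */
	return 0;
}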