alistair23-linux/tools/perf/include/bpf/bpf.h

// SPDX-License-Identifier: GPL-2.0
#ifndef _PERF_BPF_H
#define _PERF_BPF_H
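/*
 * This header is included (as <bpf.h>) by the eBPF scripts built with clang
 * for 'perf trace -e my_bpf_script.c'. On an end-user system it is installed
 * as /usr/lib/perf/include/bpf/bpf.h, and the <uapi/linux/bpf.h> include
 * below is resolved against the kernel build tree or a kernel-devel package,
 * as set up by tools/perf/util/llvm-utils.c and overridable via the
 * 'kbuild-dir' variable in the [llvm] section of ~/.perfconfig.
 */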
#include <uapi/linux/bpf.h>
/*
 * A helper structure used by an eBPF C program to describe map attributes to
 * the elf_bpf loader, taken from tools/testing/selftests/bpf/bpf_helpers.h:
 */
struct bpf_map {
        unsigned int type;
        unsigned int key_size;
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
        unsigned int inner_map_idx;
        unsigned int numa_node;
};
#define bpf_map(name, _type, type_key, type_val, _max_entries) \
struct bpf_map SEC("maps") name = {                     \
        .type        = BPF_MAP_TYPE_##_type,            \
        .key_size    = sizeof(type_key),                \
        .value_size  = sizeof(type_val),                \
        .max_entries = _max_entries,                    \
};                                                      \
struct ____btf_map_##name {                             \
        type_key key;                                   \
        type_val value;                                 \
};                                                      \
struct ____btf_map_##name __attribute__((section(".maps." #name), used)) \
        ____btf_map_##name = { }
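/*
 * Example usage, from tools/perf/examples/bpf/augmented_raw_syscalls.c
 * (illustrative; the declarations below are not part of this header):
 *
 *      struct syscall {
 *              bool enabled;
 *      };
 *
 *      bpf_map(syscalls, ARRAY, int, struct syscall, 512);
 *
 * Besides the 'struct bpf_map' placed in the "maps" ELF section, the macro
 * emits a ____btf_map_syscalls struct carrying the key/value types, which
 * libbpf uses to load BTF into the kernel so that tools such as
 * 'bpftool map dump' can pretty-print entries instead of hex-dumping them.
 */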
/*
 * FIXME: this should receive .max_entries as a parameter, as careful
 *        tuning of these limits is needed to avoid hitting limits that
 *        prevent other BPF constructs, such as tracepoint handlers,
 *        from getting installed, with cryptic messages from libbpf, etc.
 *        For the current need, 'perf trace --filter-pids', 64 should
 *        be good enough, but this surely needs to be revisited.
 */
#define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64)
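/*
 * Illustrative use (a sketch, not a declaration made by this header): the
 * 'perf trace --filter-pids' support keeps a small hash of filtered pids,
 * roughly:
 *
 *      pid_map(pids_filtered, bool);
 *
 * i.e. a HASH map keyed by pid_t with a one-byte value and 64 entries.
 */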
static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
static void (*bpf_tail_call)(void *ctx, void *map, int index) = (void *)BPF_FUNC_tail_call;
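/*
 * A minimal sketch of how a handler consults such a map, assuming the
 * 'syscalls' and 'struct syscall' declarations from the example above:
 *
 *      int id = 42; // hypothetical syscall id
 *      struct syscall *sc = bpf_map_lookup_elem(&syscalls, &id);
 *      if (sc == NULL || !sc->enabled)
 *              return 0;
 */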
#define SEC(NAME) __attribute__((section(NAME), used))
#define probe(function, vars) \
        SEC(#function "=" #function " " #vars) function
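/*
 * Example from tools/perf/examples/bpf/5sec.c: hook a probe on
 * hrtimer_nanosleep, collect rqtp->tv_sec, and fire only when it is 5:
 *
 *      #include <bpf.h>
 *
 *      int probe(hrtimer_nanosleep, rqtp->tv_sec)(void *ctx, int err, long sec)
 *      {
 *              return sec == 5;
 *      }
 *
 *      license(GPL);
 */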
#define syscall_enter(name) \
        SEC("syscalls:sys_enter_" #name) syscall_enter_ ## name

#define syscall_exit(name) \
        SEC("syscalls:sys_exit_" #name) syscall_exit_ ## name

#define license(name) \
        char _license[] SEC("license") = #name; \
        int _version SEC("version") = LINUX_VERSION_CODE;
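/*
 * Putting syscall_enter() and license() together, the hello.c example from
 * tools/perf/examples/bpf/ prints into the perf ring buffer (puts() comes
 * from the companion <stdio.h> wrapper around the __bpf_stdout__ map):
 *
 *      #include <stdio.h>
 *
 *      int syscall_enter(openat)(void *args)
 *      {
 *              puts("Hello, world\n");
 *              return 0;
 *      }
 *
 *      license(GPL);
 */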
static int (*probe_read)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read;
static int (*probe_read_str)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read_str;
static int (*perf_event_output)(void *, struct bpf_map *, int, void *, unsigned long) = (void *)BPF_FUNC_perf_event_output;
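/*
 * A hedged sketch of probe_read_str() use, in the style of the augmented
 * syscall payloads (the pointer name below is hypothetical):
 *
 *      char filename[256];
 *      probe_read_str(filename, sizeof(filename), (const void *)filename_arg_ptr);
 */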
#endif /* _PERF_BPF_H */