Merge branch 'bpf-obj-skel'

Andrii Nakryiko says:

====================
This patch set introduces an alternative, complementary interface to the
existing libbpf API for working with BPF objects, maps, programs, and global
data from the userspace side. The approach relies on code generation: bpftool
produces a struct (a.k.a. skeleton) tailored to the provided BPF object file,
including hard-coded fields and data structures for every map, program, link,
and piece of global data present.

Altogether, this approach significantly reduces the amount of userspace
boilerplate code required to open, load, attach, and work with BPF objects.
It improves the attach/detach story by providing pre-allocated space for
bpf_links and by ensuring they are properly detached on shutdown. It also
does away with by-name/title lookups of maps and programs: libbpf's skeleton
API, in conjunction with the code generated by bpftool, fills in the
hard-coded fields with actual pointers to the corresponding struct
bpf_map/bpf_program/bpf_link.
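
As an illustration, here is a minimal userspace sketch for a hypothetical
BPF object example.bpf.o, whose skeleton was generated with "bpftool gen
skeleton example.bpf.o > example.skel.h" (all names are illustrative):

    #include "example.skel.h"

    /* embed example.bpf.o's contents into this binary */
    BPF_EMBED_OBJ(example, "example.bpf.o");

    int main(void)
    {
            struct example *skel;

            skel = example__open_and_load(&example_embed);
            if (!skel)
                    return 1;
            if (example__attach(skel)) {    /* links go into skel->links */
                    example__destroy(skel); /* detaches links, frees skel */
                    return 1;
            }
            /* skel->maps.* and skel->progs.* point at the real objects */
            example__destroy(skel);
            return 0;
    }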

Also, thanks to BPF array mmap() support, working with global data
(variables) from userspace is now as natural as it is on the BPF side: each
variable is just a struct field inside the skeleton struct. Furthermore, this
gives userspace a natural way to pre-initialize global data (including
previously impossible-to-initialize .rodata): just assign values to the same
per-variable fields. Libbpf carefully takes this initialization image into
account, uses it to pre-populate BPF maps at creation time, and re-mmap()s
the BPF map's contents at exactly the same userspace memory address, so the
application can keep working with all the same pointers without any
interruption. If the kernel doesn't support mmap(), global data will still be
successfully initialized, but after map creation the global data structures
inside the skeleton will be NULL-ed out. This allows a userspace application
to gracefully handle the lack of mmap() support, if necessary.
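
Continuing the hypothetical example above, global data can be
pre-initialized between open and load (variable names are made up):

    skel = example__open(&example_embed);
    if (!skel)
            return 1;
    skel->rodata->debug_level = 2;  /* hypothetical .rodata variable */
    skel->data->sample_rate = 100;  /* hypothetical .data variable */
    if (example__load(skel)) {
            example__destroy(skel);
            return 1;
    }
    /* the same pointers keep working after load; on kernels without
     * mmap() support, skel->rodata/skel->data would be NULL here
     */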

A bunch of selftests are also converted to use skeletons, demonstrating
a significant simplification of the userspace part of each test and
a reduction in the amount of code required.

v3->v4:
- add OPTS_VALID check to btf_dump__emit_type_decl (Alexei);
- expose skeleton as LIBBPF_API functions (Alexei);
- copyright clean up, update internal map init refactor (Alexei);

v2->v3:
- make skeleton part of public API;
- expose btf_dump__emit_type_decl and btf__align_of APIs;
- move LIBBPF_API and DECLARE_LIBBPF_OPTS into libbpf_common.h for reuse;

v1->v2:
- checkpatch.pl and reverse Christmas tree styling (Jakub);
- sanitize variable names to accommodate in-function static vars;

rfc->v1:
- runqslower moved out into separate patch set waiting for vmlinux.h
  improvements;
- skeleton generation code deals with unknown internal maps more gracefully.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>

@ -839,6 +839,17 @@ _bpftool()
;;
esac
;;
gen)
case $command in
skeleton)
_filedir
;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) )
;;
esac
;;
cgroup)
case $command in
show|list|tree)

tools/bpf/bpftool/gen.c

@ -0,0 +1,551 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf.h>
#include <libbpf.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "btf.h"
#include "libbpf_internal.h"
#include "json_writer.h"
#include "main.h"
#define MAX_OBJ_NAME_LEN 64
static void sanitize_identifier(char *name)
{
int i;
for (i = 0; name[i]; i++)
if (!isalnum(name[i]) && name[i] != '_')
name[i] = '_';
}
static bool str_has_suffix(const char *str, const char *suffix)
{
size_t i, n1 = strlen(str), n2 = strlen(suffix);
if (n1 < n2)
return false;
for (i = 0; i < n2; i++) {
if (str[n1 - i - 1] != suffix[n2 - i - 1])
return false;
}
return true;
}
static void get_obj_name(char *name, const char *file)
{
/* Using basename() GNU version which doesn't modify arg. */
strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
name[MAX_OBJ_NAME_LEN - 1] = '\0';
if (str_has_suffix(name, ".o"))
name[strlen(name) - 2] = '\0';
sanitize_identifier(name);
}
static void get_header_guard(char *guard, const char *obj_name)
{
int i;
sprintf(guard, "__%s_SKEL_H__", obj_name);
for (i = 0; guard[i]; i++)
guard[i] = toupper(guard[i]);
}
static const char *get_map_ident(const struct bpf_map *map)
{
const char *name = bpf_map__name(map);
if (!bpf_map__is_internal(map))
return name;
if (str_has_suffix(name, ".data"))
return "data";
else if (str_has_suffix(name, ".rodata"))
return "rodata";
else if (str_has_suffix(name, ".bss"))
return "bss";
else
return NULL;
}
static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args)
{
vprintf(fmt, args);
}
static int codegen_datasec_def(struct bpf_object *obj,
struct btf *btf,
struct btf_dump *d,
const struct btf_type *sec,
const char *obj_name)
{
const char *sec_name = btf__name_by_offset(btf, sec->name_off);
const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
const char *sec_ident;
char var_ident[256];
if (strcmp(sec_name, ".data") == 0)
sec_ident = "data";
else if (strcmp(sec_name, ".bss") == 0)
sec_ident = "bss";
else if (strcmp(sec_name, ".rodata") == 0)
sec_ident = "rodata";
else
return 0;
printf(" struct %s__%s {\n", obj_name, sec_ident);
for (i = 0; i < vlen; i++, sec_var++) {
const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
const char *var_name = btf__name_by_offset(btf, var->name_off);
DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
.field_name = var_ident,
.indent_level = 2,
);
int need_off = sec_var->offset, align_off, align;
__u32 var_type_id = var->type;
const struct btf_type *t;
t = btf__type_by_id(btf, var_type_id);
while (btf_is_mod(t)) {
var_type_id = t->type;
t = btf__type_by_id(btf, var_type_id);
}
if (off > need_off) {
p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
sec_name, i, need_off, off);
return -EINVAL;
}
align = btf__align_of(btf, var->type);
if (align <= 0) {
p_err("Failed to determine alignment of variable '%s': %d",
var_name, align);
return -EINVAL;
}
align_off = (off + align - 1) / align * align;
if (align_off != need_off) {
printf("\t\tchar __pad%d[%d];\n",
pad_cnt, need_off - off);
pad_cnt++;
}
/* sanitize variable name, e.g., for static vars inside
* a function, its name is '<function name>.<variable name>',
* which we'll turn into '<function name>_<variable name>'
*/
var_ident[0] = '\0';
strncat(var_ident, var_name, sizeof(var_ident) - 1);
sanitize_identifier(var_ident);
printf("\t\t");
err = btf_dump__emit_type_decl(d, var_type_id, &opts);
if (err)
return err;
printf(";\n");
off = sec_var->offset + sec_var->size;
}
printf(" } *%s;\n", sec_ident);
return 0;
}
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
int n = btf__get_nr_types(btf);
struct btf_dump *d;
int i, err = 0;
d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf);
if (IS_ERR(d))
return PTR_ERR(d);
for (i = 1; i <= n; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
if (!btf_is_datasec(t))
continue;
err = codegen_datasec_def(obj, btf, d, t, obj_name);
if (err)
goto out;
}
out:
btf_dump__free(d);
return err;
}
static int codegen(const char *template, ...)
{
const char *src, *end;
int skip_tabs = 0, n;
char *s, *dst;
va_list args;
char c;
n = strlen(template);
s = malloc(n + 1);
if (!s)
return -ENOMEM;
src = template;
dst = s;
/* find out "baseline" indentation to skip */
while ((c = *src++)) {
if (c == '\t') {
skip_tabs++;
} else if (c == '\n') {
break;
} else {
p_err("unrecognized character at pos %td in template '%s'",
src - template - 1, template);
return -EINVAL;
}
}
while (*src) {
/* skip baseline indentation tabs */
for (n = skip_tabs; n > 0; n--, src++) {
if (*src != '\t') {
p_err("not enough tabs at pos %td in template '%s'",
src - template - 1, template);
return -EINVAL;
}
}
/* trim trailing whitespace */
end = strchrnul(src, '\n');
for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
;
memcpy(dst, src, n);
dst += n;
if (*end)
*dst++ = '\n';
src = *end ? end + 1 : end;
}
*dst++ = '\0';
/* print out using adjusted template */
va_start(args, template);
n = vprintf(s, args);
va_end(args);
free(s);
return n;
}
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
size_t i, map_cnt = 0, prog_cnt = 0;
char obj_name[MAX_OBJ_NAME_LEN];
const char *file, *ident;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_map *map;
struct btf *btf;
int err = -1;
if (!REQ_ARGS(1)) {
usage();
return -1;
}
file = GET_ARG();
if (argc) {
p_err("extra unknown arguments");
return -1;
}
obj = bpf_object__open_file(file, NULL);
if (IS_ERR(obj)) {
p_err("failed to open BPF object file: %ld", PTR_ERR(obj));
return -1;
}
get_obj_name(obj_name, file);
get_header_guard(header_guard, obj_name);
bpf_object__for_each_map(map, obj) {
ident = get_map_ident(map);
if (!ident) {
p_err("ignoring unrecognized internal map '%s'...",
bpf_map__name(map));
continue;
}
map_cnt++;
}
bpf_object__for_each_program(prog, obj) {
prog_cnt++;
}
codegen("\
\n\
/* THIS FILE IS AUTOGENERATED! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
#include <stdlib.h> \n\
#include <libbpf.h> \n\
\n\
struct %1$s { \n\
struct bpf_object_skeleton *skeleton; \n\
struct bpf_object *obj; \n\
",
obj_name, header_guard
);
if (map_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_map(map, obj) {
ident = get_map_ident(map);
if (!ident)
continue;
printf("\t\tstruct bpf_map *%s;\n", ident);
}
printf("\t} maps;\n");
}
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
printf("\t\tstruct bpf_program *%s;\n",
bpf_program__name(prog));
}
printf("\t} progs;\n");
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
printf("\t\tstruct bpf_link *%s;\n",
bpf_program__name(prog));
}
printf("\t} links;\n");
}
btf = bpf_object__btf(obj);
if (btf) {
err = codegen_datasecs(obj, obj_name);
if (err)
goto out;
}
codegen("\
\n\
}; \n\
\n\
static inline struct bpf_object_skeleton * \n\
%1$s__create_skeleton(struct %1$s *obj, struct bpf_embed_data *embed)\n\
{ \n\
struct bpf_object_skeleton *s; \n\
\n\
s = calloc(1, sizeof(*s)); \n\
if (!s) \n\
return NULL; \n\
\n\
s->sz = sizeof(*s); \n\
s->name = \"%1$s\"; \n\
s->data = embed->data; \n\
s->data_sz = embed->size; \n\
s->obj = &obj->obj; \n\
",
obj_name
);
if (map_cnt) {
codegen("\
\n\
\n\
/* maps */ \n\
s->map_cnt = %zu; \n\
s->map_skel_sz = sizeof(*s->maps); \n\
s->maps = calloc(s->map_cnt, s->map_skel_sz);\n\
if (!s->maps) \n\
goto err; \n\
",
map_cnt
);
i = 0;
bpf_object__for_each_map(map, obj) {
const char *ident = get_map_ident(map);
if (!ident)
continue;
codegen("\
\n\
\n\
s->maps[%zu].name = \"%s\"; \n\
s->maps[%zu].map = &obj->maps.%s; \n\
",
i, bpf_map__name(map), i, ident);
/* memory-mapped internal maps */
if (bpf_map__is_internal(map) &&
(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) {
printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
i, ident);
}
i++;
}
}
if (prog_cnt) {
codegen("\
\n\
\n\
/* programs */ \n\
s->prog_cnt = %zu; \n\
s->prog_skel_sz = sizeof(*s->progs); \n\
s->progs = calloc(s->prog_cnt, s->prog_skel_sz);\n\
if (!s->progs) \n\
goto err; \n\
",
prog_cnt
);
i = 0;
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
\n\
s->progs[%1$zu].name = \"%2$s\"; \n\
s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
s->progs[%1$zu].link = &obj->links.%2$s;\n\
",
i, bpf_program__name(prog));
i++;
}
}
codegen("\
\n\
\n\
return s; \n\
err: \n\
bpf_object__destroy_skeleton(s); \n\
return NULL; \n\
} \n\
\n\
static void \n\
%1$s__destroy(struct %1$s *obj) \n\
{ \n\
if (!obj) \n\
return; \n\
if (obj->skeleton) \n\
bpf_object__destroy_skeleton(obj->skeleton);\n\
free(obj); \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open_opts(struct bpf_embed_data *embed, const struct bpf_object_open_opts *opts)\n\
{ \n\
struct %1$s *obj; \n\
\n\
obj = calloc(1, sizeof(*obj)); \n\
if (!obj) \n\
return NULL; \n\
\n\
obj->skeleton = %1$s__create_skeleton(obj, embed); \n\
if (!obj->skeleton) \n\
goto err; \n\
\n\
if (bpf_object__open_skeleton(obj->skeleton, opts)) \n\
goto err; \n\
\n\
return obj; \n\
err: \n\
%1$s__destroy(obj); \n\
return NULL; \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open(struct bpf_embed_data *embed) \n\
{ \n\
return %1$s__open_opts(embed, NULL); \n\
} \n\
\n\
static inline int \n\
%1$s__load(struct %1$s *obj) \n\
{ \n\
return bpf_object__load_skeleton(obj->skeleton); \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open_and_load(struct bpf_embed_data *embed) \n\
{ \n\
struct %1$s *obj; \n\
\n\
obj = %1$s__open(embed); \n\
if (!obj) \n\
return NULL; \n\
if (%1$s__load(obj)) { \n\
%1$s__destroy(obj); \n\
return NULL; \n\
} \n\
return obj; \n\
} \n\
\n\
static inline int \n\
%1$s__attach(struct %1$s *obj) \n\
{ \n\
return bpf_object__attach_skeleton(obj->skeleton); \n\
} \n\
\n\
static inline void \n\
%1$s__detach(struct %1$s *obj) \n\
{ \n\
return bpf_object__detach_skeleton(obj->skeleton); \n\
} \n\
\n\
#endif /* %2$s */ \n\
",
obj_name, header_guard
);
err = 0;
out:
bpf_object__close(obj);
return err;
}
static int do_help(int argc, char **argv)
{
if (json_output) {
jsonw_null(json_wtr);
return 0;
}
fprintf(stderr,
"Usage: %1$s gen skeleton FILE\n"
" %1$s gen help\n"
"\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name);
return 0;
}
static const struct cmd cmds[] = {
{ "skeleton", do_skeleton },
{ "help", do_help },
{ 0 }
};
int do_gen(int argc, char **argv)
{
return cmd_select(cmds, argc, argv, do_help);
}
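/* For reference: given a hypothetical example.bpf.o with a single program
 * and one .bss variable, the templates above emit a header of roughly this
 * shape (condensed sketch, not verbatim tool output):
 *
 *	#ifndef __EXAMPLE_SKEL_H__
 *	#define __EXAMPLE_SKEL_H__
 *
 *	#include <stdlib.h>
 *	#include <libbpf.h>
 *
 *	struct example {
 *		struct bpf_object_skeleton *skeleton;
 *		struct bpf_object *obj;
 *		struct { struct bpf_map *bss; } maps;
 *		struct { struct bpf_program *handle_exec; } progs;
 *		struct { struct bpf_link *handle_exec; } links;
 *		struct example__bss {
 *			long my_counter;
 *		} *bss;
 *	};
 *
 *	... plus example__{create_skeleton,open,open_opts,load,
 *	open_and_load,attach,detach,destroy}() helpers ...
 *
 *	#endif
 */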


@ -58,7 +58,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
" OBJECT := { prog | map | cgroup | perf | net | feature | btf }\n"
" OBJECT := { prog | map | cgroup | perf | net | feature | btf | gen }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@ -227,6 +227,7 @@ static const struct cmd cmds[] = {
{ "net", do_net },
{ "feature", do_feature },
{ "btf", do_btf },
{ "gen", do_gen },
{ "version", do_version },
{ 0 }
};


@ -155,6 +155,7 @@ int do_net(int argc, char **arg);
int do_tracelog(int argc, char **arg);
int do_feature(int argc, char **argv);
int do_btf(int argc, char **argv);
int do_gen(int argc, char **argv);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);


@ -18,6 +18,7 @@
#include <bpf.h>
#include <nlattr.h>
#include "libbpf_internal.h"
#include "main.h"
#include "netlink_dumper.h"


@ -28,14 +28,12 @@
#include <stddef.h>
#include <stdint.h>
#include "libbpf_common.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
struct bpf_create_map_attr {
const char *name;
enum bpf_map_type map_type;


@ -278,6 +278,45 @@ done:
return nelems * size;
}
int btf__align_of(const struct btf *btf, __u32 id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
__u16 kind = btf_kind(t);
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
return min(sizeof(void *), t->size);
case BTF_KIND_PTR:
return sizeof(void *);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf__align_of(btf, t->type);
case BTF_KIND_ARRAY:
return btf__align_of(btf, btf_array(t)->type);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
const struct btf_member *m = btf_members(t);
__u16 vlen = btf_vlen(t);
int i, align = 1, t;
for (i = 0; i < vlen; i++, m++) {
t = btf__align_of(btf, m->type);
if (t <= 0)
return t;
align = max(align, t);
}
return align;
}
default:
pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
return 0;
}
}
int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
const struct btf_type *t;


@ -8,14 +8,12 @@
#include <linux/btf.h>
#include <linux/types.h>
#include "libbpf_common.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
#define MAPS_ELF_SEC ".maps"
@ -79,6 +77,7 @@ LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
@ -127,6 +126,28 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
struct btf_dump_emit_type_decl_opts {
/* size of this struct, for forward/backward compatibility */
size_t sz;
/* optional field name for type declaration, e.g.:
* - struct my_struct <FNAME>
* - void (*<FNAME>)(int)
* - char (*<FNAME>)[123]
*/
const char *field_name;
/* extra indentation level (in number of tabs) to emit for multi-line
* type declarations (e.g., anonymous struct); applies for lines
* starting from the second one (first line is assumed to have
* necessary indentation already)
*/
int indent_level;
};
#define btf_dump_emit_type_decl_opts__last_field indent_level
LIBBPF_API int
btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
const struct btf_dump_emit_type_decl_opts *opts);
/*
* A set of helpers for easier BTF types handling
*/


@ -116,6 +116,8 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
va_end(args);
}
static int btf_dump_mark_referenced(struct btf_dump *d);
struct btf_dump *btf_dump__new(const struct btf *btf,
const struct btf_ext *btf_ext,
const struct btf_dump_opts *opts,
@ -137,18 +139,39 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
if (IS_ERR(d->type_names)) {
err = PTR_ERR(d->type_names);
d->type_names = NULL;
btf_dump__free(d);
return ERR_PTR(err);
}
d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (IS_ERR(d->ident_names)) {
err = PTR_ERR(d->ident_names);
d->ident_names = NULL;
btf_dump__free(d);
return ERR_PTR(err);
goto err;
}
d->type_states = calloc(1 + btf__get_nr_types(d->btf),
sizeof(d->type_states[0]));
if (!d->type_states) {
err = -ENOMEM;
goto err;
}
d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
sizeof(d->cached_names[0]));
if (!d->cached_names) {
err = -ENOMEM;
goto err;
}
/* VOID is special */
d->type_states[0].order_state = ORDERED;
d->type_states[0].emit_state = EMITTED;
/* eagerly determine referenced types for anon enums */
err = btf_dump_mark_referenced(d);
if (err)
goto err;
return d;
err:
btf_dump__free(d);
return ERR_PTR(err);
}
void btf_dump__free(struct btf_dump *d)
@ -175,7 +198,6 @@ void btf_dump__free(struct btf_dump *d)
free(d);
}
static int btf_dump_mark_referenced(struct btf_dump *d);
static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
@ -202,27 +224,6 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
if (id > btf__get_nr_types(d->btf))
return -EINVAL;
/* type states are lazily allocated, as they might not be needed */
if (!d->type_states) {
d->type_states = calloc(1 + btf__get_nr_types(d->btf),
sizeof(d->type_states[0]));
if (!d->type_states)
return -ENOMEM;
d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
sizeof(d->cached_names[0]));
if (!d->cached_names)
return -ENOMEM;
/* VOID is special */
d->type_states[0].order_state = ORDERED;
d->type_states[0].emit_state = EMITTED;
/* eagerly determine referenced types for anon enums */
err = btf_dump_mark_referenced(d);
if (err)
return err;
}
d->emit_queue_cnt = 0;
err = btf_dump_order_type(d, id, false);
if (err < 0)
@ -752,41 +753,6 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
}
}
static int btf_align_of(const struct btf *btf, __u32 id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
__u16 kind = btf_kind(t);
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
return min(sizeof(void *), t->size);
case BTF_KIND_PTR:
return sizeof(void *);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf_align_of(btf, t->type);
case BTF_KIND_ARRAY:
return btf_align_of(btf, btf_array(t)->type);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
const struct btf_member *m = btf_members(t);
__u16 vlen = btf_vlen(t);
int i, align = 1;
for (i = 0; i < vlen; i++, m++)
align = max(align, btf_align_of(btf, m->type));
return align;
}
default:
pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
return 1;
}
}
static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
const struct btf_type *t)
{
@ -794,18 +760,18 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
int align, i, bit_sz;
__u16 vlen;
align = btf_align_of(btf, id);
align = btf__align_of(btf, id);
/* size of a non-packed struct has to be a multiple of its alignment*/
if (t->size % align)
if (align && t->size % align)
return true;
m = btf_members(t);
vlen = btf_vlen(t);
/* all non-bitfield fields have to be naturally aligned */
for (i = 0; i < vlen; i++, m++) {
align = btf_align_of(btf, m->type);
align = btf__align_of(btf, m->type);
bit_sz = btf_member_bitfield_size(t, i);
if (bit_sz == 0 && m->offset % (8 * align) != 0)
if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
return true;
}
@ -889,7 +855,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
fname = btf_name_of(d, m->name_off);
m_sz = btf_member_bitfield_size(t, i);
m_off = btf_member_bit_offset(t, i);
align = packed ? 1 : btf_align_of(d->btf, m->type);
align = packed ? 1 : btf__align_of(d->btf, m->type);
btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
btf_dump_printf(d, "\n%s", pfx(lvl + 1));
@ -907,7 +873,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
/* pad at the end, if necessary */
if (is_struct) {
align = packed ? 1 : btf_align_of(d->btf, id);
align = packed ? 1 : btf__align_of(d->btf, id);
btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
lvl + 1);
}
@ -1051,6 +1017,21 @@ static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
* of a stack frame. Some care is required to "pop" stack frames after
* processing type declaration chain.
*/
int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
const struct btf_dump_emit_type_decl_opts *opts)
{
const char *fname;
int lvl;
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
return -EINVAL;
fname = OPTS_GET(opts, field_name, NULL);
lvl = OPTS_GET(opts, indent_level, 0);
btf_dump_emit_type_decl(d, id, fname, lvl);
return 0;
}
static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
const char *fname, int lvl)
{


@ -101,13 +101,6 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
#define STRERR_BUFSIZE 128
#define CHECK_ERR(action, err, out) do { \
err = action; \
if (err) \
goto out; \
} while (0)
/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
@ -216,8 +209,8 @@ static const char * const libbpf_type_to_btf_name[] = {
};
struct bpf_map {
int fd;
char *name;
int fd;
int sec_idx;
size_t sec_offset;
int map_ifindex;
@ -228,16 +221,12 @@ struct bpf_map {
void *priv;
bpf_map_clear_priv_t clear_priv;
enum libbpf_map_type libbpf_type;
void *mmaped;
char *pin_path;
bool pinned;
bool reused;
};
struct bpf_secdata {
void *rodata;
void *data;
};
static LIST_HEAD(bpf_objects_list);
struct bpf_object {
@ -250,7 +239,6 @@ struct bpf_object {
struct bpf_map *maps;
size_t nr_maps;
size_t maps_cap;
struct bpf_secdata sections;
bool loaded;
bool has_pseudo_calls;
@ -835,13 +823,24 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
return &obj->maps[obj->nr_maps++];
}
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
map_sz = roundup(map->def.value_size, 8) * map->def.max_entries;
map_sz = roundup(map_sz, page_sz);
return map_sz;
}
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
int sec_idx, Elf_Data *data, void **data_buff)
int sec_idx, void *data, size_t data_sz)
{
char map_name[BPF_OBJ_NAME_LEN];
struct bpf_map_def *def;
struct bpf_map *map;
int err;
map = bpf_object__add_map(obj);
if (IS_ERR(map))
@ -861,25 +860,28 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def = &map->def;
def->type = BPF_MAP_TYPE_ARRAY;
def->key_size = sizeof(int);
def->value_size = data->d_size;
def->value_size = data_sz;
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
if (obj->caps.array_mmap)
def->map_flags |= BPF_F_MMAPABLE;
def->map_flags |= BPF_F_MMAPABLE;
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map_name, map->sec_idx, map->sec_offset, def->map_flags);
if (data_buff) {
*data_buff = malloc(data->d_size);
if (!*data_buff) {
zfree(&map->name);
pr_warn("failed to alloc map content buffer\n");
return -ENOMEM;
}
memcpy(*data_buff, data->d_buf, data->d_size);
map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (map->mmaped == MAP_FAILED) {
err = -errno;
map->mmaped = NULL;
pr_warn("failed to alloc map '%s' content buffer: %d\n",
map->name, err);
zfree(&map->name);
return err;
}
if (type != LIBBPF_MAP_BSS)
memcpy(map->mmaped, data, data_sz);
pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
return 0;
}
@ -888,31 +890,30 @@ static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
int err;
if (!obj->caps.global_data)
return 0;
/*
* Populate obj->maps with libbpf internal maps.
*/
if (obj->efile.data_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
obj->efile.data_shndx,
obj->efile.data,
&obj->sections.data);
obj->efile.data->d_buf,
obj->efile.data->d_size);
if (err)
return err;
}
if (obj->efile.rodata_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
obj->efile.rodata_shndx,
obj->efile.rodata,
&obj->sections.rodata);
obj->efile.rodata->d_buf,
obj->efile.rodata->d_size);
if (err)
return err;
}
if (obj->efile.bss_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
obj->efile.bss_shndx,
obj->efile.bss, NULL);
NULL,
obj->efile.bss->d_size);
if (err)
return err;
}
@ -1393,10 +1394,11 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return 0;
}
static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps,
const char *pin_root_path)
static int bpf_object__init_maps(struct bpf_object *obj,
const struct bpf_object_open_opts *opts)
{
bool strict = !relaxed_maps;
const char *pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
bool strict = !OPTS_GET(opts, relaxed_maps, false);
int err;
err = bpf_object__init_user_maps(obj, strict);
@ -1592,8 +1594,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
return 0;
}
static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
const char *pin_root_path)
static int bpf_object__elf_collect(struct bpf_object *obj)
{
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
@ -1728,14 +1729,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps,
pr_warn("Corrupted ELF file: index of strtab invalid\n");
return -LIBBPF_ERRNO__FORMAT;
}
err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
if (!err)
err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path);
if (!err)
err = bpf_object__sanitize_and_load_btf(obj);
if (!err)
err = bpf_object__init_prog_names(obj);
return err;
return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
static struct bpf_program *
@ -1765,6 +1759,19 @@ bpf_object__find_program_by_title(const struct bpf_object *obj,
return NULL;
}
struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
const char *name)
{
struct bpf_program *prog;
bpf_object__for_each_program(prog, obj) {
if (!strcmp(prog->name, name))
return prog;
}
return NULL;
}
static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
int shndx)
{
@ -1876,11 +1883,6 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
pr_warn("bad data relo against section %u\n", shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
if (!obj->caps.global_data) {
pr_warn("relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
name, insn_idx);
return -LIBBPF_ERRNO__RELOC;
}
for (map_idx = 0; map_idx < nr_maps; map_idx++) {
map = &obj->maps[map_idx];
if (map->libbpf_type != type)
@ -2301,27 +2303,32 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
__u8 *data;
/* Nothing to do here since kernel already zero-initializes .bss map. */
if (map->libbpf_type == LIBBPF_MAP_BSS)
return 0;
data = map->libbpf_type == LIBBPF_MAP_DATA ?
obj->sections.data : obj->sections.rodata;
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("Error setting initial map(%s) contents: %s\n",
map->name, cp);
return err;
}
err = bpf_map_update_elem(map->fd, &zero, data, 0);
/* Freeze .rodata map as read-only from syscall side. */
if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
if (map->libbpf_type == LIBBPF_MAP_RODATA) {
err = bpf_map_freeze(map->fd);
if (err) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("Error freezing map(%s) as read-only: %s\n",
map->name, cp);
err = 0;
return err;
}
}
return err;
return 0;
}
static int
@ -3808,11 +3815,22 @@ out:
return ret;
}
int
bpf_program__load(struct bpf_program *prog,
char *license, __u32 kern_version)
static int libbpf_find_attach_btf_id(const char *name,
enum bpf_attach_type attach_type,
__u32 attach_prog_fd);
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
int err = 0, fd, i;
int err = 0, fd, i, btf_id;
if (prog->type == BPF_PROG_TYPE_TRACING) {
btf_id = libbpf_find_attach_btf_id(prog->section_name,
prog->expected_attach_type,
prog->attach_prog_fd);
if (btf_id <= 0)
return btf_id;
prog->attach_btf_id = btf_id;
}
if (prog->instances.nr < 0 || !prog->instances.fds) {
if (prog->preprocessor) {
@ -3836,7 +3854,7 @@ bpf_program__load(struct bpf_program *prog,
prog->section_name, prog->instances.nr);
}
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_version, &fd);
license, kern_ver, &fd);
if (!err)
prog->instances.fds[0] = fd;
goto out;
@ -3865,9 +3883,7 @@ bpf_program__load(struct bpf_program *prog,
}
err = load_program(prog, result.new_insn_ptr,
result.new_insn_cnt,
license, kern_version, &fd);
result.new_insn_cnt, license, kern_ver, &fd);
if (err) {
pr_warn("Loading the %dth instance of program '%s' failed\n",
i, prog->section_name);
@ -3911,19 +3927,14 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
return 0;
}
static int libbpf_find_attach_btf_id(const char *name,
enum bpf_attach_type attach_type,
__u32 attach_prog_fd);
static struct bpf_object *
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
struct bpf_object_open_opts *opts)
const struct bpf_object_open_opts *opts)
{
const char *pin_root_path;
struct bpf_program *prog;
struct bpf_object *obj;
const char *obj_name;
char tmp_name[64];
bool relaxed_maps;
__u32 attach_prog_fd;
int err;
@ -3953,16 +3964,16 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
return obj;
obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
CHECK_ERR(bpf_object__elf_init(obj), err, out);
CHECK_ERR(bpf_object__check_endianness(obj), err, out);
CHECK_ERR(bpf_object__probe_caps(obj), err, out);
CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path),
err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
err = bpf_object__elf_init(obj);
err = err ? : bpf_object__check_endianness(obj);
err = err ? : bpf_object__elf_collect(obj);
err = err ? : bpf_object__init_maps(obj, opts);
err = err ? : bpf_object__init_prog_names(obj);
err = err ? : bpf_object__collect_reloc(obj);
if (err)
goto out;
bpf_object__elf_finish(obj);
bpf_object__for_each_program(prog, obj) {
@ -3979,15 +3990,8 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
bpf_program__set_type(prog, prog_type);
bpf_program__set_expected_attach_type(prog, attach_type);
if (prog_type == BPF_PROG_TYPE_TRACING) {
err = libbpf_find_attach_btf_id(prog->section_name,
attach_type,
attach_prog_fd);
if (err <= 0)
goto out;
prog->attach_btf_id = err;
if (prog_type == BPF_PROG_TYPE_TRACING)
prog->attach_prog_fd = attach_prog_fd;
}
}
return obj;
@ -4027,7 +4031,7 @@ struct bpf_object *bpf_object__open(const char *path)
}
struct bpf_object *
bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
{
if (!path)
return ERR_PTR(-EINVAL);
@ -4039,7 +4043,7 @@ bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
struct bpf_object_open_opts *opts)
const struct bpf_object_open_opts *opts)
{
if (!obj_buf || obj_buf_sz == 0)
return ERR_PTR(-EINVAL);
@ -4080,6 +4084,24 @@ int bpf_object__unload(struct bpf_object *obj)
return 0;
}
static int bpf_object__sanitize_maps(struct bpf_object *obj)
{
struct bpf_map *m;
bpf_object__for_each_map(m, obj) {
if (!bpf_map__is_internal(m))
continue;
if (!obj->caps.global_data) {
pr_warn("kernel doesn't support global data\n");
return -ENOTSUP;
}
if (!obj->caps.array_mmap)
m->def.map_flags ^= BPF_F_MMAPABLE;
}
return 0;
}
int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
struct bpf_object *obj;
@ -4098,9 +4120,14 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
obj->loaded = true;
CHECK_ERR(bpf_object__create_maps(obj), err, out);
CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
err = bpf_object__probe_caps(obj);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__create_maps(obj);
err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
err = err ? : bpf_object__load_progs(obj, attr->log_level);
if (err)
goto out;
return 0;
out:
@ -4671,17 +4698,22 @@ void bpf_object__close(struct bpf_object *obj)
btf_ext__free(obj->btf_ext);
for (i = 0; i < obj->nr_maps; i++) {
zfree(&obj->maps[i].name);
zfree(&obj->maps[i].pin_path);
if (obj->maps[i].clear_priv)
obj->maps[i].clear_priv(&obj->maps[i],
obj->maps[i].priv);
obj->maps[i].priv = NULL;
obj->maps[i].clear_priv = NULL;
struct bpf_map *map = &obj->maps[i];
if (map->clear_priv)
map->clear_priv(map, map->priv);
map->priv = NULL;
map->clear_priv = NULL;
if (map->mmaped) {
munmap(map->mmaped, bpf_map_mmap_sz(map));
map->mmaped = NULL;
}
zfree(&map->name);
zfree(&map->pin_path);
}
zfree(&obj->sections.rodata);
zfree(&obj->sections.data);
zfree(&obj->maps);
obj->nr_maps = 0;
@ -4821,6 +4853,11 @@ void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
prog->prog_ifindex = ifindex;
}
const char *bpf_program__name(const struct bpf_program *prog)
{
return prog->name;
}
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
const char *title;
@ -4973,7 +5010,28 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
*/
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
static const struct {
#define SEC_DEF(sec_pfx, ptype, ...) { \
.sec = sec_pfx, \
.len = sizeof(sec_pfx) - 1, \
.prog_type = BPF_PROG_TYPE_##ptype, \
__VA_ARGS__ \
}
struct bpf_sec_def;
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
struct bpf_program *prog);
static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
struct bpf_program *prog);
static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog);
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog);
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
struct bpf_program *prog);
struct bpf_sec_def {
const char *sec;
size_t len;
enum bpf_prog_type prog_type;
@ -4981,25 +5039,40 @@ static const struct {
bool is_attachable;
bool is_attach_btf;
enum bpf_attach_type attach_type;
} section_names[] = {
attach_fn_t attach_fn;
};
static const struct bpf_sec_def section_defs[] = {
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
SEC_DEF("kprobe/", KPROBE,
.attach_fn = attach_kprobe),
BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
SEC_DEF("kretprobe/", KPROBE,
.attach_fn = attach_kprobe),
BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING,
BPF_TRACE_RAW_TP),
BPF_PROG_BTF("fentry/", BPF_PROG_TYPE_TRACING,
BPF_TRACE_FENTRY),
BPF_PROG_BTF("fexit/", BPF_PROG_TYPE_TRACING,
BPF_TRACE_FEXIT),
SEC_DEF("tracepoint/", TRACEPOINT,
.attach_fn = attach_tp),
SEC_DEF("tp/", TRACEPOINT,
.attach_fn = attach_tp),
SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
.attach_fn = attach_raw_tp),
SEC_DEF("raw_tp/", RAW_TRACEPOINT,
.attach_fn = attach_raw_tp),
SEC_DEF("tp_btf/", TRACING,
.expected_attach_type = BPF_TRACE_RAW_TP,
.is_attach_btf = true,
.attach_fn = attach_trace),
SEC_DEF("fentry/", TRACING,
.expected_attach_type = BPF_TRACE_FENTRY,
.is_attach_btf = true,
.attach_fn = attach_trace),
SEC_DEF("fexit/", TRACING,
.expected_attach_type = BPF_TRACE_FEXIT,
.is_attach_btf = true,
.attach_fn = attach_trace),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
@ -5061,12 +5134,26 @@ static const struct {
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
#undef SEC_DEF
#define MAX_TYPE_NAME_SIZE 32
static const struct bpf_sec_def *find_sec_def(const char *sec_name)
{
int i, n = ARRAY_SIZE(section_defs);
for (i = 0; i < n; i++) {
if (strncmp(sec_name,
section_defs[i].sec, section_defs[i].len))
continue;
return &section_defs[i];
}
return NULL;
}
static char *libbpf_get_type_names(bool attach_type)
{
int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
char *buf;
buf = malloc(len);
@ -5075,16 +5162,16 @@ static char *libbpf_get_type_names(bool attach_type)
buf[0] = '\0';
/* Forge string buf with all available names */
for (i = 0; i < ARRAY_SIZE(section_names); i++) {
if (attach_type && !section_names[i].is_attachable)
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
if (attach_type && !section_defs[i].is_attachable)
continue;
if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
free(buf);
return NULL;
}
strcat(buf, " ");
strcat(buf, section_names[i].sec);
strcat(buf, section_defs[i].sec);
}
return buf;
@ -5093,23 +5180,23 @@ static char *libbpf_get_type_names(bool attach_type)
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
{
const struct bpf_sec_def *sec_def;
char *type_names;
int i;
if (!name)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(section_names); i++) {
if (strncmp(name, section_names[i].sec, section_names[i].len))
continue;
*prog_type = section_names[i].prog_type;
*expected_attach_type = section_names[i].expected_attach_type;
sec_def = find_sec_def(name);
if (sec_def) {
*prog_type = sec_def->prog_type;
*expected_attach_type = sec_def->expected_attach_type;
return 0;
}
pr_warn("failed to guess program type from ELF section '%s'\n", name);
type_names = libbpf_get_type_names(false);
if (type_names != NULL) {
pr_info("supported section(type) names are:%s\n", type_names);
pr_debug("supported section(type) names are:%s\n", type_names);
free(type_names);
}
@ -5188,16 +5275,16 @@ static int libbpf_find_attach_btf_id(const char *name,
if (!name)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(section_names); i++) {
if (!section_names[i].is_attach_btf)
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
if (!section_defs[i].is_attach_btf)
continue;
if (strncmp(name, section_names[i].sec, section_names[i].len))
if (strncmp(name, section_defs[i].sec, section_defs[i].len))
continue;
if (attach_prog_fd)
err = libbpf_find_prog_btf_id(name + section_names[i].len,
err = libbpf_find_prog_btf_id(name + section_defs[i].len,
attach_prog_fd);
else
err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
err = libbpf_find_vmlinux_btf_id(name + section_defs[i].len,
attach_type);
if (err <= 0)
pr_warn("%s is not found in vmlinux BTF\n", name);
@ -5216,12 +5303,12 @@ int libbpf_attach_type_by_name(const char *name,
if (!name)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(section_names); i++) {
if (strncmp(name, section_names[i].sec, section_names[i].len))
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
if (strncmp(name, section_defs[i].sec, section_defs[i].len))
continue;
if (!section_names[i].is_attachable)
if (!section_defs[i].is_attachable)
return -EINVAL;
*attach_type = section_names[i].attach_type;
*attach_type = section_defs[i].attach_type;
return 0;
}
pr_warn("failed to guess attach type based on ELF section name '%s'\n", name);
@ -5681,6 +5768,18 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
return link;
}
static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
const char *func_name;
bool retprobe;
func_name = bpf_program__title(prog, false) + sec->len;
retprobe = strcmp(sec->sec, "kretprobe/") == 0;
return bpf_program__attach_kprobe(prog, retprobe, func_name);
}
struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
bool retprobe, pid_t pid,
const char *binary_path,
@ -5793,6 +5892,32 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
return link;
}
static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
char *sec_name, *tp_cat, *tp_name;
struct bpf_link *link;
sec_name = strdup(bpf_program__title(prog, false));
if (!sec_name)
return ERR_PTR(-ENOMEM);
/* extract "tp/<category>/<name>" */
tp_cat = sec_name + sec->len;
tp_name = strchr(tp_cat, '/');
if (!tp_name) {
link = ERR_PTR(-EINVAL);
goto out;
}
*tp_name = '\0';
tp_name++;
link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
out:
free(sec_name);
return link;
}
static int bpf_link__destroy_fd(struct bpf_link *link)
{
struct bpf_link_fd *l = (void *)link;
@ -5832,6 +5957,14 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
return (struct bpf_link *)link;
}
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
const char *tp_name = bpf_program__title(prog, false) + sec->len;
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
{
char errmsg[STRERR_BUFSIZE];
@ -5863,6 +5996,23 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
return (struct bpf_link *)link;
}
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
return bpf_program__attach_trace(prog);
}
struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
const struct bpf_sec_def *sec_def;
sec_def = find_sec_def(bpf_program__title(prog, false));
if (!sec_def || !sec_def->attach_fn)
return ERR_PTR(-ESRCH);
return sec_def->attach_fn(sec_def, prog);
}
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
void **copy_mem, size_t *copy_size,
@ -6643,3 +6793,165 @@ int libbpf_num_possible_cpus(void)
WRITE_ONCE(cpus, tmp_cpus);
return tmp_cpus;
}
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
.object_name = s->name,
);
struct bpf_object *obj;
int i;
/* Attempt to preserve opts->object_name, unless overridden by user
* explicitly. Overwriting object name for skeletons is discouraged,
* as it breaks global data maps, because they contain object name
* prefix as their own map name prefix. When skeleton is generated,
* bpftool is making an assumption that this name will stay the same.
*/
if (opts) {
memcpy(&skel_opts, opts, sizeof(*opts));
if (!opts->object_name)
skel_opts.object_name = s->name;
}
obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
if (IS_ERR(obj)) {
pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
s->name, PTR_ERR(obj));
return PTR_ERR(obj);
}
*s->obj = obj;
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map **map = s->maps[i].map;
const char *name = s->maps[i].name;
void **mmaped = s->maps[i].mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
pr_warn("failed to find skeleton map '%s'\n", name);
return -ESRCH;
}
if (mmaped)
*mmaped = (*map)->mmaped;
}
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_program **prog = s->progs[i].prog;
const char *name = s->progs[i].name;
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
pr_warn("failed to find skeleton program '%s'\n", name);
return -ESRCH;
}
}
return 0;
}
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
int i, err;
err = bpf_object__load(*s->obj);
if (err) {
pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
return err;
}
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = bpf_map__fd(map);
void **mmaped = s->maps[i].mmaped;
void *remapped;
if (!mmaped)
continue;
if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
*mmaped = NULL;
continue;
}
if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
prot = PROT_READ | PROT_WRITE;
/* Remap anonymous mmap()-ed "map initialization image" as
 * a BPF map-backed mmap()-ed memory, but preserving the same
 * memory address. This will cause the kernel to change the
 * process' page table to point to a different piece of
 * kernel memory, but from the userspace point of view the
 * memory address (and its contents, being identical at this
 * point) will stay the same. This mapping will be released
 * by bpf_object__close() as per the normal clean up
 * procedure, so we don't need to worry about it from the
 * skeleton's clean up perspective.
 */
remapped = mmap(*mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED,
map_fd, 0);
if (remapped == MAP_FAILED) {
err = -errno;
*mmaped = NULL;
pr_warn("failed to re-mmap() map '%s': %d\n",
bpf_map__name(map), err);
return err;
}
}
return 0;
}
int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
int i;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
const struct bpf_sec_def *sec_def;
const char *sec_name = bpf_program__title(prog, false);
sec_def = find_sec_def(sec_name);
if (!sec_def || !sec_def->attach_fn)
continue;
*link = sec_def->attach_fn(sec_def, prog);
if (IS_ERR(*link)) {
pr_warn("failed to auto-attach program '%s': %ld\n",
bpf_program__name(prog), PTR_ERR(*link));
return PTR_ERR(*link);
}
}
return 0;
}
void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
int i;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_link **link = s->progs[i].link;
if (!IS_ERR_OR_NULL(*link))
bpf_link__destroy(*link);
*link = NULL;
}
}
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
if (s->progs)
bpf_object__detach_skeleton(s);
if (s->obj)
bpf_object__close(*s->obj);
free(s->maps);
free(s->progs);
free(s);
}


@ -17,14 +17,12 @@
#include <sys/types.h> // for size_t
#include <linux/bpf.h>
#include "libbpf_common.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
enum libbpf_errno {
__LIBBPF_ERRNO__START = 4000,
@ -67,28 +65,6 @@ struct bpf_object_open_attr {
enum bpf_prog_type prog_type;
};
/* Helper macro to declare and initialize libbpf options struct
*
* This dance with uninitialized declaration, followed by memset to zero,
* followed by assignment using compound literal syntax is done to preserve
* ability to use a nice struct field initialization syntax and **hopefully**
* have all the padding bytes initialized to zero. It's not guaranteed though,
* when copying literal, that compiler won't copy garbage in literal's padding
* bytes, but that's the best way I've found and it seems to work in practice.
*
* Macro declares opts struct of given type and name, zero-initializes,
* including any extra padding, it with memset() and then assigns initial
* values provided by users in struct initializer-syntax as varargs.
*/
#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
struct TYPE NAME = ({ \
memset(&NAME, 0, sizeof(struct TYPE)); \
(struct TYPE) { \
.sz = sizeof(struct TYPE), \
__VA_ARGS__ \
}; \
})
struct bpf_object_open_opts {
/* size of this struct, for forward/backward compatibility */
size_t sz;
@ -114,10 +90,10 @@ struct bpf_object_open_opts {
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts);
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
struct bpf_object_open_opts *opts);
const struct bpf_object_open_opts *opts);
/* deprecated bpf_object__open variants */
LIBBPF_API struct bpf_object *
@ -126,11 +102,6 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
LIBBPF_API struct bpf_object *
bpf_object__open_xattr(struct bpf_object_open_attr *attr);
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
__u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
enum libbpf_pin_type {
LIBBPF_PIN_NONE,
/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
@ -161,6 +132,7 @@ struct bpf_object_load_attr {
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
@ -171,6 +143,9 @@ LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
const char *title);
LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
const char *name);
LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
@ -214,6 +189,7 @@ LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
__u32 ifindex);
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
bool needs_copy);
@ -237,6 +213,8 @@ struct bpf_link;
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
LIBBPF_API struct bpf_link *
bpf_program__attach(struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(struct bpf_program *prog, int pfd);
LIBBPF_API struct bpf_link *
@ -512,18 +490,6 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
void **copy_mem, size_t *copy_size,
bpf_perf_event_print_t fn, void *private_data);
struct nlattr;
typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
int libbpf_netlink_open(unsigned int *nl_pid);
int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie);
int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie);
int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie);
struct bpf_prog_linfo;
struct bpf_prog_info;
@ -630,6 +596,79 @@ bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
*/
LIBBPF_API int libbpf_num_possible_cpus(void);
struct bpf_embed_data {
void *data;
size_t size;
};
#define BPF_EMBED_OBJ_DECLARE(NAME) \
extern struct bpf_embed_data NAME##_embed; \
extern char NAME##_data[]; \
extern char NAME##_data_end[];
#define __BPF_EMBED_OBJ(NAME, PATH, SZ, ASM_TYPE) \
asm ( \
" .pushsection \".rodata\", \"a\", @progbits \n" \
" .global "#NAME"_data \n" \
#NAME"_data: \n" \
" .incbin \"" PATH "\" \n" \
" .global "#NAME"_data_end \n" \
#NAME"_data_end: \n" \
" .global "#NAME"_embed \n" \
" .type "#NAME"_embed, @object \n" \
" .size "#NAME"_size, "#SZ" \n" \
" .align 8, \n" \
#NAME"_embed: \n" \
" "ASM_TYPE" "#NAME"_data \n" \
" "ASM_TYPE" "#NAME"_data_end - "#NAME"_data \n" \
" .popsection \n" \
); \
BPF_EMBED_OBJ_DECLARE(NAME)
#if __SIZEOF_POINTER__ == 4
#define BPF_EMBED_OBJ(NAME, PATH) __BPF_EMBED_OBJ(NAME, PATH, 8, ".long")
#else
#define BPF_EMBED_OBJ(NAME, PATH) __BPF_EMBED_OBJ(NAME, PATH, 16, ".quad")
#endif
struct bpf_map_skeleton {
const char *name;
struct bpf_map **map;
void **mmaped;
};
struct bpf_prog_skeleton {
const char *name;
struct bpf_program **prog;
struct bpf_link **link;
};
struct bpf_object_skeleton {
size_t sz; /* size of this struct, for forward/backward compatibility */
const char *name;
void *data;
size_t data_sz;
struct bpf_object **obj;
int map_cnt;
int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
struct bpf_map_skeleton *maps;
int prog_cnt;
int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
struct bpf_prog_skeleton *progs;
};
LIBBPF_API int
bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts);
LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
#ifdef __cplusplus
} /* extern "C" */
#endif
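
/* To make the embedding macros above concrete: BPF_EMBED_OBJ(example,
 * "example.bpf.o") is expected to provide the following symbols, per
 * BPF_EMBED_OBJ_DECLARE() (illustrative sketch):
 *
 *	extern char example_data[];	    // first byte of the embedded file
 *	extern char example_data_end[];	    // one past the last byte
 *	extern struct bpf_embed_data example_embed; // { .data, .size }
 */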


@ -210,4 +210,15 @@ LIBBPF_0.0.6 {
} LIBBPF_0.0.5;
LIBBPF_0.0.7 {
global:
btf_dump__emit_type_decl;
bpf_object__find_program_by_name;
bpf_object__attach_skeleton;
bpf_object__destroy_skeleton;
bpf_object__detach_skeleton;
bpf_object__load_skeleton;
bpf_object__open_skeleton;
bpf_program__attach;
bpf_program__name;
btf__align_of;
} LIBBPF_0.0.6;


@ -0,0 +1,38 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/*
* Common user-facing libbpf helpers.
*
* Copyright (c) 2019 Facebook
*/
#ifndef __LIBBPF_LIBBPF_COMMON_H
#define __LIBBPF_LIBBPF_COMMON_H
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
/* Helper macro to declare and initialize a libbpf options struct
 *
 * This dance of an uninitialized declaration, followed by a memset() to
 * zero, followed by an assignment using compound literal syntax, is done
 * to preserve the ability to use convenient struct field initialization
 * syntax while **hopefully** getting all the padding bytes initialized to
 * zero. It's not guaranteed that the compiler won't copy garbage into the
 * literal's padding bytes when copying it, but this is the best approach
 * found so far and it seems to work in practice.
 *
 * The macro declares an opts struct of the given type and name,
 * zero-initializes it (including any extra padding) with memset(), and
 * then assigns the initial values provided by the user in struct
 * initializer syntax as varargs.
 */
#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
struct TYPE NAME = ({ \
memset(&NAME, 0, sizeof(struct TYPE)); \
(struct TYPE) { \
.sz = sizeof(struct TYPE), \
__VA_ARGS__ \
}; \
})
#endif /* __LIBBPF_LIBBPF_COMMON_H */
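For example (mirroring how the selftests below use it), declaring open options with only a couple of fields set fills in .sz automatically and leaves all remaining bytes, padding included, zeroed:
/* Example usage; the field values here are illustrative */
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
	.object_name = "my_object",
	.relaxed_maps = true,
);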
View file
@ -100,6 +100,23 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
__u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
struct nlattr;
typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
int libbpf_netlink_open(unsigned int *nl_pid);
int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie);
int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie);
int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie);
struct btf_ext_info {
/*
* info points to the individual info section (e.g. func_info and
View file
@ -38,5 +38,7 @@ test_hashmap
test_btf_dump
xdping
test_cpp
*.skel.h
/no_alu32
/bpf_gcc
/tools
View file
@ -3,10 +3,12 @@ include ../../../../scripts/Kbuild.include
include ../../../scripts/Makefile.arch
CURDIR := $(abspath .)
LIBDIR := $(abspath ../../../lib)
TOOLSDIR := $(abspath ../../..)
LIBDIR := $(TOOLSDIR)/lib
BPFDIR := $(LIBDIR)/bpf
TOOLSDIR := $(abspath ../../../include)
APIDIR := $(TOOLSDIR)/uapi
TOOLSINCDIR := $(TOOLSDIR)/include
BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
APIDIR := $(TOOLSINCDIR)/uapi
GENDIR := $(abspath ../../../../include/generated)
GENHDR := $(GENDIR)/autoconf.h
@ -19,7 +21,7 @@ LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
CFLAGS += -g -Wall -O2 $(GENFLAGS) -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) \
-I$(GENDIR) -I$(TOOLSDIR) -I$(CURDIR) \
-I$(GENDIR) -I$(TOOLSINCDIR) -I$(CURDIR) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
LDLIBS += -lcap -lelf -lrt -lpthread
@ -117,6 +119,12 @@ $(OUTPUT)/test_cgroup_attach: cgroup_helpers.c
# force a rebuild of BPFOBJ when its dependencies are updated
force:
DEFAULT_BPFTOOL := $(OUTPUT)/tools/usr/local/sbin/bpftool
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): force
$(MAKE) -C $(BPFTOOLDIR) DESTDIR=$(OUTPUT)/tools install
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
@ -180,6 +188,8 @@ define GCC_BPF_BUILD_RULE
$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c
# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
# $(eval)) and pass control to DEFINE_TEST_RUNNER_RULES.
# Parameters:
@ -195,8 +205,11 @@ TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
$$(filter %.c,$(TRUNNER_EXTRA_SOURCES)))
TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES))
TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h
TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
$$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c)))
TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c))
TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS))
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
$$(filter-out $(SKEL_BLACKLIST), \
$$(TRUNNER_BPF_SRCS)))
# Evaluate rules now with extra TRUNNER_XXX variables above already defined
$$(eval $$(call DEFINE_TEST_RUNNER_RULES,$1,$2))
@ -226,6 +239,11 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS), \
$(TRUNNER_BPF_LDFLAGS))
$(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
$(TRUNNER_OUTPUT)/%.o \
| $(BPFTOOL) $(TRUNNER_OUTPUT)
$$(BPFTOOL) gen skeleton $$< > $$@
endif
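Each generated %.skel.h defines a user-facing struct whose fields mirror the BPF object's maps, programs, links, and global data. A rough sketch for test_attach_probe, hand-written here from the usage in the converted test below (not verbatim bpftool output):
/* Approximate shape of a generated test_attach_probe.skel.h (sketch) */
struct test_attach_probe {
	struct bpf_object_skeleton *skeleton;
	struct bpf_object *obj;
	struct {
		struct bpf_map *bss;
	} maps;
	struct {
		struct bpf_program *handle_kprobe;
		struct bpf_program *handle_kretprobe;
		struct bpf_program *handle_uprobe;
		struct bpf_program *handle_uretprobe;
	} progs;
	struct {
		struct bpf_link *handle_kprobe;
		struct bpf_link *handle_kretprobe;
		struct bpf_link *handle_uprobe;
		struct bpf_link *handle_uretprobe;
	} links;
	struct test_attach_probe__bss {
		int kprobe_res;
		int kretprobe_res;
		int uprobe_res;
		int uretprobe_res;
	} *bss;
};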
# ensure we set up tests.h header generation rule just once
@ -245,6 +263,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_TESTS_DIR)/%.c \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
$(TRUNNER_BPF_SKELS) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
cd $$(@D) && $$(CC) $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
@ -255,9 +274,9 @@ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
# only copy extra resources if in flavored build
$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
ifneq ($2,)
# only copy extra resources if in flavored build
cp -a $$^ $(TRUNNER_OUTPUT)/
endif
@ -323,4 +342,5 @@ $(OUTPUT)/test_cpp: test_cpp.cpp $(BPFOBJ)
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature $(OUTPUT)/*.o $(OUTPUT)/no_alu32 $(OUTPUT)/bpf_gcc
feature $(OUTPUT)/*.o $(OUTPUT)/no_alu32 $(OUTPUT)/bpf_gcc \
tools *.skel.h
View file
@ -1,23 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#define EMBED_FILE(NAME, PATH) \
asm ( \
" .pushsection \".rodata\", \"a\", @progbits \n" \
" .global "#NAME"_data \n" \
#NAME"_data: \n" \
" .incbin \"" PATH "\" \n" \
#NAME"_data_end: \n" \
" .global "#NAME"_size \n" \
" .type "#NAME"_size, @object \n" \
" .size "#NAME"_size, 4 \n" \
" .align 4, \n" \
#NAME"_size: \n" \
" .int "#NAME"_data_end - "#NAME"_data \n" \
" .popsection \n" \
); \
extern char NAME##_data[]; \
extern int NAME##_size;
#include "test_attach_probe.skel.h"
ssize_t get_base_addr() {
size_t start;
@ -39,30 +22,14 @@ ssize_t get_base_addr() {
return -EINVAL;
}
EMBED_FILE(probe, "test_attach_probe.o");
BPF_EMBED_OBJ(probe, "test_attach_probe.o");
void test_attach_probe(void)
{
const char *kprobe_name = "kprobe/sys_nanosleep";
const char *kretprobe_name = "kretprobe/sys_nanosleep";
const char *uprobe_name = "uprobe/trigger_func";
const char *uretprobe_name = "uretprobe/trigger_func";
const int kprobe_idx = 0, kretprobe_idx = 1;
const int uprobe_idx = 2, uretprobe_idx = 3;
const char *obj_name = "attach_probe";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.object_name = obj_name,
.relaxed_maps = true,
);
struct bpf_program *kprobe_prog, *kretprobe_prog;
struct bpf_program *uprobe_prog, *uretprobe_prog;
struct bpf_object *obj;
int err, duration = 0, res;
struct bpf_link *kprobe_link = NULL;
struct bpf_link *kretprobe_link = NULL;
struct bpf_link *uprobe_link = NULL;
struct bpf_link *uretprobe_link = NULL;
int results_map_fd;
int duration = 0;
struct bpf_link *kprobe_link, *kretprobe_link;
struct bpf_link *uprobe_link, *uretprobe_link;
struct test_attach_probe* skel;
size_t uprobe_offset;
ssize_t base_addr;
@ -72,123 +39,68 @@ void test_attach_probe(void)
return;
uprobe_offset = (size_t)&get_base_addr - base_addr;
/* open object */
obj = bpf_object__open_mem(probe_data, probe_size, &open_opts);
if (CHECK(IS_ERR(obj), "obj_open_mem", "err %ld\n", PTR_ERR(obj)))
skel = test_attach_probe__open_and_load(&probe_embed);
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
"wrong obj name '%s', expected '%s'\n",
bpf_object__name(obj), obj_name))
if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n"))
goto cleanup;
kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
if (CHECK(!kprobe_prog, "find_probe",
"prog '%s' not found\n", kprobe_name))
goto cleanup;
kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
if (CHECK(!kretprobe_prog, "find_probe",
"prog '%s' not found\n", kretprobe_name))
goto cleanup;
uprobe_prog = bpf_object__find_program_by_title(obj, uprobe_name);
if (CHECK(!uprobe_prog, "find_probe",
"prog '%s' not found\n", uprobe_name))
goto cleanup;
uretprobe_prog = bpf_object__find_program_by_title(obj, uretprobe_name);
if (CHECK(!uretprobe_prog, "find_probe",
"prog '%s' not found\n", uretprobe_name))
goto cleanup;
/* create maps && load programs */
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
/* load maps */
results_map_fd = bpf_find_map(__func__, obj, "results_map");
if (CHECK(results_map_fd < 0, "find_results_map",
"err %d\n", results_map_fd))
goto cleanup;
kprobe_link = bpf_program__attach_kprobe(kprobe_prog,
kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
false /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
"err %ld\n", PTR_ERR(kprobe_link))) {
kprobe_link = NULL;
"err %ld\n", PTR_ERR(kprobe_link)))
goto cleanup;
}
kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog,
skel->links.handle_kprobe = kprobe_link;
kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe,
true /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe",
"err %ld\n", PTR_ERR(kretprobe_link))) {
kretprobe_link = NULL;
"err %ld\n", PTR_ERR(kretprobe_link)))
goto cleanup;
}
uprobe_link = bpf_program__attach_uprobe(uprobe_prog,
skel->links.handle_kretprobe = kretprobe_link;
uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe,
false /* retprobe */,
0 /* self pid */,
"/proc/self/exe",
uprobe_offset);
if (CHECK(IS_ERR(uprobe_link), "attach_uprobe",
"err %ld\n", PTR_ERR(uprobe_link))) {
uprobe_link = NULL;
"err %ld\n", PTR_ERR(uprobe_link)))
goto cleanup;
}
uretprobe_link = bpf_program__attach_uprobe(uretprobe_prog,
skel->links.handle_uprobe = uprobe_link;
uretprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uretprobe,
true /* retprobe */,
-1 /* any pid */,
"/proc/self/exe",
uprobe_offset);
if (CHECK(IS_ERR(uretprobe_link), "attach_uretprobe",
"err %ld\n", PTR_ERR(uretprobe_link))) {
uretprobe_link = NULL;
"err %ld\n", PTR_ERR(uretprobe_link)))
goto cleanup;
}
skel->links.handle_uretprobe = uretprobe_link;
/* trigger & validate kprobe && kretprobe */
usleep(1);
err = bpf_map_lookup_elem(results_map_fd, &kprobe_idx, &res);
if (CHECK(err, "get_kprobe_res",
"failed to get kprobe res: %d\n", err))
if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res",
"wrong kprobe res: %d\n", skel->bss->kprobe_res))
goto cleanup;
if (CHECK(res != kprobe_idx + 1, "check_kprobe_res",
"wrong kprobe res: %d\n", res))
goto cleanup;
err = bpf_map_lookup_elem(results_map_fd, &kretprobe_idx, &res);
if (CHECK(err, "get_kretprobe_res",
"failed to get kretprobe res: %d\n", err))
goto cleanup;
if (CHECK(res != kretprobe_idx + 1, "check_kretprobe_res",
"wrong kretprobe res: %d\n", res))
if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res",
"wrong kretprobe res: %d\n", skel->bss->kretprobe_res))
goto cleanup;
/* trigger & validate uprobe & uretprobe */
get_base_addr();
err = bpf_map_lookup_elem(results_map_fd, &uprobe_idx, &res);
if (CHECK(err, "get_uprobe_res",
"failed to get uprobe res: %d\n", err))
if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",
"wrong uprobe res: %d\n", skel->bss->uprobe_res))
goto cleanup;
if (CHECK(res != uprobe_idx + 1, "check_uprobe_res",
"wrong uprobe res: %d\n", res))
goto cleanup;
err = bpf_map_lookup_elem(results_map_fd, &uretprobe_idx, &res);
if (CHECK(err, "get_uretprobe_res",
"failed to get uretprobe res: %d\n", err))
goto cleanup;
if (CHECK(res != uretprobe_idx + 1, "check_uretprobe_res",
"wrong uretprobe res: %d\n", res))
if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res",
"wrong uretprobe res: %d\n", skel->bss->uretprobe_res))
goto cleanup;
cleanup:
bpf_link__destroy(kprobe_link);
bpf_link__destroy(kretprobe_link);
bpf_link__destroy(uprobe_link);
bpf_link__destroy(uretprobe_link);
bpf_object__close(obj);
test_attach_probe__destroy(skel);
}
View file
@ -1,90 +1,59 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "test_pkt_access.skel.h"
#include "fentry_test.skel.h"
#include "fexit_test.skel.h"
BPF_EMBED_OBJ(pkt_access, "test_pkt_access.o");
BPF_EMBED_OBJ(fentry, "fentry_test.o");
BPF_EMBED_OBJ(fexit, "fexit_test.o");
void test_fentry_fexit(void)
{
struct bpf_prog_load_attr attr_fentry = {
.file = "./fentry_test.o",
};
struct bpf_prog_load_attr attr_fexit = {
.file = "./fexit_test.o",
};
struct test_pkt_access *pkt_skel = NULL;
struct fentry_test *fentry_skel = NULL;
struct fexit_test *fexit_skel = NULL;
__u64 *fentry_res, *fexit_res;
__u32 duration = 0, retval;
int err, pkt_fd, i;
struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj;
struct bpf_map *data_map_fentry, *data_map_fexit;
char fentry_name[] = "fentry/bpf_fentry_testX";
char fexit_name[] = "fexit/bpf_fentry_testX";
int err, pkt_fd, kfree_skb_fd, i;
struct bpf_link *link[12] = {};
struct bpf_program *prog[12];
__u32 duration, retval;
const int zero = 0;
u64 result[12];
err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
&pkt_obj, &pkt_fd);
if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
pkt_skel = test_pkt_access__open_and_load(&pkt_access_embed);
if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
return;
err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd);
if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
fentry_skel = fentry_test__open_and_load(&fentry_embed);
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto close_prog;
err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd);
if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
fexit_skel = fexit_test__open_and_load(&fexit_embed);
if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
goto close_prog;
for (i = 0; i < 6; i++) {
fentry_name[sizeof(fentry_name) - 2] = '1' + i;
prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name);
if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name))
goto close_prog;
link[i] = bpf_program__attach_trace(prog[i]);
if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
goto close_prog;
}
data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss");
if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n"))
goto close_prog;
for (i = 6; i < 12; i++) {
fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6;
prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name);
if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name))
goto close_prog;
link[i] = bpf_program__attach_trace(prog[i]);
if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
goto close_prog;
}
data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss");
if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n"))
err = fentry_test__attach(fentry_skel);
if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
goto close_prog;
err = fexit_test__attach(fexit_skel);
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result);
if (CHECK(err, "get_result",
"failed to get output data: %d\n", err))
goto close_prog;
err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6);
if (CHECK(err, "get_result",
"failed to get output data: %d\n", err))
goto close_prog;
for (i = 0; i < 12; i++)
if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
i % 6 + 1, result[i]))
goto close_prog;
fentry_res = (__u64 *)fentry_skel->bss;
fexit_res = (__u64 *)fexit_skel->bss;
for (i = 0; i < 6; i++) {
CHECK(fentry_res[i] != 1, "result",
"fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
CHECK(fexit_res[i] != 1, "result",
"fexit_test%d failed err %lld\n", i + 1, fexit_res[i]);
}
close_prog:
for (i = 0; i < 12; i++)
if (!IS_ERR_OR_NULL(link[i]))
bpf_link__destroy(link[i]);
bpf_object__close(obj_fentry);
bpf_object__close(obj_fexit);
bpf_object__close(pkt_obj);
test_pkt_access__destroy(pkt_skel);
fentry_test__destroy(fentry_skel);
fexit_test__destroy(fexit_skel);
}
View file
@ -1,64 +1,46 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "test_pkt_access.skel.h"
#include "fentry_test.skel.h"
BPF_EMBED_OBJ_DECLARE(pkt_access);
BPF_EMBED_OBJ_DECLARE(fentry);
void test_fentry_test(void)
{
struct bpf_prog_load_attr attr = {
.file = "./fentry_test.o",
};
char prog_name[] = "fentry/bpf_fentry_testX";
struct bpf_object *obj = NULL, *pkt_obj;
int err, pkt_fd, kfree_skb_fd, i;
struct bpf_link *link[6] = {};
struct bpf_program *prog[6];
struct test_pkt_access *pkt_skel = NULL;
struct fentry_test *fentry_skel = NULL;
int err, pkt_fd, i;
__u32 duration, retval;
struct bpf_map *data_map;
const int zero = 0;
u64 result[6];
__u64 *result;
err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
&pkt_obj, &pkt_fd);
if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
pkt_skel = test_pkt_access__open_and_load(&pkt_access_embed);
if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
return;
err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
goto close_prog;
fentry_skel = fentry_test__open_and_load(&fentry_embed);
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto cleanup;
for (i = 0; i < 6; i++) {
prog_name[sizeof(prog_name) - 2] = '1' + i;
prog[i] = bpf_object__find_program_by_title(obj, prog_name);
if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
goto close_prog;
link[i] = bpf_program__attach_trace(prog[i]);
if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
goto close_prog;
}
data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss");
if (CHECK(!data_map, "find_data_map", "data map not found\n"))
goto close_prog;
err = fentry_test__attach(fentry_skel);
if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
goto cleanup;
pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
if (CHECK(err, "get_result",
"failed to get output data: %d\n", err))
goto close_prog;
result = (__u64 *)fentry_skel->bss;
for (i = 0; i < 6; i++) {
if (CHECK(result[i] != 1, "result",
"fentry_test%d failed err %lld\n", i + 1, result[i]))
goto cleanup;
}
for (i = 0; i < 6; i++)
if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
i + 1, result[i]))
goto close_prog;
close_prog:
for (i = 0; i < 6; i++)
if (!IS_ERR_OR_NULL(link[i]))
bpf_link__destroy(link[i]);
bpf_object__close(obj);
bpf_object__close(pkt_obj);
cleanup:
fentry_test__destroy(fentry_skel);
test_pkt_access__destroy(pkt_skel);
}
View file
@ -1,59 +1,41 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <sys/mman.h>
#include "test_mmap.skel.h"
struct map_data {
__u64 val[512 * 4];
};
struct bss_data {
__u64 in_val;
__u64 out_val;
};
static size_t roundup_page(size_t sz)
{
long page_size = sysconf(_SC_PAGE_SIZE);
return (sz + page_size - 1) / page_size * page_size;
}
BPF_EMBED_OBJ(test_mmap, "test_mmap.o");
void test_mmap(void)
{
const char *file = "test_mmap.o";
const char *probe_name = "raw_tracepoint/sys_enter";
const char *tp_name = "sys_enter";
const size_t bss_sz = roundup_page(sizeof(struct bss_data));
const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
const size_t map_sz = roundup_page(sizeof(struct map_data));
const int zero = 0, one = 1, two = 2, far = 1500;
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link = NULL;
struct bpf_map *data_map, *bss_map;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
volatile struct bss_data *bss_data;
volatile struct map_data *map_data;
struct test_mmap__bss *bss_data;
struct map_data *map_data;
struct test_mmap *skel;
__u64 val = 0;
obj = bpf_object__open_file("test_mmap.o", NULL);
if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
file, PTR_ERR(obj)))
return;
prog = bpf_object__find_program_by_title(obj, probe_name);
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name))
goto cleanup;
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n",
probe_name, err))
goto cleanup;
bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss");
if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n"))
goto cleanup;
data_map = bpf_object__find_map_by_name(obj, "data_map");
if (CHECK(!data_map, "find_data_map", "data_map map not found\n"))
goto cleanup;
skel = test_mmap__open_and_load(&test_mmap_embed);
if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
bss_map = skel->maps.bss;
data_map = skel->maps.data_map;
data_map_fd = bpf_map__fd(data_map);
bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
@ -77,13 +59,15 @@ void test_mmap(void)
CHECK_FAIL(bss_data->in_val);
CHECK_FAIL(bss_data->out_val);
CHECK_FAIL(skel->bss->in_val);
CHECK_FAIL(skel->bss->out_val);
CHECK_FAIL(map_data->val[0]);
CHECK_FAIL(map_data->val[1]);
CHECK_FAIL(map_data->val[2]);
CHECK_FAIL(map_data->val[far]);
link = bpf_program__attach_raw_tracepoint(prog, tp_name);
if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
err = test_mmap__attach(skel);
if (CHECK(err, "attach_raw_tp", "err %d\n", err))
goto cleanup;
bss_data->in_val = 123;
@ -94,6 +78,8 @@ void test_mmap(void)
CHECK_FAIL(bss_data->in_val != 123);
CHECK_FAIL(bss_data->out_val != 123);
CHECK_FAIL(skel->bss->in_val != 123);
CHECK_FAIL(skel->bss->out_val != 123);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 123);
@ -160,6 +146,8 @@ void test_mmap(void)
usleep(1);
CHECK_FAIL(bss_data->in_val != 321);
CHECK_FAIL(bss_data->out_val != 321);
CHECK_FAIL(skel->bss->in_val != 321);
CHECK_FAIL(skel->bss->out_val != 321);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 321);
@ -203,6 +191,8 @@ void test_mmap(void)
map_data = tmp2;
CHECK_FAIL(bss_data->in_val != 321);
CHECK_FAIL(bss_data->out_val != 321);
CHECK_FAIL(skel->bss->in_val != 321);
CHECK_FAIL(skel->bss->out_val != 321);
CHECK_FAIL(map_data->val[0] != 111);
CHECK_FAIL(map_data->val[1] != 222);
CHECK_FAIL(map_data->val[2] != 321);
@ -214,7 +204,5 @@ cleanup:
CHECK_FAIL(munmap(bss_mmaped, bss_sz));
if (map_mmaped)
CHECK_FAIL(munmap(map_mmaped, map_sz));
if (!IS_ERR_OR_NULL(link))
bpf_link__destroy(link);
bpf_object__close(obj);
test_mmap__destroy(skel);
}
View file
@ -3,8 +3,7 @@
void test_probe_user(void)
{
#define kprobe_name "__sys_connect"
const char *prog_name = "kprobe/" kprobe_name;
const char *prog_name = "kprobe/__sys_connect";
const char *obj_file = "./test_probe_user.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
int err, results_map_fd, sock_fd, duration = 0;
@ -33,8 +32,7 @@ void test_probe_user(void)
"err %d\n", results_map_fd))
goto cleanup;
kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false,
kprobe_name);
kprobe_link = bpf_program__attach(kprobe_prog);
if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
"err %ld\n", PTR_ERR(kprobe_link))) {
kprobe_link = NULL;
View file
@ -16,14 +16,11 @@ struct rdonly_map_subtest {
void test_rdonly_maps(void)
{
const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop";
const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop";
const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop";
const char *file = "test_rdonly_maps.o";
struct rdonly_map_subtest subtests[] = {
{ "skip loop", prog_name_skip_loop, 0, 0 },
{ "part loop", prog_name_part_loop, 3, 2 + 3 + 4 },
{ "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 },
{ "skip loop", "skip_loop", 0, 0 },
{ "part loop", "part_loop", 3, 2 + 3 + 4 },
{ "full loop", "full_loop", 4, 2 + 3 + 4 + 5 },
};
int i, err, zero = 0, duration = 0;
struct bpf_link *link = NULL;
@ -50,7 +47,7 @@ void test_rdonly_maps(void)
if (!test__start_subtest(t->subtest_name))
continue;
prog = bpf_object__find_program_by_title(obj, t->prog_name);
prog = bpf_object__find_program_by_name(obj, t->prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
t->prog_name))
goto cleanup;
View file
@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
struct s {
int a;
long long b;
} __attribute__((packed));
#include "test_skeleton.skel.h"
BPF_EMBED_OBJ(skeleton, "test_skeleton.o");
void test_skeleton(void)
{
int duration = 0, err;
struct test_skeleton* skel;
struct test_skeleton__bss *bss;
skel = test_skeleton__open_and_load(&skeleton_embed);
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
bss = skel->bss;
bss->in1 = 1;
bss->in2 = 2;
bss->in3 = 3;
bss->in4 = 4;
bss->in5.a = 5;
bss->in5.b = 6;
err = test_skeleton__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
usleep(1);
CHECK(bss->out1 != 1, "res1", "got %d != exp %d\n", bss->out1, 1);
CHECK(bss->out2 != 2, "res2", "got %lld != exp %d\n", bss->out2, 2);
CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3);
CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4);
CHECK(bss->handler_out5.a != 5, "res5", "got %d != exp %d\n",
bss->handler_out5.a, 5);
CHECK(bss->handler_out5.b != 6, "res6", "got %lld != exp %d\n",
bss->handler_out5.b, 6);
cleanup:
test_skeleton__destroy(skel);
}
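The __open_and_load() wrapper used above is, in the generated code, simply a composition of the per-skeleton __open() and __load() steps (compare the split open/load usage in the stacktrace_build_id_nmi test below). A hedged sketch of its shape, not verbatim bpftool output:
/* Sketch: generated convenience wrapper composing open + load */
static inline struct test_skeleton *
test_skeleton__open_and_load(struct bpf_embed_data *embed)
{
	struct test_skeleton *skel = test_skeleton__open(embed);
	if (!skel)
		return NULL;
	if (test_skeleton__load(skel)) {
		test_skeleton__destroy(skel);
		return NULL;
	}
	return skel;
}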
View file
@ -1,16 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"
BPF_EMBED_OBJ(stacktrace_build_id, "test_stacktrace_build_id.o");
void test_stacktrace_build_id(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
const char *prog_name = "tracepoint/random/urandom_read";
const char *file = "./test_stacktrace_build_id.o";
int err, prog_fd, stack_trace_len;
struct test_stacktrace_build_id *skel;
int err, stack_trace_len;
__u32 key, previous_key, val, duration = 0;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link = NULL;
char buf[256];
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@ -18,43 +18,24 @@ void test_stacktrace_build_id(void)
int retry = 1;
retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
skel = test_stacktrace_build_id__open_and_load(&stacktrace_build_id_embed);
if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
prog = bpf_object__find_program_by_title(obj, prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
goto close_prog;
link = bpf_program__attach_tracepoint(prog, "random", "urandom_read");
if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link)))
goto close_prog;
err = test_stacktrace_build_id__attach(skel);
if (CHECK(err, "attach_tp", "err %d\n", err))
goto cleanup;
/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
"err %d errno %d\n", err, errno))
goto disable_pmu;
stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
err, errno))
goto disable_pmu;
stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
control_map_fd = bpf_map__fd(skel->maps.control_map);
stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
stackmap_fd = bpf_map__fd(skel->maps.stackmap);
stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
goto disable_pmu;
goto cleanup;
if (CHECK_FAIL(system("./urandom_read")))
goto disable_pmu;
goto cleanup;
/* disable stack trace collection */
key = 0;
val = 1;
@ -66,23 +47,23 @@ retry:
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
err = extract_build_id(buf, 256);
if (CHECK(err, "get build_id with readelf",
"err %d errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
do {
char build_id[64];
@ -90,7 +71,7 @@ retry:
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
@ -108,8 +89,7 @@ retry:
* try it one more time.
*/
if (build_id_matches < 1 && retry--) {
bpf_link__destroy(link);
bpf_object__close(obj);
test_stacktrace_build_id__destroy(skel);
printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
__func__);
goto retry;
@ -117,17 +97,14 @@ retry:
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
goto cleanup;
stack_trace_len = PERF_MAX_STACK_DEPTH
* sizeof(struct bpf_stack_build_id);
stack_trace_len = PERF_MAX_STACK_DEPTH *
sizeof(struct bpf_stack_build_id);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
"err %d errno %d\n", err, errno);
disable_pmu:
bpf_link__destroy(link);
close_prog:
bpf_object__close(obj);
cleanup:
test_stacktrace_build_id__destroy(skel);
}

View file
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"
static __u64 read_perf_max_sample_freq(void)
{
@ -14,21 +15,19 @@ static __u64 read_perf_max_sample_freq(void)
return sample_freq;
}
BPF_EMBED_OBJ_DECLARE(stacktrace_build_id);
void test_stacktrace_build_id_nmi(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
const char *prog_name = "tracepoint/random/urandom_read";
const char *file = "./test_stacktrace_build_id.o";
int err, pmu_fd, prog_fd;
int control_map_fd, stackid_hmap_fd, stackmap_fd;
struct test_stacktrace_build_id *skel;
int err, pmu_fd;
struct perf_event_attr attr = {
.freq = 1,
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
__u32 key, previous_key, val, duration = 0;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link;
char buf[256];
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@ -38,13 +37,16 @@ void test_stacktrace_build_id_nmi(void)
attr.sample_freq = read_perf_max_sample_freq();
retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
skel = test_stacktrace_build_id__open(&stacktrace_build_id_embed);
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
prog = bpf_object__find_program_by_title(obj, prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
goto close_prog;
/* override program type */
bpf_program__set_perf_event(skel->progs.oncpu);
err = test_stacktrace_build_id__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
goto cleanup;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
@ -52,40 +54,25 @@ retry:
if (CHECK(pmu_fd < 0, "perf_event_open",
"err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
pmu_fd, errno))
goto close_prog;
goto cleanup;
link = bpf_program__attach_perf_event(prog, pmu_fd);
if (CHECK(IS_ERR(link), "attach_perf_event",
"err %ld\n", PTR_ERR(link))) {
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
"err %ld\n", PTR_ERR(skel->links.oncpu))) {
close(pmu_fd);
goto close_prog;
goto cleanup;
}
/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
"err %d errno %d\n", err, errno))
goto disable_pmu;
stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
err, errno))
goto disable_pmu;
stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
control_map_fd = bpf_map__fd(skel->maps.control_map);
stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
stackmap_fd = bpf_map__fd(skel->maps.stackmap);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
goto disable_pmu;
goto cleanup;
if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
goto disable_pmu;
goto cleanup;
/* disable stack trace collection */
key = 0;
val = 1;
@ -97,23 +84,23 @@ retry:
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
err = extract_build_id(buf, 256);
if (CHECK(err, "get build_id with readelf",
"err %d errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
do {
char build_id[64];
@ -121,7 +108,7 @@ retry:
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
goto disable_pmu;
goto cleanup;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
@ -139,8 +126,7 @@ retry:
* try it one more time.
*/
if (build_id_matches < 1 && retry--) {
bpf_link__destroy(link);
bpf_object__close(obj);
test_stacktrace_build_id__destroy(skel);
printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
__func__);
goto retry;
@ -148,7 +134,7 @@ retry:
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
goto cleanup;
/*
* We intentionally skip compare_stack_ips(). This is because we
@ -157,8 +143,6 @@ retry:
* BPF_STACK_BUILD_ID_IP;
*/
disable_pmu:
bpf_link__destroy(link);
close_prog:
bpf_object__close(obj);
cleanup:
test_stacktrace_build_id__destroy(skel);
}
View file
@ -5,46 +5,36 @@
#include <linux/bpf.h>
#include "bpf_helpers.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 4);
__type(key, int);
__type(value, int);
} results_map SEC(".maps");
int kprobe_res = 0;
int kretprobe_res = 0;
int uprobe_res = 0;
int uretprobe_res = 0;
SEC("kprobe/sys_nanosleep")
int handle_sys_nanosleep_entry(struct pt_regs *ctx)
int handle_kprobe(struct pt_regs *ctx)
{
const int key = 0, value = 1;
bpf_map_update_elem(&results_map, &key, &value, 0);
kprobe_res = 1;
return 0;
}
SEC("kretprobe/sys_nanosleep")
int handle_sys_getpid_return(struct pt_regs *ctx)
int handle_kretprobe(struct pt_regs *ctx)
{
const int key = 1, value = 2;
bpf_map_update_elem(&results_map, &key, &value, 0);
kretprobe_res = 2;
return 0;
}
SEC("uprobe/trigger_func")
int handle_uprobe_entry(struct pt_regs *ctx)
int handle_uprobe(struct pt_regs *ctx)
{
const int key = 2, value = 3;
bpf_map_update_elem(&results_map, &key, &value, 0);
uprobe_res = 3;
return 0;
}
SEC("uretprobe/trigger_func")
int handle_uprobe_return(struct pt_regs *ctx)
int handle_uretprobe(struct pt_regs *ctx)
{
const int key = 3, value = 4;
bpf_map_update_elem(&results_map, &key, &value, 0);
uretprobe_res = 4;
return 0;
}
View file
@ -0,0 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
struct s {
int a;
long long b;
} __attribute__((packed));
int in1 = 0;
long long in2 = 0;
char in3 = '\0';
long long in4 __attribute__((aligned(64))) = 0;
struct s in5 = {};
long long out2 = 0;
char out3 = 0;
long long out4 = 0;
int out1 = 0;
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
static volatile struct s out5;
out1 = in1;
out2 = in2;
out3 = in3;
out4 = in4;
out5 = in5;
return 0;
}
char _license[] SEC("license") = "GPL";