Merge branch 'resolve_prog_type'

Udip Pant says:

====================
This patch series adds changes in the verifier so that decisions such as
granting read / write packet access or enforcing the return code range are
based on the program type of the target program when dynamic program
extension (BPF_PROG_TYPE_EXT) is used.

BPF_PROG_TYPE_EXT can be used to extend program types such as XDP, SKB
and others. Since BPF_PROG_TYPE_EXT is by itself only a placeholder for the
type it extends, the verifier needs to resolve the target program's type so
that extension programs actually get the proper access checks when this
option is used.

Patch #1 includes the changes in the verifier.
Patch #2 adds selftests to verify packet write access for a valid
extension program type.
Patch #3 adds selftests to verify the proper check of the return code.
Patch #4 adds selftests to ensure access permissions and restrictions
for some map types, such as sockmap.

Changelogs:
  v2 -> v3:
    * more comprehensive resolution of the program type in the verifier
      based on the target program (and not just for the packet access)
    * selftests for checking return code and map access
    * also moved this series from the 'bpf' tree to 'bpf-next'
  v1 -> v2:
    * extraction of the logic to resolve the prog type into a separate helper
    * selftests to check for packet access for a valid freplace prog
====================

Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov 2020-08-26 12:47:57 -07:00
commit 1fc0e18b6e
7 changed files with 229 additions and 11 deletions
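
As background for the diffs below, here is a minimal userspace sketch of how
an extension program is loaded with libbpf: the target program is loaded
first, and its fd is passed when the BPF_PROG_TYPE_EXT object is opened, so
the verifier can resolve the extension's effective type from the target. The
object file names are illustrative (borrowed from the selftests in this
series), the function is hypothetical, and the error handling is simplified;
this is a sketch, not code from the series.

#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: load a target program, then open and load an freplace object
 * against it.  With this series, the extension is verified under the
 * rules of the target's program type (packet access, return code range,
 * map compatibility), not the BPF_PROG_TYPE_EXT placeholder type.
 */
static int load_freplace_sketch(void)
{
	struct bpf_object *tgt_obj = NULL, *ext_obj = NULL;
	int err, tgt_prog_fd;

	/* Load the target program; its type (here SCHED_CLS) is what the
	 * verifier resolves for the extension below.
	 */
	err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC,
			    &tgt_obj, &tgt_prog_fd);
	if (err)
		return err;

	/* Open the freplace object with the target's prog fd attached. */
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .attach_prog_fd = tgt_prog_fd,
	);
	ext_obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts);
	if (libbpf_get_error(ext_obj)) {
		err = -EINVAL;
		goto out;
	}

	/* Verification of the extension happens here, against the
	 * resolved (target) program type.
	 */
	err = bpf_object__load(ext_obj);

	bpf_object__close(ext_obj);
out:
	bpf_object__close(tgt_obj);
	return err;
}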

kernel/bpf/verifier.c

@@ -2625,11 +2625,19 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
 #define MAX_PACKET_OFF 0xffff
+static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
+{
+	return prog->aux->linked_prog ? prog->aux->linked_prog->type
+				      : prog->type;
+}
 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
				       const struct bpf_call_arg_meta *meta,
				       enum bpf_access_type t)
 {
-	switch (env->prog->type) {
+	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
+	switch (prog_type) {
 	/* Program types only with direct read access go here! */
 	case BPF_PROG_TYPE_LWT_IN:
 	case BPF_PROG_TYPE_LWT_OUT:
@@ -4186,7 +4194,7 @@ err_type:
 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
 {
 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
-	enum bpf_prog_type type = env->prog->type;
+	enum bpf_prog_type type = resolve_prog_type(env->prog);
 	if (func_id != BPF_FUNC_map_update_elem)
 		return false;
@@ -7376,7 +7384,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	u8 mode = BPF_MODE(insn->code);
 	int i, err;
-	if (!may_access_skb(env->prog->type)) {
+	if (!may_access_skb(resolve_prog_type(env->prog))) {
 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
 		return -EINVAL;
 	}
@@ -7464,11 +7472,12 @@ static int check_return_code(struct bpf_verifier_env *env)
 	const struct bpf_prog *prog = env->prog;
 	struct bpf_reg_state *reg;
 	struct tnum range = tnum_range(0, 1);
+	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
 	int err;
 	/* LSM and struct_ops func-ptr's return type could be "void" */
-	if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
-	     env->prog->type == BPF_PROG_TYPE_LSM) &&
+	if ((prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
+	     prog_type == BPF_PROG_TYPE_LSM) &&
 	    !prog->aux->attach_func_proto->type)
 		return 0;
@@ -7487,7 +7496,7 @@ static int check_return_code(struct bpf_verifier_env *env)
 		return -EACCES;
 	}
-	switch (env->prog->type) {
+	switch (prog_type) {
 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
@@ -9243,6 +9252,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
					struct bpf_prog *prog)
 {
+	enum bpf_prog_type prog_type = resolve_prog_type(prog);
 	/*
 	 * Validate that trace type programs use preallocated hash maps.
 	 *
@@ -9260,8 +9270,8 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 	 * now, but warnings are emitted so developers are made aware of
 	 * the unsafety and can fix their programs before this is enforced.
 	 */
-	if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) {
-		if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
+	if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
+		if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
 			verbose(env, "perf_event programs can only use preallocated hash map\n");
 			return -EINVAL;
 		}
@@ -9273,8 +9283,8 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 		verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
 	}
-	if ((is_tracing_prog_type(prog->type) ||
-	     prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
+	if ((is_tracing_prog_type(prog_type) ||
+	     prog_type == BPF_PROG_TYPE_SOCKET_FILTER) &&
 	    map_value_has_spin_lock(map)) {
 		verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
 		return -EINVAL;
@@ -9986,7 +9996,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
				insn->code = BPF_LDX | BPF_PROBE_MEM |
					BPF_SIZE((insn)->code);
				env->prog->aux->num_exentries++;
-			} else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
+			} else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
				verbose(env, "Writes through BTF pointers are not allowed\n");
				return -EINVAL;
			}

tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c

@@ -123,6 +123,7 @@ static void test_func_replace(void)
"freplace/get_skb_len",
"freplace/get_skb_ifindex",
"freplace/get_constant",
"freplace/test_pkt_write_access_subprog",
};
test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
"./test_pkt_access.o",
@@ -141,10 +142,77 @@ static void test_func_replace_verify(void)
prog_name, false);
}
static void test_func_sockmap_update(void)
{
const char *prog_name[] = {
"freplace/cls_redirect",
};
test_fexit_bpf2bpf_common("./freplace_cls_redirect.o",
"./test_cls_redirect.o",
ARRAY_SIZE(prog_name),
prog_name, false);
}
static void test_obj_load_failure_common(const char *obj_file,
const char *target_obj_file)
{
/*
* standalone test that asserts failure to load freplace prog
* because of invalid return code.
*/
struct bpf_object *obj = NULL, *pkt_obj;
int err, pkt_fd;
__u32 duration = 0;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
target_obj_file, err, errno))
return;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
.attach_prog_fd = pkt_fd,
);
obj = bpf_object__open_file(obj_file, &opts);
if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
"failed to open %s: %ld\n", obj_file,
PTR_ERR(obj)))
goto close_prog;
/* It should fail to load the program */
err = bpf_object__load(obj);
if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
goto close_prog;
close_prog:
if (!IS_ERR_OR_NULL(obj))
bpf_object__close(obj);
bpf_object__close(pkt_obj);
}
static void test_func_replace_return_code(void)
{
/* test invalid return code in the replaced program */
test_obj_load_failure_common("./freplace_connect_v4_prog.o",
"./connect4_prog.o");
}
static void test_func_map_prog_compatibility(void)
{
/* test with spin lock map value in the replaced program */
test_obj_load_failure_common("./freplace_attach_probe.o",
"./test_attach_probe.o");
}
void test_fexit_bpf2bpf(void)
{
test_target_no_callees();
test_target_yes_callees();
test_func_replace();
test_func_replace_verify();
test_func_sockmap_update();
test_func_replace_return_code();
test_func_map_prog_compatibility();
}

tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c

@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
@@ -151,4 +153,29 @@ int new_get_constant(long val)
test_get_constant = 1;
return test_get_constant; /* original get_constant() returns val - 122 */
}
__u64 test_pkt_write_access_subprog = 0;
SEC("freplace/test_pkt_write_access_subprog")
int new_test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
struct tcphdr *tcp;
if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
return -1;
tcp = data + off;
if (tcp + 1 > data_end)
return -1;
/* make modifications to the packet data */
tcp->check++;
tcp->syn = 0;
test_pkt_write_access_subprog = 1;
return 0;
}
char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/progs/freplace_attach_probe.c

@@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define VAR_NUM 2
struct hmap_elem {
struct bpf_spin_lock lock;
int var[VAR_NUM];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct hmap_elem);
} hash_map SEC(".maps");
SEC("freplace/handle_kprobe")
int new_handle_kprobe(struct pt_regs *ctx)
{
struct hmap_elem zero = {}, *val;
int key = 0;
val = bpf_map_lookup_elem(&hash_map, &key);
if (!val)
return 1;
/* spin_lock in hash map */
bpf_spin_lock(&val->lock);
val->var[0] = 99;
bpf_spin_unlock(&val->lock);
return 0;
}
char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/progs/freplace_cls_redirect.c

@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
struct bpf_map_def SEC("maps") sock_map = {
.type = BPF_MAP_TYPE_SOCKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 2,
};
SEC("freplace/cls_redirect")
int freplace_cls_redirect_test(struct __sk_buff *skb)
{
int ret = 0;
const int zero = 0;
struct bpf_sock *sk;
sk = bpf_map_lookup_elem(&sock_map, &zero);
if (!sk)
return TC_ACT_SHOT;
ret = bpf_map_update_elem(&sock_map, &zero, sk, 0);
bpf_sk_release(sk);
return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c

@@ -0,0 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/stddef.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
SEC("freplace/connect_v4_prog")
int new_connect_v4_prog(struct bpf_sock_addr *ctx)
{
// return a value that's in an invalid range
return 255;
}
char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/progs/test_pkt_access.c

@@ -79,6 +79,24 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
return skb->ifindex * val * var;
}
__attribute__ ((noinline))
int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
struct tcphdr *tcp = NULL;
if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
return -1;
tcp = data + off;
if (tcp + 1 > data_end)
return -1;
/* make a modification to the packet data */
tcp->check++;
return 0;
}
SEC("classifier/test_pkt_access")
int test_pkt_access(struct __sk_buff *skb)
{
@@ -117,6 +135,8 @@ int test_pkt_access(struct __sk_buff *skb)
if (test_pkt_access_subprog3(3, skb) != skb->len * 3 * skb->ifindex)
return TC_ACT_SHOT;
if (tcp) {
if (test_pkt_write_access_subprog(skb, (void *)tcp - data))
return TC_ACT_SHOT;
if (((void *)(tcp) + 20) > data_end || proto != 6)
return TC_ACT_SHOT;
barrier(); /* to force ordering of checks */