
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2018-12-05

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) fix bpf uapi pointers for 32-bit architectures, from Daniel.

2) improve verifier ability to handle progs with a lot of branches, from Alexei.

3) strict btf checks, from Yonghong.

4) bpf_sk_lookup api cleanup, from Joe.

5) other misc fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-12-05 16:30:30 -08:00
commit e37d05a538
13 changed files with 732 additions and 95 deletions

arch/powerpc/net/bpf_jit_comp64.c

@@ -891,6 +891,55 @@ cond_branch:
return 0;
}
/* Fix the branch target addresses for subprog calls */
static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
struct codegen_context *ctx, u32 *addrs)
{
const struct bpf_insn *insn = fp->insnsi;
bool func_addr_fixed;
u64 func_addr;
u32 tmp_idx;
int i, ret;
for (i = 0; i < fp->len; i++) {
/*
* During the extra pass, only the branch target addresses for
* the subprog calls need to be fixed. All other instructions
* can be left untouched.
*
* The JITed image length does not change because we already
* ensure that the JITed instruction sequence for these calls
* are of fixed length by padding them with NOPs.
*/
if (insn[i].code == (BPF_JMP | BPF_CALL) &&
insn[i].src_reg == BPF_PSEUDO_CALL) {
ret = bpf_jit_get_func_addr(fp, &insn[i], true,
&func_addr,
&func_addr_fixed);
if (ret < 0)
return ret;
/*
* Save ctx->idx as this would currently point to the
* end of the JITed image and set it to the offset of
* the instruction sequence corresponding to the
* subprog call temporarily.
*/
tmp_idx = ctx->idx;
ctx->idx = addrs[i] / 4;
bpf_jit_emit_func_call_rel(image, ctx, func_addr);
/*
* Restore ctx->idx here. This is safe as the length
* of the JITed sequence remains unchanged.
*/
ctx->idx = tmp_idx;
}
}
return 0;
}
struct powerpc64_jit_data {
struct bpf_binary_header *header;
u32 *addrs;
@@ -989,6 +1038,22 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
if (extra_pass) {
/*
* Do not touch the prologue and epilogue as they will remain
* unchanged. Only fix the branch target address for subprog
* calls in the body.
*
* This does not change the offsets and lengths of the subprog
* call instruction sequences and hence, the size of the JITed
* image as well.
*/
bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
/* There is no need to perform the usual passes. */
goto skip_codegen_passes;
}
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
/* Now build the prologue, body code & epilogue for real. */
@@ -1002,6 +1067,7 @@ skip_init_ctx:
proglen - (cgctx.idx * 4), cgctx.seen);
}
skip_codegen_passes:
if (bpf_jit_enable > 1)
/*
* Note that we output the base address of the code_base

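The scheme above works only because each subprog call is emitted at a fixed length, so re-emitting one in place never shifts the rest of the image. As a hedged, self-contained sketch of that patch-in-place pattern (not kernel code: the opcodes, image layout and addrs[] values below are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Invented fixed-length two-word "call" sequence; the trailing NOP
 * keeps its size constant no matter what the target turns out to be. */
static void emit_call(uint32_t *image, uint32_t *idx, uint32_t target)
{
	image[(*idx)++] = 0x48000000u | (target & 0x03fffffcu); /* fake branch */
	image[(*idx)++] = 0x60000000u;                          /* NOP pad */
}

int main(void)
{
	uint32_t image[8] = { 0 };
	uint32_t addrs[] = { 0, 8, 16 }; /* byte offset of each insn in the image */
	uint32_t idx = 6;                /* codegen already ran to the end */
	uint32_t tmp_idx;

	/* Extra pass: rewind idx to the call site, re-emit the call with
	 * the now-known target, then restore idx, mirroring what the
	 * kernel function does with ctx->idx. */
	tmp_idx = idx;
	idx = addrs[1] / 4;
	emit_call(image, &idx, 0x1234);
	idx = tmp_idx;

	printf("words 2 and 3 patched: %08x %08x; idx restored to %u\n",
	       image[2], image[3], idx);
	return 0;
}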
include/linux/filter.h

@@ -449,6 +449,13 @@ struct sock_reuseport;
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
#if BITS_PER_LONG == 64
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#else
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \

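The new macro exists because offsetofend() of a pointer member covers only 4 bytes on a 32-bit kernel, while the BPF uapi reserves a full 64-bit slot for every pointer field; the accepted range must therefore always span 8 bytes. A small sketch (struct toy_ctx and its fields are invented, not uapi) showing the byte range the macro has to cover on any build:

#include <stddef.h>
#include <stdio.h>

/* Toy mirror of a uapi context struct whose pointer member is padded
 * out to a full 8-byte slot, as __bpf_md_ptr() does in the uapi. */
struct toy_ctx {
	unsigned int len;
	union {
		void *flow_keys;
		unsigned long long :64;
	} __attribute__((aligned(8)));
};

int main(void)
{
	/* bpf_ctx_range_ptr(struct toy_ctx, flow_keys) must accept exactly
	 * these bytes whether sizeof(void *) is 4 or 8: */
	size_t first = offsetof(struct toy_ctx, flow_keys);
	size_t last = first + 8 - 1;

	printf("flow_keys slot: bytes [%zu, %zu]\n", first, last);
	return 0;
}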
include/uapi/linux/bpf.h

@@ -2170,7 +2170,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
@@ -2187,12 +2187,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
* If the *netns* is zero, then the socket lookup table in the
* netns associated with the *ctx* will be used. For the TC hooks,
* this in the netns of the device in the skb. For socket hooks,
* this in the netns of the socket. If *netns* is non-zero, then
* it specifies the ID of the netns relative to the netns
* associated with the *ctx*.
* If the *netns* is a negative signed 32-bit integer, then the
* socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
* equal to zero then it specifies the ID of the netns relative to
* the netns associated with the *ctx*. *netns* values beyond the
* range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2201,8 +2203,10 @@ union bpf_attr {
* **CONFIG_NET** configuration option.
* Return
* Pointer to *struct bpf_sock*, or NULL in case of failure.
* For sockets with reuseport option, the *struct bpf_sock*
* result is from reuse->socks[] using the hash of the tuple.
*
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for UDP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
@@ -2219,12 +2223,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
* If the *netns* is zero, then the socket lookup table in the
* netns associated with the *ctx* will be used. For the TC hooks,
* this in the netns of the device in the skb. For socket hooks,
* this in the netns of the socket. If *netns* is non-zero, then
* it specifies the ID of the netns relative to the netns
* associated with the *ctx*.
* If the *netns* is a negative signed 32-bit integer, then the
* socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
* equal to zero then it specifies the ID of the netns relative to
* the netns associated with the *ctx*. *netns* values beyond the
* range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2233,6 +2239,8 @@ union bpf_attr {
* **CONFIG_NET** configuration option.
* Return
* Pointer to *struct bpf_sock*, or NULL in case of failure.
* For sockets with reuseport option, the *struct bpf_sock*
* result is from reuse->socks[] using the hash of the tuple.
*
* int bpf_sk_release(struct bpf_sock *sk)
* Description
@@ -2405,6 +2413,9 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* Current network namespace */
#define BPF_F_CURRENT_NETNS (-1L)
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6_INLINE
};
#define __bpf_md_ptr(type, name) \
union { \
type name; \
__u64 :64; \
} __attribute__((aligned(8)))
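The union-plus-unnamed-bitfield shape pins both the size and the alignment of a pointer field to 8 bytes, so 32-bit and 64-bit user space see identical field offsets. A compile-time sketch of that guarantee (struct toy_md is a made-up stand-in, not part of the uapi):

#include <assert.h>

#define md_ptr(type, name)		\
union {					\
	type name;			\
	unsigned long long :64;		\
} __attribute__((aligned(8)))

struct toy_md {
	md_ptr(void *, data);
	md_ptr(void *, data_end);
	unsigned int family;
};

/* Holds on both 32-bit and 64-bit targets: each pointer slot is
 * 8 bytes and the struct is padded to its 8-byte alignment. */
static_assert(sizeof(struct toy_md) == 24, "uapi-style layout");

int main(void) { return 0; }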
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -2456,7 +2473,7 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
struct bpf_flow_keys *flow_keys;
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
};
struct bpf_tunnel_key {
@@ -2572,8 +2589,8 @@ enum sk_action {
* be added to the end of this structure
*/
struct sk_msg_md {
void *data;
void *data_end;
__bpf_md_ptr(void *, data);
__bpf_md_ptr(void *, data_end);
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -2589,8 +2606,9 @@ struct sk_reuseport_md {
* Start of directly accessible data. It begins from
* the tcp/udp header.
*/
void *data;
void *data_end; /* End of directly accessible data */
__bpf_md_ptr(void *, data);
/* End of directly accessible data */
__bpf_md_ptr(void *, data_end);
/*
* Total length of packet (starting from the tcp/udp header).
* Note that the directly accessible bytes (data_end - data)

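Tying the new pieces together, a hedged usage sketch in the style of the selftests further below (the section name and the zeroed tuple are illustrative only): the program asks for a lookup in its own netns via BPF_F_CURRENT_NETNS and releases whatever reference comes back.

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("classifier")
int lookup_own_netns(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	/* netns = BPF_F_CURRENT_NETNS (-1): use the netns behind this
	 * skb; flags must stay 0 for now. */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return 0;

	/* The acquired reference must be released before returning. */
	bpf_sk_release(sk);
	return 1;
}

char _license[] SEC("license") = "GPL";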
kernel/bpf/btf.c

@@ -5,6 +5,7 @@
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
@@ -426,6 +427,30 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
offset < btf->hdr.str_len;
}
/* Only C-style identifier is permitted. This can be relaxed if
* necessary.
*/
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
/* offset must be valid */
const char *src = &btf->strings[offset];
const char *src_limit;
if (!isalpha(*src) && *src != '_')
return false;
/* set a limit on identifier length */
src_limit = src + KSYM_NAME_LEN;
src++;
while (*src && src < src_limit) {
if (!isalnum(*src) && *src != '_')
return false;
src++;
}
return !*src;
}
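A hedged userspace mirror of this check (taking a plain string rather than a BTF string-table offset; KSYM_NAME_LEN mirrors the kernel's 128-byte cap) makes the accepted name set concrete:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

#define KSYM_NAME_LEN 128

static bool name_valid_identifier(const char *src)
{
	const char *src_limit = src + KSYM_NAME_LEN;

	if (!isalpha((unsigned char)*src) && *src != '_')
		return false;
	for (src++; *src && src < src_limit; src++)
		if (!isalnum((unsigned char)*src) && *src != '_')
			return false;
	return !*src; /* false if the cap was hit before the NUL */
}

int main(void)
{
	printf("%d %d %d\n",
	       name_valid_identifier("__int"),  /* 1: valid         */
	       name_valid_identifier("__!int"), /* 0: '!' rejected  */
	       name_valid_identifier("9name")); /* 0: leading digit */
	return 0;
}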
static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
if (!offset)
@@ -1143,6 +1168,22 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* typedef type must have a valid name, and other ref types,
* volatile, const, restrict, should have a null name.
*/
if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
if (!t->name_off ||
!btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
} else {
if (t->name_off) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
}
btf_verifier_log_type(env, t, NULL);
return 0;
@@ -1300,6 +1341,13 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* fwd type must have a valid name */
if (!t->name_off ||
!btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
btf_verifier_log_type(env, t, NULL);
return 0;
@@ -1356,6 +1404,12 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* array type should not have a name */
if (t->name_off) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
if (btf_type_vlen(t)) {
btf_verifier_log_type(env, t, "vlen != 0");
return -EINVAL;
@@ -1532,6 +1586,13 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* struct type either no name or a valid one */
if (t->name_off &&
!btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
btf_verifier_log_type(env, t, NULL);
last_offset = 0;
@@ -1543,6 +1604,12 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* struct member either no name or a valid one */
if (member->name_off &&
!btf_name_valid_identifier(btf, member->name_off)) {
btf_verifier_log_member(env, t, member, "Invalid name");
return -EINVAL;
}
/* A member cannot be in type void */
if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
btf_verifier_log_member(env, t, member,
@@ -1730,6 +1797,13 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* enum type either no name or a valid one */
if (t->name_off &&
!btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
btf_verifier_log_type(env, t, NULL);
for (i = 0; i < nr_enums; i++) {
@@ -1739,6 +1813,14 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* enum member must have a valid name */
if (!enums[i].name_off ||
!btf_name_valid_identifier(btf, enums[i].name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
btf_verifier_log(env, "\t%s val=%d\n",
btf_name_by_offset(btf, enums[i].name_off),
enums[i].val);

kernel/bpf/verifier.c

@@ -175,6 +175,7 @@ struct bpf_verifier_stack_elem {
#define BPF_COMPLEXITY_LIMIT_INSNS 131072
#define BPF_COMPLEXITY_LIMIT_STACK 1024
#define BPF_COMPLEXITY_LIMIT_STATES 64
#define BPF_MAP_PTR_UNPRIV 1UL
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
@@ -3751,6 +3752,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
}
}
/* compute branch direction of the expression "if (reg opcode val) goto target;"
* and return:
* 1 - branch will be taken and "goto target" will be executed
* 0 - branch will not be taken and fall-through to next insn
* -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
*/
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
{
if (__is_pointer_value(false, reg))
return -1;
switch (opcode) {
case BPF_JEQ:
if (tnum_is_const(reg->var_off))
return !!tnum_equals_const(reg->var_off, val);
break;
case BPF_JNE:
if (tnum_is_const(reg->var_off))
return !tnum_equals_const(reg->var_off, val);
break;
case BPF_JGT:
if (reg->umin_value > val)
return 1;
else if (reg->umax_value <= val)
return 0;
break;
case BPF_JSGT:
if (reg->smin_value > (s64)val)
return 1;
else if (reg->smax_value < (s64)val)
return 0;
break;
case BPF_JLT:
if (reg->umax_value < val)
return 1;
else if (reg->umin_value >= val)
return 0;
break;
case BPF_JSLT:
if (reg->smax_value < (s64)val)
return 1;
else if (reg->smin_value >= (s64)val)
return 0;
break;
case BPF_JGE:
if (reg->umin_value >= val)
return 1;
else if (reg->umax_value < val)
return 0;
break;
case BPF_JSGE:
if (reg->smin_value >= (s64)val)
return 1;
else if (reg->smax_value < (s64)val)
return 0;
break;
case BPF_JLE:
if (reg->umax_value <= val)
return 1;
else if (reg->umin_value > val)
return 0;
break;
case BPF_JSLE:
if (reg->smax_value <= (s64)val)
return 1;
else if (reg->smin_value > (s64)val)
return 0;
break;
}
return -1;
}
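To see what this buys, here is a tiny standalone mirror of just the BPF_JGT arm (the bounds and constants are invented): whenever the tracked [umin, umax] interval lies entirely on one side of the constant, the branch direction is known at verification time and the dead path need not be explored at all.

#include <stdint.h>
#include <stdio.h>

struct bounds { uint64_t umin, umax; };

/* Mirror of the BPF_JGT case: 1 = always taken, 0 = never, -1 = unknown. */
static int jgt_taken(struct bounds r, uint64_t val)
{
	if (r.umin > val)
		return 1;
	if (r.umax <= val)
		return 0;
	return -1;
}

int main(void)
{
	struct bounds r = { .umin = 0, .umax = 10 };

	printf("%d\n", jgt_taken(r, 20)); /* 0: umax <= 20, never taken */
	printf("%d\n", jgt_taken(r, 5));  /* -1: [0,10] straddles 5     */
	r.umin = 6;
	printf("%d\n", jgt_taken(r, 5));  /* 1: umin > 5, always taken  */
	return 0;
}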
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
@@ -4152,21 +4226,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
/* detect if R == 0 where R was initialized to zero earlier */
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == SCALAR_VALUE &&
tnum_is_const(dst_reg->var_off)) {
if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
(opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
if (BPF_SRC(insn->code) == BPF_K) {
int pred = is_branch_taken(dst_reg, insn->imm, opcode);
if (pred == 1) {
/* only follow the goto, ignore fall-through */
*insn_idx += insn->off;
return 0;
} else {
/* if (imm != imm) goto pc+off;
* only follow fall-through branch, since
} else if (pred == 0) {
/* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
@@ -4980,7 +5048,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state, *new;
int i, j, err;
int i, j, err, states_cnt = 0;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -5007,8 +5075,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
return 1;
}
sl = sl->next;
states_cnt++;
}
if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
return 0;
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach outer most bpf_exit (which means it's safe)
@@ -5148,6 +5220,9 @@ static int do_check(struct bpf_verifier_env *env)
goto process_bpf_exit;
}
if (signal_pending(current))
return -EAGAIN;
if (need_resched())
cond_resched();

net/bpf/test_run.c

@@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
return ret;
}
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
u32 *time)
{
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
enum bpf_cgroup_storage_type stype;
u64 time_start, time_spent = 0;
u32 ret = 0, i;
u32 i;
for_each_cgroup_storage_type(stype) {
storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
repeat = 1;
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
ret = bpf_test_run_one(prog, ctx, storage);
*ret = bpf_test_run_one(prog, ctx, storage);
if (need_resched()) {
if (signal_pending(current))
break;
@@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
return ret;
return 0;
}
static int bpf_test_finish(const union bpf_attr *kattr,
@@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
__skb_push(skb, hh_len);
if (is_direct_pkt_access)
bpf_compute_data_pointers(skb);
retval = bpf_test_run(prog, skb, repeat, &duration);
ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
if (ret) {
kfree_skb(skb);
kfree(sk);
return ret;
}
if (!is_l2) {
if (skb_headroom(skb) < hh_len) {
int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
xdp.rxq = &rxqueue->xdp_rxq;
retval = bpf_test_run(prog, &xdp, repeat, &duration);
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
if (ret)
goto out;
if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
xdp.data_end != xdp.data + size)
size = xdp.data_end - xdp.data;
ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
kfree(data);
return ret;
}

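From user space the split matters when driving BPF_PROG_TEST_RUN: a nonzero return now means the run itself failed, while retval carries the program's own return code. A hedged sketch against libbpf's wrapper of that era (prog_fd and the packet buffer are assumed to come from elsewhere):

#include <errno.h>
#include <stdio.h>
#include <bpf/bpf.h>

int run_once(int prog_fd, void *pkt, __u32 pkt_len)
{
	char out[1500];
	__u32 out_len = sizeof(out), retval = 0, duration = 0;
	int err;

	/* err != 0 reports setup or interruption failures; retval is
	 * what the program itself returned for the last repetition. */
	err = bpf_prog_test_run(prog_fd, 1, pkt, pkt_len,
				out, &out_len, &retval, &duration);
	if (err)
		return -errno;

	printf("prog returned %u in %u ns\n", retval, duration);
	return 0;
}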
net/core/filter.c

@@ -4890,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct net *net;
family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
if (unlikely(family == AF_UNSPEC || flags ||
!((s32)netns_id < 0 || netns_id <= S32_MAX)))
goto out;
if (skb->dev)
caller_net = dev_net(skb->dev);
else
caller_net = sock_net(skb->sk);
if (netns_id) {
if ((s32)netns_id < 0) {
net = caller_net;
sk = sk_lookup(net, tuple, skb, family, proto);
} else {
net = get_net_ns_by_id(caller_net, netns_id);
if (unlikely(!net))
goto out;
sk = sk_lookup(net, tuple, skb, family, proto);
put_net(net);
} else {
net = caller_net;
sk = sk_lookup(net, tuple, skb, family, proto);
}
if (sk)
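The reworked guard accepts exactly two shapes of netns parameter, which a small standalone predicate (mirroring the kernel expression; the sample values are illustrative) makes explicit: anything negative when truncated to s32 means "current netns", 0 through S32_MAX is a relative netns ID, and everything else is rejected.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the predicate guarding bpf_sk_lookup() above; the cast
 * truncates to the low 32 bits, as the kernel's (s32) cast does. */
static int netns_id_valid(uint64_t netns_id)
{
	return (int32_t)netns_id < 0 || netns_id <= (uint64_t)INT32_MAX;
}

int main(void)
{
	printf("%d\n", netns_id_valid((uint64_t)-1)); /* 1: BPF_F_CURRENT_NETNS */
	printf("%d\n", netns_id_valid(5));            /* 1: relative netns ID   */
	printf("%d\n", netns_id_valid(1ULL << 32));   /* 0: reserved            */
	return 0;
}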
@@ -5435,8 +5436,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (size != size_default)
return false;
break;
case bpf_ctx_range(struct __sk_buff, flow_keys):
if (size != sizeof(struct bpf_flow_keys *))
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
if (size != sizeof(__u64))
return false;
break;
default:
@@ -5464,7 +5465,7 @@ static bool sk_filter_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, data_end):
case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
return false;
}
@@ -5489,7 +5490,7 @@ static bool cg_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
@@ -5530,7 +5531,7 @@ static bool lwt_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
return false;
}
@@ -5756,7 +5757,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
return false;
}
@@ -5958,7 +5959,7 @@ static bool sk_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
return false;
}
@@ -6039,7 +6040,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
info->reg_type = PTR_TO_FLOW_KEYS;
break;
case bpf_ctx_range(struct __sk_buff, tc_classid):

tools/bpf/bpftool/btf_dumper.c

@@ -32,7 +32,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
}
static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
const void *data)
__u8 bit_offset, const void *data)
{
int actual_type_id;
@@ -40,7 +40,7 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
if (actual_type_id < 0)
return actual_type_id;
return btf_dumper_do_type(d, actual_type_id, 0, data);
return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
}
static void btf_dumper_enum(const void *data, json_writer_t *jw)
@@ -237,7 +237,7 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf_dumper_modifier(d, type_id, data);
return btf_dumper_modifier(d, type_id, bit_offset, data);
default:
jsonw_printf(d->jw, "(unsupported-kind");
return -EINVAL;

tools/include/uapi/linux/bpf.h

@@ -2170,7 +2170,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
@@ -2187,12 +2187,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
* If the *netns* is zero, then the socket lookup table in the
* netns associated with the *ctx* will be used. For the TC hooks,
* this in the netns of the device in the skb. For socket hooks,
* this in the netns of the socket. If *netns* is non-zero, then
* it specifies the ID of the netns relative to the netns
* associated with the *ctx*.
* If the *netns* is a negative signed 32-bit integer, then the
* socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
* equal to zero then it specifies the ID of the netns relative to
* the netns associated with the *ctx*. *netns* values beyond the
* range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2201,8 +2203,10 @@ union bpf_attr {
* **CONFIG_NET** configuration option.
* Return
* Pointer to *struct bpf_sock*, or NULL in case of failure.
* For sockets with reuseport option, the *struct bpf_sock*
* result is from reuse->socks[] using the hash of the tuple.
*
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for UDP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
@@ -2219,12 +2223,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
* If the *netns* is zero, then the socket lookup table in the
* netns associated with the *ctx* will be used. For the TC hooks,
* this in the netns of the device in the skb. For socket hooks,
* this in the netns of the socket. If *netns* is non-zero, then
* it specifies the ID of the netns relative to the netns
* associated with the *ctx*.
* If the *netns* is a negative signed 32-bit integer, then the
* socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
* equal to zero then it specifies the ID of the netns relative to
* the netns associated with the *ctx*. *netns* values beyond the
* range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2233,6 +2239,8 @@ union bpf_attr {
* **CONFIG_NET** configuration option.
* Return
* Pointer to *struct bpf_sock*, or NULL in case of failure.
* For sockets with reuseport option, the *struct bpf_sock*
* result is from reuse->socks[] using the hash of the tuple.
*
* int bpf_sk_release(struct bpf_sock *sk)
* Description
@@ -2405,6 +2413,9 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* Current network namespace */
#define BPF_F_CURRENT_NETNS (-1L)
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6_INLINE
};
#define __bpf_md_ptr(type, name) \
union { \
type name; \
__u64 :64; \
} __attribute__((aligned(8)))
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -2456,7 +2473,7 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
struct bpf_flow_keys *flow_keys;
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
};
struct bpf_tunnel_key {
@@ -2572,8 +2589,8 @@ enum sk_action {
* be added to the end of this structure
*/
struct sk_msg_md {
void *data;
void *data_end;
__bpf_md_ptr(void *, data);
__bpf_md_ptr(void *, data_end);
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -2589,8 +2606,9 @@ struct sk_reuseport_md {
* Start of directly accessible data. It begins from
* the tcp/udp header.
*/
void *data;
void *data_end; /* End of directly accessible data */
__bpf_md_ptr(void *, data);
/* End of directly accessible data */
__bpf_md_ptr(void *, data_end);
/*
* Total length of packet (starting from the tcp/udp header).
* Note that the directly accessible bytes (data_end - data)

tools/testing/selftests/bpf/bpf_helpers.h

@@ -154,12 +154,12 @@ static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned int netns_id,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned int netns_id,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =

tools/testing/selftests/bpf/test_btf.c

@@ -432,11 +432,11 @@ static struct btf_raw_test raw_tests[] = {
/* const void* */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
/* typedef const void * const_void_ptr */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
/* struct A { */ /* [4] */
BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
/* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
/* const_void_ptr m; */
BTF_MEMBER_ENC(NAME_TBD, 3, 0),
BTF_MEMBER_ENC(NAME_TBD, 4, 0),
/* } */
BTF_END_RAW,
},
@@ -494,10 +494,10 @@ static struct btf_raw_test raw_tests[] = {
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
/* const void* */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
/* typedef const void * const_void_ptr */ /* [4] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
/* const_void_ptr[4] */ /* [5] */
BTF_TYPE_ARRAY_ENC(3, 1, 4),
/* typedef const void * const_void_ptr */
BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
/* const_void_ptr[4] */
BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [5] */
BTF_END_RAW,
},
.str_sec = "\0const_void_ptr",
@@ -1292,6 +1292,367 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "type != 0",
},
{
.descr = "typedef (invalid name, name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(0, 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "typedef_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "typedef (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__!int",
.str_sec_size = sizeof("\0__!int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "typedef_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "ptr type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "ptr_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "volatile type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "volatile_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "const type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "const_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "restrict type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */
BTF_END_RAW,
},
.str_sec = "\0__int",
.str_sec_size = sizeof("\0__int"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "restrict_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "fwd type (invalid name, name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__skb",
.str_sec_size = sizeof("\0__skb"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "fwd_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "fwd type (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
BTF_END_RAW,
},
.str_sec = "\0__!skb",
.str_sec_size = sizeof("\0__!skb"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "fwd_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "array type (invalid name, name_off <> 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), /* [2] */
BTF_ARRAY_ENC(1, 1, 4),
BTF_END_RAW,
},
.str_sec = "\0__skb",
.str_sec_size = sizeof("\0__skb"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "array_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "struct type (name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A",
.str_sec_size = sizeof("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct type (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A!\0B",
.str_sec_size = sizeof("\0A!\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "struct member (name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A",
.str_sec_size = sizeof("\0A"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "struct member (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
BTF_END_RAW,
},
.str_sec = "\0A\0B*",
.str_sec_size = sizeof("\0A\0B*"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "struct_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "enum type (name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_END_RAW,
},
.str_sec = "\0A\0B",
.str_sec_size = sizeof("\0A\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
},
{
.descr = "enum type (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(NAME_TBD,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_END_RAW,
},
.str_sec = "\0A!\0B",
.str_sec_size = sizeof("\0A!\0B"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "enum member (invalid name, name_off = 0)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(0, 0),
BTF_END_RAW,
},
.str_sec = "",
.str_sec_size = sizeof(""),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "enum member (invalid name, invalid identifier)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_ENC(0,
BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
sizeof(int)), /* [2] */
BTF_ENUM_ENC(NAME_TBD, 0),
BTF_END_RAW,
},
.str_sec = "\0A!",
.str_sec_size = sizeof("\0A!"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "enum_type_check_btf",
.key_size = sizeof(int),
.value_size = sizeof(int),
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
.btf_load_err = true,
.err_str = "Invalid name",
},
{
.descr = "arraymap invalid btf key (a bit field)",
.raw_types = {

tools/testing/selftests/bpf/test_sk_lookup_kern.c

@@ -72,7 +72,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
return TC_ACT_SHOT;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);
sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
if (sk)
bpf_sk_release(sk);
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
@@ -84,7 +84,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk)
bpf_sk_release(sk);
return 0;
@@ -97,7 +97,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
struct bpf_sock *sk;
__u32 family = 0;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
bpf_sk_release(sk);
family = sk->family;
@@ -112,7 +112,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
struct bpf_sock *sk;
__u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
sk += 1;
bpf_sk_release(sk);
@@ -127,7 +127,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
struct bpf_sock *sk;
__u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk += 1;
if (sk)
bpf_sk_release(sk);
@@ -139,7 +139,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
return 0;
}
@@ -149,7 +149,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
bpf_sk_release(sk);
bpf_sk_release(sk);
return 0;
@@ -161,7 +161,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
bpf_sk_release(sk);
return 0;
}
@@ -169,7 +169,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
void lookup_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
SEC("fail_no_release_subcall")

tools/testing/selftests/bpf/test_verifier.c

@@ -8576,7 +8576,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map_hash_8b = { 4 },
.errstr = "R0 invalid mem access 'inv'",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -10547,7 +10547,7 @@ static struct bpf_test tests[] = {
"check deducing bounds from const, 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
@@ -14230,7 +14230,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
reject_from_alignment = fd_prog < 0 &&
(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
strstr(bpf_vlog, "Unknown alignment.");
strstr(bpf_vlog, "misaligned");
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (reject_from_alignment) {
printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",