bpf: fix cb access in socket filter programs

eBPF socket filter programs may see junk in the 'u32 cb[5]' area,
since it could have been used by protocol layers earlier.

For socket filter programs used in af_packet we need to clean
20 bytes of the skb->cb area if it could be used by the program.
For programs attached to TCP/UDP sockets we need to save/restore
these 20 bytes, since they are used by protocol layers.

Remove the SK_RUN_FILTER macro, since it is no longer used.

Long term we may move this bpf cb area to per-cpu scratch, but that
requires the addition of new 'per-cpu load/store' instructions,
so it is not suitable as a short-term fix.

Fixes: d691f9e8d4 ("bpf: allow programs to write to certain skb fields")
Reported-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alexei Starovoitov 2015-10-07 10:55:41 -07:00 committed by David S. Miller
parent d49ae37c61
commit ff936a04e5
5 changed files with 51 additions and 18 deletions
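
For context, the sketch below is a minimal eBPF socket filter (restricted C, compiled with clang -O2 -target bpf) that reads the __sk_buff cb[] words the commit message describes. The program name and section name are illustrative, and the load/attach path (bpf(BPF_PROG_LOAD) plus setsockopt(SO_ATTACH_BPF)) is omitted; the point is only that, before this fix, such a read could observe leftover protocol-layer scratch data instead of zeroed (or saved/restored) bytes.

/* Illustrative only: a socket filter that reads __sk_buff->cb[].
 * Assumes clang -O2 -target bpf; loader and attach code not shown.
 */
#include <linux/bpf.h>

__attribute__((section("socket"), used))
int cb_peek_filter(struct __sk_buff *skb)
{
	/* Without the fix, cb[0..4] may hold junk left behind by
	 * TCP/UDP or af_packet code instead of a clean scratch area.
	 */
	if (skb->cb[0] != 0)
		return 0;	/* drop: unexpected scratch contents */

	return skb->len;	/* accept the whole packet */
}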

include/linux/bpf.h

@@ -100,6 +100,8 @@ enum bpf_access_type {
 	BPF_WRITE = 2
 };
 
+struct bpf_prog;
+
 struct bpf_verifier_ops {
 	/* return eBPF function prototype for verification */
 	const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
@@ -111,7 +113,7 @@ struct bpf_verifier_ops {
 	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
 				  int src_reg, int ctx_off,
-				  struct bpf_insn *insn);
+				  struct bpf_insn *insn, struct bpf_prog *prog);
 };
struct bpf_prog_type_list {
@@ -120,8 +122,6 @@ struct bpf_prog_type_list {
 	enum bpf_prog_type type;
 };
 
-struct bpf_prog;
-
 struct bpf_prog_aux {
 	atomic_t refcnt;
 	u32 used_map_cnt;

include/linux/filter.h

@@ -13,6 +13,7 @@
 #include <linux/printk.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <net/sch_generic.h>
 
 #include <asm/cacheflush.h>
@@ -302,10 +303,6 @@ struct bpf_prog_aux;
 	bpf_size;				\
 })
 
-/* Macro to invoke filter function. */
-#define SK_RUN_FILTER(filter, ctx) \
-	(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
-
 #ifdef CONFIG_COMPAT
 /* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
@@ -329,6 +326,7 @@ struct bpf_prog {
 	kmemcheck_bitfield_begin(meta);
 	u16			jited:1,	/* Is our filter JIT'ed? */
 				gpl_compatible:1, /* Is filter GPL compatible? */
+				cb_access:1,	/* Is control block accessed? */
 				dst_needed:1;	/* Do we need dst entry? */
 	kmemcheck_bitfield_end(meta);
 	u32			len;		/* Number of filter blocks */
@@ -352,6 +350,39 @@ struct sk_filter {
 
 #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
 
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+				       struct sk_buff *skb)
+{
+	u8 *cb_data = qdisc_skb_cb(skb)->data;
+	u8 saved_cb[QDISC_CB_PRIV_LEN];
+	u32 res;
+
+	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
+		     QDISC_CB_PRIV_LEN);
+
+	if (unlikely(prog->cb_access)) {
+		memcpy(saved_cb, cb_data, sizeof(saved_cb));
+		memset(cb_data, 0, sizeof(saved_cb));
+	}
+
+	res = BPF_PROG_RUN(prog, skb);
+
+	if (unlikely(prog->cb_access))
+		memcpy(cb_data, saved_cb, sizeof(saved_cb));
+
+	return res;
+}
+
+static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
+					struct sk_buff *skb)
+{
+	u8 *cb_data = qdisc_skb_cb(skb)->data;
+
+	if (unlikely(prog->cb_access))
+		memset(cb_data, 0, QDISC_CB_PRIV_LEN);
+
+	return BPF_PROG_RUN(prog, skb);
+}
+
 static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
 	return max(sizeof(struct bpf_prog),

kernel/bpf/verifier.c

@@ -2024,7 +2024,7 @@ static int convert_ctx_accesses(struct verifier_env *env)
 		cnt = env->prog->aux->ops->
 			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
-					   insn->off, insn_buf);
+					   insn->off, insn_buf, env->prog);
 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;

net/core/filter.c

@@ -56,10 +56,10 @@
  *	@sk: sock associated with &sk_buff
  *	@skb: buffer to filter
  *
- * Run the filter code and then cut skb->data to correct size returned by
- * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
+ * Run the eBPF program and then cut skb->data to correct size returned by
+ * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
  * than pkt_len we keep whole skb->data. This is the socket level
- * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
+ * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
  * be accepted or -EPERM if the packet should be tossed.
 *
 */
@@ -83,7 +83,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
+		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
 
 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
@@ -1736,7 +1736,8 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 				      int src_reg, int ctx_off,
-				      struct bpf_insn *insn_buf)
+				      struct bpf_insn *insn_buf,
+				      struct bpf_prog *prog)
 {
 	struct bpf_insn *insn = insn_buf;
@@ -1827,6 +1828,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 		offsetof(struct __sk_buff, cb[4]):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+		prog->cb_access = 1;
 
 		ctx_off -= offsetof(struct __sk_buff, cb[0]);
 		ctx_off += offsetof(struct sk_buff, cb);
 		ctx_off += offsetof(struct qdisc_skb_cb, data);

net/packet/af_packet.c

@@ -1423,7 +1423,7 @@ static unsigned int fanout_demux_bpf(struct packet_fanout *f,
 	rcu_read_lock();
 	prog = rcu_dereference(f->bpf_prog);
 	if (prog)
-		ret = BPF_PROG_RUN(prog, skb) % num;
+		ret = bpf_prog_run_clear_cb(prog, skb) % num;
 	rcu_read_unlock();
 
 	return ret;
@@ -1939,16 +1939,16 @@ out_free:
 	return err;
 }
 
-static unsigned int run_filter(const struct sk_buff *skb,
-				      const struct sock *sk,
-				      unsigned int res)
+static unsigned int run_filter(struct sk_buff *skb,
+			       const struct sock *sk,
+			       unsigned int res)
 {
 	struct sk_filter *filter;
 
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter != NULL)
-		res = SK_RUN_FILTER(filter, skb);
+		res = bpf_prog_run_clear_cb(filter->prog, skb);
 	rcu_read_unlock();
 
 	return res;