remarkable-linux/net/netfilter/xt_bpf.c
Jann Horn 03e82f2b21 netfilter: xt_bpf: add overflow checks
[ Upstream commit 6ab405114b ]

Check whether inputs from userspace are too long (explicit length field too
big or string not null-terminated) to avoid out-of-bounds reads.
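In sketch form (mirroring the two checks visible in the xt_bpf.c listing below, not a literal quote of the patch hunks), the fix rejects both kinds of oversized input up front:

    /* bytecode mode: the explicit length field must not exceed the
     * fixed-size bpf_program[] array embedded in the match info
     */
    if (len > XT_BPF_MAX_NUM_INSTR)
            return -EINVAL;

    /* pinned-path mode: the path string must be null-terminated
     * within its XT_BPF_PATH_MAX buffer
     */
    if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
            return -EINVAL;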

As far as I can tell, this can at worst lead to very limited kernel heap
memory disclosure or oopses.

This bug can be triggered by an unprivileged user even if the xt_bpf module
is not loaded: iptables is available in network namespaces, and the xt_bpf
module can be autoloaded.

Triggering the bug with a classic BPF filter with fake length 0x1000 causes
the following KASAN report:

==================================================================
BUG: KASAN: slab-out-of-bounds in bpf_prog_create+0x84/0xf0
Read of size 32768 at addr ffff8801eff2c494 by task test/4627

CPU: 0 PID: 4627 Comm: test Not tainted 4.15.0-rc1+ #1
[...]
Call Trace:
 dump_stack+0x5c/0x85
 print_address_description+0x6a/0x260
 kasan_report+0x254/0x370
 ? bpf_prog_create+0x84/0xf0
 memcpy+0x1f/0x50
 bpf_prog_create+0x84/0xf0
 bpf_mt_check+0x90/0xd6 [xt_bpf]
[...]
Allocated by task 4627:
 kasan_kmalloc+0xa0/0xd0
 __kmalloc_node+0x47/0x60
 xt_alloc_table_info+0x41/0x70 [x_tables]
[...]
The buggy address belongs to the object at ffff8801eff2c3c0
                which belongs to the cache kmalloc-2048 of size 2048
The buggy address is located 212 bytes inside of
                2048-byte region [ffff8801eff2c3c0, ffff8801eff2cbc0)
[...]
==================================================================
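A quick size check ties the fake length to the report (assuming the usual 8-byte struct sock_filter and the uapi limit XT_BPF_MAX_NUM_INSTR == 64):

    copied by bpf_prog_create(): 0x1000 * sizeof(struct sock_filter) = 4096 * 8 = 32768 bytes
    legitimate maximum:          64 * 8                              =            512 bytes

i.e. exactly the "Read of size 32768" reported above, far past the bpf_program[] array in the match info.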

Fixes: e6f30c7317 ("netfilter: x_tables: add xt_bpf match")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2018-02-25 11:08:01 +01:00

/* Xtables module to match packets using a BPF filter.
 * Copyright 2013 Google Inc.
 * Written by Willem de Bruijn <willemb@google.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <linux/netfilter/xt_bpf.h>
#include <linux/netfilter/x_tables.h>

MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>");
MODULE_DESCRIPTION("Xtables: BPF filter match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_bpf");
MODULE_ALIAS("ip6t_bpf");

static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
                                   struct bpf_prog **ret)
{
        struct sock_fprog_kern program;

        if (len > XT_BPF_MAX_NUM_INSTR)
                return -EINVAL;

        program.len = len;
        program.filter = insns;

        if (bpf_prog_create(ret, &program)) {
                pr_info("bpf: check failed: parse error\n");
                return -EINVAL;
        }

        return 0;
}

static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
{
        struct bpf_prog *prog;

        prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        *ret = prog;
        return 0;
}

static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
{
        mm_segment_t oldfs = get_fs();
        int retval, fd;

        if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
                return -EINVAL;

        set_fs(KERNEL_DS);
        fd = bpf_obj_get_user(path);
        set_fs(oldfs);
        if (fd < 0)
                return fd;

        retval = __bpf_mt_check_fd(fd, ret);
        sys_close(fd);
        return retval;
}

static int bpf_mt_check(const struct xt_mtchk_param *par)
{
        struct xt_bpf_info *info = par->matchinfo;

        return __bpf_mt_check_bytecode(info->bpf_program,
                                       info->bpf_program_num_elem,
                                       &info->filter);
}

static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
{
        struct xt_bpf_info_v1 *info = par->matchinfo;

        if (info->mode == XT_BPF_MODE_BYTECODE)
                return __bpf_mt_check_bytecode(info->bpf_program,
                                               info->bpf_program_num_elem,
                                               &info->filter);
        else if (info->mode == XT_BPF_MODE_FD_ELF)
                return __bpf_mt_check_fd(info->fd, &info->filter);
        else if (info->mode == XT_BPF_MODE_PATH_PINNED)
                return __bpf_mt_check_path(info->path, &info->filter);
        else
                return -EINVAL;
}

static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct xt_bpf_info *info = par->matchinfo;

        return BPF_PROG_RUN(info->filter, skb);
}

static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct xt_bpf_info_v1 *info = par->matchinfo;

        return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb);
}

static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
{
        const struct xt_bpf_info *info = par->matchinfo;

        bpf_prog_destroy(info->filter);
}

static void bpf_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
        const struct xt_bpf_info_v1 *info = par->matchinfo;

        bpf_prog_destroy(info->filter);
}

static struct xt_match bpf_mt_reg[] __read_mostly = {
        {
                .name       = "bpf",
                .revision   = 0,
                .family     = NFPROTO_UNSPEC,
                .checkentry = bpf_mt_check,
                .match      = bpf_mt,
                .destroy    = bpf_mt_destroy,
                .matchsize  = sizeof(struct xt_bpf_info),
                .usersize   = offsetof(struct xt_bpf_info, filter),
                .me         = THIS_MODULE,
        },
        {
                .name       = "bpf",
                .revision   = 1,
                .family     = NFPROTO_UNSPEC,
                .checkentry = bpf_mt_check_v1,
                .match      = bpf_mt_v1,
                .destroy    = bpf_mt_destroy_v1,
                .matchsize  = sizeof(struct xt_bpf_info_v1),
                .usersize   = offsetof(struct xt_bpf_info_v1, filter),
                .me         = THIS_MODULE,
        },
};

static int __init bpf_mt_init(void)
{
        return xt_register_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

static void __exit bpf_mt_exit(void)
{
        xt_unregister_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

module_init(bpf_mt_init);
module_exit(bpf_mt_exit);