
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Fix suspicious RCU usage in ipset, from Jozsef Kadlecsik.

2) Use kvcalloc, from Joe Perches.

3) Flush flowtable hardware workqueue after garbage collection run,
   from Paul Blakey.

4) Missing flowtable hardware workqueue flush from nf_flow_table_free(),
   also from Paul.

5) Restore NF_FLOW_HW_DEAD in flow_offload_work_del(), from Paul.

6) Flowtable documentation fixes, from Matteo Croce.
====================

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2020-02-01 12:38:20 -08:00
commit b7c3a17c60
6 changed files with 28 additions and 26 deletions


@@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
 table inet x {
 	flowtable f {
-		hook ingress priority 0 devices = { eth0, eth1 };
+		hook ingress priority 0; devices = { eth0, eth1 };
 	}
 	chain y {
 		type filter hook forward priority 0; policy accept;


@@ -1483,31 +1483,34 @@ ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
 };
 
 static int
-dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
+ip_set_dump_start(struct netlink_callback *cb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
 	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
 	struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
 	struct nlattr *attr = (void *)nlh + min_len;
+	struct sk_buff *skb = cb->skb;
+	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
 	u32 dump_type;
-	ip_set_id_t index;
 	int ret;
 
 	ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
 			nlh->nlmsg_len - min_len,
 			ip_set_dump_policy, NULL);
 	if (ret)
-		return ret;
+		goto error;
 
 	cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
 	if (cda[IPSET_ATTR_SETNAME]) {
+		ip_set_id_t index;
 		struct ip_set *set;
 
 		set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
				      &index);
-		if (!set)
-			return -ENOENT;
-
+		if (!set) {
+			ret = -ENOENT;
+			goto error;
+		}
 		dump_type = DUMP_ONE;
 		cb->args[IPSET_CB_INDEX] = index;
 	} else {
@@ -1523,10 +1526,17 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
 	cb->args[IPSET_CB_DUMP] = dump_type;
 
 	return 0;
+
+error:
+	/* We have to create and send the error message manually :-( */
+	if (nlh->nlmsg_flags & NLM_F_ACK) {
+		netlink_ack(cb->skb, nlh, ret, NULL);
+	}
+	return ret;
 }
 
 static int
-ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	ip_set_id_t index = IPSET_INVALID_ID, max;
 	struct ip_set *set = NULL;
@@ -1537,18 +1547,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	bool is_destroyed;
 	int ret = 0;
 
-	if (!cb->args[IPSET_CB_DUMP]) {
-		ret = dump_init(cb, inst);
-		if (ret < 0) {
-			nlh = nlmsg_hdr(cb->skb);
-			/* We have to create and send the error message
-			 * manually :-(
-			 */
-			if (nlh->nlmsg_flags & NLM_F_ACK)
-				netlink_ack(cb->skb, nlh, ret, NULL);
-			return ret;
-		}
-	}
+	if (!cb->args[IPSET_CB_DUMP])
+		return -EINVAL;
 
 	if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
 		goto out;
@@ -1684,7 +1684,8 @@ static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 	{
 		struct netlink_dump_control c = {
-			.dump = ip_set_dump_start,
+			.start = ip_set_dump_start,
+			.dump = ip_set_dump_do,
 			.done = ip_set_dump_done,
 		};
 
 		return netlink_dump_start(ctnl, skb, nlh, &c);
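For context, the fix above works by moving the attribute parsing and set lookup into the netlink dump ->start() callback, which the netlink core runs once per dump request before the first ->dump() call. A minimal sketch of that callback arrangement follows; my_dump_start/my_dump_do/my_dump_done and my_request are placeholder names for illustration, not ipset symbols:

#include <linux/netlink.h>

/* Placeholder handlers, for illustration only. */
static int my_dump_start(struct netlink_callback *cb)
{
	/* Runs once per dump; parse the request and stash state in cb->args[]. */
	return 0;
}

static int my_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* Called repeatedly to fill skb; return > 0 for "more", 0 when finished. */
	return 0;
}

static int my_dump_done(struct netlink_callback *cb)
{
	/* Release whatever my_dump_start() set up. */
	return 0;
}

static int my_request(struct sock *nl, struct sk_buff *skb,
		      const struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.start = my_dump_start,
		.dump = my_dump_do,
		.done = my_dump_done,
	};

	return netlink_dump_start(nl, skb, nlh, &c);
}

The patch applies the same shape: ip_set_dump_start() becomes the .start callback and the old dump handler is renamed to ip_set_dump_do().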


@@ -2248,8 +2248,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 
 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
-	hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
-			      GFP_KERNEL | __GFP_ZERO);
+	hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
 
 	if (hash && nulls)
 		for (i = 0; i < nr_slots; i++)
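The conversion above does not change behaviour: kvcalloc() is a thin wrapper that folds __GFP_ZERO in itself. In the kernel headers it is defined essentially as follows (reproduced here for reference):

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

Both spellings therefore allocate the same zeroed array; the new one simply states that intent more directly.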


@@ -529,9 +529,9 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
 					  struct net_device *dev)
 {
-	nf_flow_table_offload_flush(flowtable);
 	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
 	flush_delayed_work(&flowtable->gc_work);
+	nf_flow_table_offload_flush(flowtable);
 }
 
 void nf_flow_table_cleanup(struct net_device *dev)
@@ -553,6 +553,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
 	cancel_delayed_work_sync(&flow_table->gc_work);
 	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
 	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
+	nf_flow_table_offload_flush(flow_table);
 	rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
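Both hunks above address the same ordering requirement: the cleanup/gc iteration may still queue hardware offload work, so the offload workqueue has to be flushed after that pass (and, in nf_flow_table_free(), before the rhashtable is destroyed), otherwise queued work can run against state that is already gone. A rough sketch of the general pattern, using placeholder names rather than the flowtable symbols:

#include <linux/workqueue.h>

/* Illustration only: wq, gc_pass() and table_destroy() are placeholders. */
static struct workqueue_struct *wq;

static void gc_pass(void)
{
	/* May still queue teardown work items on wq. */
}

static void table_destroy(void)
{
	/* Frees the structures that queued work items dereference. */
}

static void table_teardown(void)
{
	gc_pass();		/* last pass that can schedule offload work */
	flush_workqueue(wq);	/* wait for that work to finish ...         */
	table_destroy();	/* ... before the backing state goes away   */
}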


@@ -675,6 +675,7 @@ static void flow_offload_work_del(struct flow_offload_work *offload)
 {
 	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
 	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+	set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
 }
 
 static void flow_offload_tuple_stats(struct flow_offload_work *offload,
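Restoring the set_bit() matters because NF_FLOW_HW_DEAD is how the deletion work signals, once the hardware rules are gone, that the software side may release the flow entry. A simplified sketch of that kind of consumer-side check, not the exact upstream gc code; release_flow() is a placeholder:

#include <net/netfilter/nf_flow_table.h>

/* Placeholder for whatever finally unlinks and frees the entry. */
static void release_flow(struct flow_offload *flow);

static void gc_check_one(struct flow_offload *flow)
{
	if (test_bit(NF_FLOW_HW, &flow->flags) &&
	    test_bit(NF_FLOW_HW_DEAD, &flow->flags))
		release_flow(flow);	/* hardware state already torn down */
}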


@@ -939,14 +939,14 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
  *
  * @size: number of entries
  *
- * Return: NULL or kmalloc'd or vmalloc'd array
+ * Return: NULL or zeroed kmalloc'd or vmalloc'd array
  */
 unsigned int *xt_alloc_entry_offsets(unsigned int size)
 {
 	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
 		return NULL;
 
-	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
+	return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
 
 }
 EXPORT_SYMBOL(xt_alloc_entry_offsets);
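For the x_tables helper the caller-visible contract is likewise unchanged: a zeroed offset array sized by a userspace-supplied entry count, NULL if that count exceeds XT_MAX_TABLE_SIZE, and released with kvfree(). A hedged usage sketch; record_rule_offsets() and nr_entries are invented for illustration, not a real caller:

#include <linux/mm.h>
#include <linux/netfilter/x_tables.h>

/* Illustration only: not an actual x_tables call site. */
static int record_rule_offsets(unsigned int nr_entries)
{
	unsigned int *offsets;

	offsets = xt_alloc_entry_offsets(nr_entries);	/* zeroed, or NULL */
	if (!offsets)
		return -ENOMEM;

	/* ... fill offsets[] while walking the rule blob ... */

	kvfree(offsets);
	return 0;
}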