Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following batch contains netfilter updates for net-next. Basically,
enhancements for xt_recent, skipping the zeroing of the timer in conntrack,
a fix for a linking problem with the recent redirect support for nf_tables,
ipset updates and a couple of cleanups. More specifically, they are:

1) Raise the maximum number of packets remembered per IP address in
   xt_recent while retaining backward compatibility, from Florian Westphal.

2) Skip zeroing timer area in nf_conn objects, also from Florian.

3) Inspect IPv4 and IPv6 traffic from the bridge to allow filtering on
   meta l4proto and the transport layer header, from Alvaro Neira.

4) Fix linking problems in the new redirect support when CONFIG_IPV6=n
   and IP6_NF_IPTABLES=n.

And ipset updates from Jozsef Kadlecsik:

5) Support updating element extensions when the set is full (fixes
   netfilter bugzilla id 880).

6) Fix set match with 32-bits userspace / 64-bits kernel.

7) Indicate explicitly when /0 networks are supported in ipset.

8) Simplify cidr handling for hash:*net* types.

9) Allocate the proper size of memory when /0 networks are supported.

10) Explicitly add padding elements to hash:net,net and hash:net,port,net,
    because the elements must be u32 sized for the used hash function.

Jozsef is also cooking the ipset RCU conversion, which should land soon if
it reaches the merge window in time.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2014-12-05 20:56:46 -08:00
commit 244ebd9f8f
28 changed files with 372 additions and 254 deletions

View File

@ -1,9 +0,0 @@
#ifndef _NF_NAT_REDIRECT_IPV4_H_
#define _NF_NAT_REDIRECT_IPV4_H_
unsigned int
nf_nat_redirect_ipv4(struct sk_buff *skb,
const struct nf_nat_ipv4_multi_range_compat *mr,
unsigned int hooknum);
#endif /* _NF_NAT_REDIRECT_IPV4_H_ */

View File

@ -1,8 +0,0 @@
#ifndef _NF_NAT_REDIRECT_IPV6_H_
#define _NF_NAT_REDIRECT_IPV6_H_
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
unsigned int hooknum);
#endif /* _NF_NAT_REDIRECT_IPV6_H_ */

View File

@ -92,12 +92,18 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
/* If we were expected by an expectation, this will be it */
struct nf_conn *master;
/* Timer function; drops refcnt when it goes off. */
struct timer_list timeout;
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
/* all members below initialized via memset */
u8 __nfct_init_offset[0];
/* If we were expected by an expectation, this will be it */
struct nf_conn *master;
#if defined(CONFIG_NF_CONNTRACK_MARK)
u_int32_t mark;
#endif
@ -108,9 +114,6 @@ struct nf_conn {
/* Extensions */
struct nf_ct_ext *ext;
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
/* Storage reserved for other modules, must be the last member */
union nf_conntrack_proto proto;

View File

@ -0,0 +1,12 @@
#ifndef _NF_NAT_REDIRECT_H_
#define _NF_NAT_REDIRECT_H_
unsigned int
nf_nat_redirect_ipv4(struct sk_buff *skb,
const struct nf_nat_ipv4_multi_range_compat *mr,
unsigned int hooknum);
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
unsigned int hooknum);
#endif /* _NF_NAT_REDIRECT_H_ */

View File

@ -0,0 +1,7 @@
#ifndef _NET_NF_TABLES_BRIDGE_H
#define _NET_NF_TABLES_BRIDGE_H
int nft_bridge_iphdr_validate(struct sk_buff *skb);
int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
#endif /* _NET_NF_TABLES_BRIDGE_H */

View File

@ -256,11 +256,17 @@ enum {
IPSET_COUNTER_GT,
};
struct ip_set_counter_match {
/* Backward compatibility for set match v3 */
struct ip_set_counter_match0 {
__u8 op;
__u64 value;
};
struct ip_set_counter_match {
__aligned_u64 value;
__u8 op;
};
/* Interface to iptables/ip6tables */
#define SO_IP_SET 83
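
The reshuffled ip_set_counter_match layout above is what fixes the set match
for 32-bit userspace on a 64-bit kernel (item 6 in the summary). Below is a
minimal user-space sketch, with illustrative struct names rather than the UAPI
ones, of why the old ordering disagrees between 32-bit and 64-bit builds and
why an 8-byte-aligned 64-bit field placed first does not:

/*
 * Illustrative sketch (not the UAPI header). Compile once as 32-bit and once
 * as 64-bit: the old layout disagrees on size and offsets, the new one matches.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* old layout: on i386 a 64-bit integer gets only 4-byte alignment inside a
 * struct, giving a 12-byte struct with value at offset 4, while a 64-bit
 * kernel expects 16 bytes with value at offset 8 */
struct counter_match_old {
	uint8_t  op;
	uint64_t value;
};

/* new layout: an explicitly 8-byte-aligned 64-bit field placed first, which
 * is what __aligned_u64 provides, so both builds agree on size and offsets */
struct counter_match_new {
	uint64_t value __attribute__((aligned(8)));
	uint8_t  op;
};

int main(void)
{
	printf("old: size=%zu value@%zu\n", sizeof(struct counter_match_old),
	       offsetof(struct counter_match_old, value));
	printf("new: size=%zu value@%zu\n", sizeof(struct counter_match_new),
	       offsetof(struct counter_match_new, value));
	return 0;
}

The old struct is kept as ip_set_counter_match0 so that the existing revision 3
match retains its binary layout, while the new revision 4 match added further
down in xt_set.c uses the realigned struct.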

View File

@ -66,8 +66,8 @@ struct xt_set_info_target_v2 {
struct xt_set_info_match_v3 {
struct xt_set_info match_set;
struct ip_set_counter_match packets;
struct ip_set_counter_match bytes;
struct ip_set_counter_match0 packets;
struct ip_set_counter_match0 bytes;
__u32 flags;
};
@ -81,4 +81,13 @@ struct xt_set_info_target_v3 {
__u32 timeout;
};
/* Revision 4 match */
struct xt_set_info_match_v4 {
struct xt_set_info match_set;
struct ip_set_counter_match packets;
struct ip_set_counter_match bytes;
__u32 flags;
};
#endif /*_XT_SET_H*/

View File

@ -13,6 +13,82 @@
#include <linux/module.h>
#include <linux/netfilter_bridge.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_bridge.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
int nft_bridge_iphdr_validate(struct sk_buff *skb)
{
struct iphdr *iph;
u32 len;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return 0;
iph = ip_hdr(skb);
if (iph->ihl < 5 || iph->version != 4)
return 0;
len = ntohs(iph->tot_len);
if (skb->len < len)
return 0;
else if (len < (iph->ihl*4))
return 0;
if (!pskb_may_pull(skb, iph->ihl*4))
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(nft_bridge_iphdr_validate);
int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return 0;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
return 0;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out)
{
if (nft_bridge_iphdr_validate(skb))
nft_set_pktinfo_ipv4(pkt, ops, skb, in, out);
else
nft_set_pktinfo(pkt, ops, skb, in, out);
}
static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out)
{
#if IS_ENABLED(CONFIG_IPV6)
if (nft_bridge_ip6hdr_validate(skb) &&
nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0)
return;
#endif
nft_set_pktinfo(pkt, ops, skb, in, out);
}
static unsigned int
nft_do_chain_bridge(const struct nf_hook_ops *ops,
@ -23,7 +99,17 @@ nft_do_chain_bridge(const struct nf_hook_ops *ops,
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, ops, skb, in, out);
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
break;
case htons(ETH_P_IPV6):
nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
break;
default:
nft_set_pktinfo(&pkt, ops, skb, in, out);
break;
}
return nft_do_chain(&pkt, ops);
}

View File

@ -14,6 +14,7 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/nf_tables_bridge.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
@ -35,30 +36,6 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
skb_pull(nskb, ETH_HLEN);
}
static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
{
struct iphdr *iph;
u32 len;
if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
return 0;
iph = ip_hdr(oldskb);
if (iph->ihl < 5 || iph->version != 4)
return 0;
len = ntohs(iph->tot_len);
if (oldskb->len < len)
return 0;
else if (len < (iph->ihl*4))
return 0;
if (!pskb_may_pull(oldskb, iph->ihl*4))
return 0;
return 1;
}
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
@ -66,7 +43,7 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
const struct tcphdr *oth;
struct tcphdr _oth;
if (!nft_reject_iphdr_validate(oldskb))
if (!nft_bridge_iphdr_validate(oldskb))
return;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
@ -101,7 +78,7 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
void *payload;
__wsum csum;
if (!nft_reject_iphdr_validate(oldskb))
if (!nft_bridge_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
@ -146,25 +123,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
return 0;
hdr = ipv6_hdr(oldskb);
if (hdr->version != 6)
return 0;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
return 0;
return 1;
}
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb, int hook)
{
@ -174,7 +132,7 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net,
unsigned int otcplen;
struct ipv6hdr *nip6h;
if (!nft_reject_ip6hdr_validate(oldskb))
if (!nft_bridge_ip6hdr_validate(oldskb))
return;
oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
@ -207,7 +165,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
unsigned int len;
void *payload;
if (!nft_reject_ip6hdr_validate(oldskb))
if (!nft_bridge_ip6hdr_validate(oldskb))
return;
/* Include "As much of invoking packet as possible without the ICMPv6

View File

@ -104,12 +104,6 @@ config NF_NAT_MASQUERADE_IPV4
This is the kernel functionality to provide NAT in the masquerade
flavour (automatic source address selection).
config NF_NAT_REDIRECT_IPV4
tristate "IPv4 redirect support"
help
This is the kernel functionality to provide NAT in the redirect
flavour (redirect packets to local machine).
config NFT_MASQ_IPV4
tristate "IPv4 masquerading support for nf_tables"
depends on NF_TABLES_IPV4
@ -123,7 +117,7 @@ config NFT_REDIR_IPV4
tristate "IPv4 redirect support for nf_tables"
depends on NF_TABLES_IPV4
depends on NFT_REDIR
select NF_NAT_REDIRECT_IPV4
select NF_NAT_REDIRECT
help
This is the expression that provides IPv4 redirect support for
nf_tables.

View File

@ -31,7 +31,6 @@ obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
obj-$(CONFIG_NF_NAT_REDIRECT_IPV4) += nf_nat_redirect_ipv4.o
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o

View File

@ -14,7 +14,7 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/ipv4/nf_nat_redirect.h>
#include <net/netfilter/nf_nat_redirect.h>
#include <net/netfilter/nft_redir.h>
static void nft_redir_ipv4_eval(const struct nft_expr *expr,

View File

@ -82,12 +82,6 @@ config NF_NAT_MASQUERADE_IPV6
This is the kernel functionality to provide NAT in the masquerade
flavour (automatic source address selection) for IPv6.
config NF_NAT_REDIRECT_IPV6
tristate "IPv6 redirect support"
help
This is the kernel functionality to provide NAT in the redirect
flavour (redirect packet to local machine) for IPv6.
config NFT_MASQ_IPV6
tristate "IPv6 masquerade support for nf_tables"
depends on NF_TABLES_IPV6
@ -101,7 +95,7 @@ config NFT_REDIR_IPV6
tristate "IPv6 redirect support for nf_tables"
depends on NF_TABLES_IPV6
depends on NFT_REDIR
select NF_NAT_REDIRECT_IPV6
select NF_NAT_REDIRECT
help
This is the expression that provides IPv4 redirect support for
nf_tables.

View File

@ -19,7 +19,6 @@ obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
obj-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o
obj-$(CONFIG_NF_NAT_REDIRECT_IPV6) += nf_nat_redirect_ipv6.o
# defrag
nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o

View File

@ -422,6 +422,6 @@ module_init(nf_log_ipv6_init);
module_exit(nf_log_ipv6_exit);
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Netfilter IPv4 packet logging");
MODULE_DESCRIPTION("Netfilter IPv6 packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);

View File

@ -1,75 +0,0 @@
/*
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
* NAT funded by Astaro.
*/
#include <linux/if.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/types.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/ipv6/nf_nat_redirect.h>
static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
unsigned int hooknum)
{
struct nf_nat_range newrange;
struct in6_addr newdst;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (hooknum == NF_INET_LOCAL_OUT) {
newdst = loopback_addr;
} else {
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
bool addr = false;
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
if (idev != NULL) {
list_for_each_entry(ifa, &idev->addr_list, if_list) {
newdst = ifa->addr;
addr = true;
break;
}
}
rcu_read_unlock();
if (!addr)
return NF_DROP;
}
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
newrange.min_addr.in6 = newdst;
newrange.max_addr.in6 = newdst;
newrange.min_proto = range->min_proto;
newrange.max_proto = range->max_proto;
return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
}
EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");

View File

@ -15,7 +15,7 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nft_redir.h>
#include <net/netfilter/ipv6/nf_nat_redirect.h>
#include <net/netfilter/nf_nat_redirect.h>
static void nft_redir_ipv6_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1],

View File

@ -411,6 +411,13 @@ config NF_NAT_TFTP
depends on NF_CONNTRACK && NF_NAT
default NF_NAT && NF_CONNTRACK_TFTP
config NF_NAT_REDIRECT
tristate "IPv4/IPv6 redirect support"
depends on NF_NAT
help
This is the kernel functionality to redirect packets to local
machine through NAT.
config NETFILTER_SYNPROXY
tristate
@ -844,8 +851,7 @@ config NETFILTER_XT_TARGET_RATEEST
config NETFILTER_XT_TARGET_REDIRECT
tristate "REDIRECT target support"
depends on NF_NAT
select NF_NAT_REDIRECT_IPV4 if NF_NAT_IPV4
select NF_NAT_REDIRECT_IPV6 if NF_NAT_IPV6
select NF_NAT_REDIRECT
---help---
REDIRECT is a special case of NAT: all incoming connections are
mapped onto the incoming interface's address, causing the packets to

View File

@ -51,6 +51,7 @@ nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
obj-$(CONFIG_NF_NAT) += nf_nat.o
obj-$(CONFIG_NF_NAT_REDIRECT) += nf_nat_redirect.o
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o

View File

@ -147,16 +147,22 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#else
#define __CIDR(cidr, i) (cidr)
#endif
/* cidr + 1 is stored in net_prefixes to support /0 */
#define SCIDR(cidr, i) (__CIDR(cidr, i) + 1)
#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
#define CIDR(cidr, i) (__CIDR(cidr, i) + 1)
/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
#define GCIDR(cidr, i) (__CIDR(cidr, i) + 1)
#define NCIDR(cidr) (cidr)
#else
#define CIDR(cidr, i) (__CIDR(cidr, i))
#define GCIDR(cidr, i) (__CIDR(cidr, i))
#define NCIDR(cidr) (cidr - 1)
#endif
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
#ifdef IP_SET_HASH_WITH_MULTI
#ifdef IP_SET_HASH_WITH_NET0
#define NLEN(family) (SET_HOST_MASK(family) + 1)
#else
#define NLEN(family) SET_HOST_MASK(family)
@ -292,24 +298,22 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
int i, j;
/* Add in increasing prefix order, so larger cidr first */
for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) {
for (i = 0, j = -1; i < nets_length && h->nets[i].cidr[n]; i++) {
if (j != -1)
continue;
else if (h->nets[i].cidr[n] < cidr)
j = i;
else if (h->nets[i].cidr[n] == cidr) {
h->nets[i].nets[n]++;
h->nets[cidr - 1].nets[n]++;
return;
}
}
if (j != -1) {
for (; i > j; i--) {
for (; i > j; i--)
h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
h->nets[i].nets[n] = h->nets[i - 1].nets[n];
}
}
h->nets[i].cidr[n] = cidr;
h->nets[i].nets[n] = 1;
h->nets[cidr - 1].nets[n] = 1;
}
static void
@ -320,16 +324,12 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
for (i = 0; i < nets_length; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
if (h->nets[i].nets[n] > 1 || i == net_end ||
h->nets[i + 1].nets[n] == 0) {
h->nets[i].nets[n]--;
h->nets[cidr -1].nets[n]--;
if (h->nets[cidr -1].nets[n] > 0)
return;
}
for (j = i; j < net_end && h->nets[j].nets[n]; j++) {
for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
h->nets[j].nets[n] = h->nets[j + 1].nets[n];
}
h->nets[j].nets[n] = 0;
h->nets[j].cidr[n] = 0;
return;
}
}
@ -486,7 +486,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
for (k = 0; k < IPSET_NET_COUNT; k++)
mtype_del_cidr(h, CIDR(data->cidr, k),
mtype_del_cidr(h, SCIDR(data->cidr, k),
nets_length, k);
#endif
ip_set_ext_destroy(set, data);
@ -633,29 +633,6 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0;
if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set)) {
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
n = hbucket(t,key);
if (n->pos) {
/* Choosing the first entry in the array to replace */
j = 0;
goto reuse_slot;
}
rcu_read_unlock_bh();
}
if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
mtype_expire(set, h, NLEN(set->family), set->dsize);
if (h->elements >= h->maxelem) {
if (net_ratelimit())
pr_warn("Set %s is full, maxelem %u reached\n",
set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
}
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
@ -680,15 +657,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
j != AHASH_MAX(h) + 1)
j = i;
}
if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set) && n->pos) {
/* Choosing the first entry in the array to replace */
j = 0;
goto reuse_slot;
}
if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
mtype_expire(set, h, NLEN(set->family), set->dsize);
if (h->elements >= h->maxelem) {
if (net_ratelimit())
pr_warn("Set %s is full, maxelem %u reached\n",
set->name, h->maxelem);
ret = -IPSET_ERR_HASH_FULL;
goto out;
}
reuse_slot:
if (j != AHASH_MAX(h) + 1) {
/* Fill out reused slot */
data = ahash_data(n, j, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++) {
mtype_del_cidr(h, CIDR(data->cidr, i),
mtype_del_cidr(h, SCIDR(data->cidr, i),
NLEN(set->family), i);
mtype_add_cidr(h, CIDR(d->cidr, i),
mtype_add_cidr(h, SCIDR(d->cidr, i),
NLEN(set->family), i);
}
#endif
@ -705,7 +699,7 @@ reuse_slot:
data = ahash_data(n, n->pos++, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family),
mtype_add_cidr(h, SCIDR(d->cidr, i), NLEN(set->family),
i);
#endif
h->elements++;
@ -766,7 +760,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
for (j = 0; j < IPSET_NET_COUNT; j++)
mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family),
mtype_del_cidr(h, SCIDR(d->cidr, j), NLEN(set->family),
j);
#endif
ip_set_ext_destroy(set, data);
@ -827,15 +821,15 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
u8 nets_length = NLEN(set->family);
pr_debug("test by nets\n");
for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) {
for (; j < nets_length && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
mtype_data_reset_elem(d, &orig);
mtype_data_netmask(d, h->nets[j].cidr[0], false);
for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi;
mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]), false);
for (k = 0; k < nets_length && h->nets[k].cidr[1] && !multi;
k++) {
mtype_data_netmask(d, h->nets[k].cidr[1], true);
mtype_data_netmask(d, NCIDR(h->nets[k].cidr[1]), true);
#else
mtype_data_netmask(d, h->nets[j].cidr[0]);
mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]));
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
@ -883,7 +877,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* If we test an IP address and not a network address,
* try all possible network sizes */
for (i = 0; i < IPSET_NET_COUNT; i++)
if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family))
if (GCIDR(d->cidr, i) != SET_HOST_MASK(set->family))
break;
if (i == IPSET_NET_COUNT) {
ret = mtype_test_cidrs(set, d, ext, mext, flags);
@ -1107,8 +1101,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
hsize = sizeof(*h);
#ifdef IP_SET_HASH_WITH_NETS
hsize += sizeof(struct net_prefixes) *
(set->family == NFPROTO_IPV4 ? 32 : 128);
hsize += sizeof(struct net_prefixes) * NLEN(set->family);
#endif
h = kzalloc(hsize, GFP_KERNEL);
if (!h)
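
Items 7-9 in the summary revolve around the cidr + 1 encoding implemented by
the SCIDR()/GCIDR()/NCIDR() macros and the extra NLEN slot for /0 seen above.
A small stand-alone sketch of the bookkeeping idea, with illustrative names
rather than the ipset ones: the ordered cidr[] list stores the prefix length
plus one, so 0 can keep meaning "unused slot" while a real /0 network remains
representable, and the per-prefix reference counts are indexed by the real
prefix length:

/*
 * Illustrative sketch (not the ipset code): prefix-length bookkeeping with
 * the "store cidr + 1" trick, so an all-zero slot means "unused" and /0 is
 * still representable.
 */
#include <stdio.h>
#include <string.h>

#define HOST_MASK 32              /* IPv4 */
#define NLEN (HOST_MASK + 1)      /* room for /0 .. /32 */

struct prefix_book {
	unsigned char cidr[NLEN]; /* real prefix length + 1, kept sorted, 0 = unused */
	unsigned int  nets[NLEN]; /* nets[c] = number of elements with prefix /c */
};

static void add_cidr(struct prefix_book *b, unsigned char cidr)
{
	unsigned char scidr = cidr + 1; /* encoded value, never 0 */
	int i, j = -1;

	/* keep larger (more specific) prefixes first, as the set does */
	for (i = 0; i < NLEN && b->cidr[i]; i++) {
		if (b->cidr[i] == scidr) {
			b->nets[cidr]++;
			return;
		}
		if (j == -1 && b->cidr[i] < scidr)
			j = i;
	}
	if (j != -1) {
		memmove(&b->cidr[j + 1], &b->cidr[j], i - j);
		i = j;
	}
	b->cidr[i] = scidr;
	b->nets[cidr] = 1;
}

int main(void)
{
	struct prefix_book b = { { 0 }, { 0 } };

	add_cidr(&b, 0);   /* a /0 entry: stored as 1, distinguishable from "empty" */
	add_cidr(&b, 24);
	add_cidr(&b, 24);
	printf("stored order: %d %d, count(/24)=%u, count(/0)=%u\n",
	       b.cidr[0], b.cidr[1], b.nets[24], b.nets[0]);
	return 0;
}

On lookup, mtype_test_cidrs() walks the ordered list and decodes the stored
value back with NCIDR() before applying the netmask, and the create path now
sizes h->nets[] with NLEN(family), i.e. host mask + 1 entries, which is the
allocation fix of item 9.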

View File

@ -115,6 +115,7 @@ iface_add(struct rb_root *root, const char **iface)
#define IP_SET_HASH_WITH_NETS
#define IP_SET_HASH_WITH_RBTREE
#define IP_SET_HASH_WITH_MULTI
#define IP_SET_HASH_WITH_NET0
#define STREQ(a, b) (strcmp(a, b) == 0)

View File

@ -46,6 +46,7 @@ struct hash_netnet4_elem {
__be64 ipcmp;
};
u8 nomatch;
u8 padding;
union {
u8 cidr[2];
u16 ccmp;
@ -271,6 +272,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netnet6_elem {
union nf_inet_addr ip[2];
u8 nomatch;
u8 padding;
union {
u8 cidr[2];
u16 ccmp;

View File

@ -53,6 +53,7 @@ struct hash_netportnet4_elem {
u8 cidr[2];
u16 ccmp;
};
u16 padding;
u8 nomatch:1;
u8 proto;
};
@ -324,6 +325,7 @@ struct hash_netportnet6_elem {
u8 cidr[2];
u16 ccmp;
};
u16 padding;
u8 nomatch:1;
u8 proto;
};
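
Item 10 in the summary: the padding members added to the hash:net,net and
hash:net,port,net elements above make the key a whole number of u32 words with
no compiler-inserted (and therefore uninitialized) holes that the word-based
hash would otherwise mix in. A stand-alone sketch with an illustrative struct,
assuming a jhash2()-style hash over u32 words:

/*
 * Illustrative sketch (not the ipset structs) of why the explicit padding
 * byte matters: the set hashes elements as an array of u32 words, so the
 * key must be u32 sized and must not contain holes with undefined content.
 */
#include <stdint.h>
#include <stdio.h>

struct netnet_key {
	uint32_t ip[2];
	uint8_t  nomatch;
	uint8_t  padding;     /* explicit, so the byte before the u16 union is
	                       * defined instead of an uninitialized hole */
	union {
		uint8_t  cidr[2];
		uint16_t ccmp;
	};
};

/* the word-based hash (jhash2() in the kernel) consumes sizeof(key) / 4 words */
_Static_assert(sizeof(struct netnet_key) % 4 == 0,
	       "hash element must be a whole number of u32 words");

int main(void)
{
	printf("element: %zu bytes = %zu u32 words\n",
	       sizeof(struct netnet_key), sizeof(struct netnet_key) / 4);
	return 0;
}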

View File

@ -824,22 +824,19 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
atomic_dec(&net->ct.count);
return ERR_PTR(-ENOMEM);
}
/*
* Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
* and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
*/
memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
offsetof(struct nf_conn, proto) -
offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
/* Don't set timer yet: wait for confirmation */
setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
write_pnet(&ct->ct_net, net);
memset(&ct->__nfct_init_offset[0], 0,
offsetof(struct nf_conn, proto) -
offsetof(struct nf_conn, __nfct_init_offset[0]));
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (zone) {
struct nf_conntrack_zone *nf_ct_zone;
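
For context on item 2 in the summary: the new __nfct_init_offset[0] marker in
struct nf_conn lets __nf_conntrack_alloc() bulk-zero only the tail of the
object, so members that are set up explicitly right afterwards, notably the
timer, are no longer cleared by the memset just to be overwritten. A minimal
stand-alone sketch of the marker-plus-offsetof technique, with illustrative
names rather than the real nf_conn layout:

/*
 * Minimal sketch: a zero-length array member splits the struct into an
 * explicitly-initialized head and a bulk-zeroed tail.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct conn_like {
	unsigned long status;      /* head: assigned explicitly after allocation */
	void *timeout_cb;          /* head: stands in for the timer, set up separately */
	char init_marker[0];       /* zero-length marker (a GCC extension, kernel style) */
	void *master;              /* tail: bulk-zeroed below */
	unsigned int mark;
	void *ext;
	unsigned int proto_state;  /* stands in for the trailing proto union */
};

static void init_tail(struct conn_like *c)
{
	/* one memset covering [init_marker, proto_state): the head members,
	 * including the timer stand-in, are not cleared only to be rewritten */
	memset(c->init_marker, 0,
	       offsetof(struct conn_like, proto_state) -
	       offsetof(struct conn_like, init_marker));
}

int main(void)
{
	struct conn_like c = { .status = 1, .mark = 42 };

	init_tail(&c);
	printf("status=%lu mark=%u\n", c.status, c.mark); /* prints status=1 mark=0 */
	return 0;
}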

View File

@ -20,12 +20,13 @@
#include <linux/netfilter.h>
#include <linux/types.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/ipv4/nf_nat_redirect.h>
#include <net/netfilter/nf_nat_redirect.h>
unsigned int
nf_nat_redirect_ipv4(struct sk_buff *skb,
@ -78,5 +79,49 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
unsigned int hooknum)
{
struct nf_nat_range newrange;
struct in6_addr newdst;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (hooknum == NF_INET_LOCAL_OUT) {
newdst = loopback_addr;
} else {
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
bool addr = false;
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
if (idev != NULL) {
list_for_each_entry(ifa, &idev->addr_list, if_list) {
newdst = ifa->addr;
addr = true;
break;
}
}
rcu_read_unlock();
if (!addr)
return NF_DROP;
}
newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
newrange.min_addr.in6 = newdst;
newrange.max_addr.in6 = newdst;
newrange.min_proto = range->min_proto;
newrange.max_proto = range->max_proto;
return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
}
EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");

View File

@ -26,8 +26,7 @@
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/ipv4/nf_nat_redirect.h>
#include <net/netfilter/ipv6/nf_nat_redirect.h>
#include <net/netfilter/nf_nat_redirect.h>
static unsigned int
redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)

View File

@ -43,25 +43,29 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_recent");
MODULE_ALIAS("ip6t_recent");
static unsigned int ip_list_tot = 100;
static unsigned int ip_pkt_list_tot = 20;
static unsigned int ip_list_hash_size = 0;
static unsigned int ip_list_perms = 0644;
static unsigned int ip_list_uid = 0;
static unsigned int ip_list_gid = 0;
static unsigned int ip_list_tot __read_mostly = 100;
static unsigned int ip_list_hash_size __read_mostly;
static unsigned int ip_list_perms __read_mostly = 0644;
static unsigned int ip_list_uid __read_mostly;
static unsigned int ip_list_gid __read_mostly;
module_param(ip_list_tot, uint, 0400);
module_param(ip_pkt_list_tot, uint, 0400);
module_param(ip_list_hash_size, uint, 0400);
module_param(ip_list_perms, uint, 0400);
module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
/* retained for backwards compatibility */
static unsigned int ip_pkt_list_tot __read_mostly;
module_param(ip_pkt_list_tot, uint, 0400);
MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
#define XT_RECENT_MAX_NSTAMPS 256
struct recent_entry {
struct list_head list;
struct list_head lru_list;
@ -79,6 +83,7 @@ struct recent_table {
union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
u8 nstamps_max_mask;
struct list_head lru_list;
struct list_head iphash[0];
};
@ -90,7 +95,8 @@ struct recent_net {
#endif
};
static int recent_net_id;
static int recent_net_id __read_mostly;
static inline struct recent_net *recent_pernet(struct net *net)
{
return net_generic(net, recent_net_id);
@ -171,12 +177,15 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
u_int16_t family, u_int8_t ttl)
{
struct recent_entry *e;
unsigned int nstamps_max = t->nstamps_max_mask;
if (t->entries >= ip_list_tot) {
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
recent_entry_remove(t, e);
}
e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * ip_pkt_list_tot,
nstamps_max += 1;
e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * nstamps_max,
GFP_ATOMIC);
if (e == NULL)
return NULL;
@ -197,7 +206,7 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
{
e->index %= ip_pkt_list_tot;
e->index &= t->nstamps_max_mask;
e->stamps[e->index++] = jiffies;
if (e->index > e->nstamps)
e->nstamps = e->index;
@ -326,6 +335,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
kuid_t uid;
kgid_t gid;
#endif
unsigned int nstamp_mask;
unsigned int i;
int ret = -EINVAL;
size_t sz;
@ -349,19 +359,33 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
return -EINVAL;
if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
return -EINVAL;
if (info->hit_count > ip_pkt_list_tot) {
pr_info("hitcount (%u) is larger than "
"packets to be remembered (%u)\n",
info->hit_count, ip_pkt_list_tot);
if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
pr_info("hitcount (%u) is larger than allowed maximum (%u)\n",
info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
return -EINVAL;
}
if (info->name[0] == '\0' ||
strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
return -EINVAL;
if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
else if (info->hit_count)
nstamp_mask = roundup_pow_of_two(info->hit_count) - 1;
else
nstamp_mask = 32 - 1;
mutex_lock(&recent_mutex);
t = recent_table_lookup(recent_net, info->name);
if (t != NULL) {
if (info->hit_count > t->nstamps_max_mask) {
pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n",
info->hit_count, t->nstamps_max_mask + 1,
info->name);
ret = -EINVAL;
goto out;
}
t->refcnt++;
ret = 0;
goto out;
@ -377,6 +401,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
goto out;
}
t->refcnt = 1;
t->nstamps_max_mask = nstamp_mask;
memcpy(&t->mask, &info->mask, sizeof(t->mask));
strcpy(t->name, info->name);
@ -497,9 +522,12 @@ static void recent_seq_stop(struct seq_file *s, void *v)
static int recent_seq_show(struct seq_file *seq, void *v)
{
const struct recent_entry *e = v;
struct recent_iter_state *st = seq->private;
const struct recent_table *t = st->table;
unsigned int i;
i = (e->index - 1) % ip_pkt_list_tot;
i = (e->index - 1) & t->nstamps_max_mask;
if (e->family == NFPROTO_IPV4)
seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u",
&e->addr.ip, e->ttl, e->stamps[i], e->index);
@ -717,7 +745,9 @@ static int __init recent_mt_init(void)
{
int err;
if (!ip_list_tot || !ip_pkt_list_tot || ip_pkt_list_tot > 255)
BUILD_BUG_ON_NOT_POWER_OF_2(XT_RECENT_MAX_NSTAMPS);
if (!ip_list_tot || ip_pkt_list_tot >= XT_RECENT_MAX_NSTAMPS)
return -EINVAL;
ip_list_hash_size = 1 << fls(ip_list_tot);
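
Item 1 in the summary: xt_recent now keeps a per-table nstamps_max_mask instead
of relying on the global ip_pkt_list_tot, rounding the requested hit count up
to a power of two (capped at XT_RECENT_MAX_NSTAMPS = 256) so the timestamp
index can wrap with a mask rather than a modulo. A minimal user-space sketch of
that rounding and wrapping, not the xt_recent internals themselves:

/*
 * Illustrative sketch of the per-table timestamp ring: the requested hit
 * count is rounded up to a power of two and the slot index wraps with a
 * bitmask instead of a modulo.
 */
#include <stdio.h>

#define XT_RECENT_MAX_NSTAMPS 256 /* must remain a power of two */

static unsigned int round_up_pow2(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int hit_count = 20;                       /* e.g. --hitcount 20 */
	unsigned int mask = round_up_pow2(hit_count) - 1;  /* nstamps_max_mask = 31 */
	unsigned long stamps[XT_RECENT_MAX_NSTAMPS];
	unsigned int index = 0, i;

	/* record "packets": the ring holds mask + 1 (here 32) timestamps */
	for (i = 0; i < 100; i++) {
		index &= mask;        /* wrap, as recent_entry_update() now does */
		stamps[index++] = i;  /* jiffies in the real code */
	}
	printf("ring size %u, last slot written %u, last stamp %lu\n",
	       mask + 1, (index - 1) & mask, stamps[(index - 1) & mask]);
	return 0;
}

The ip_pkt_list_tot module parameter is retained only for backwards
compatibility: when it is set it still sizes the per-table ring, otherwise the
rule's hit count (or a default of 32 slots) does, which is how the per-address
maximum grows to 256 without breaking old setups.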

View File

@ -157,7 +157,7 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par)
/* Revision 3 match */
static bool
match_counter(u64 counter, const struct ip_set_counter_match *info)
match_counter0(u64 counter, const struct ip_set_counter_match0 *info)
{
switch (info->op) {
case IPSET_COUNTER_NONE:
@ -182,6 +182,52 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
info->match_set.flags, info->flags, UINT_MAX);
int ret;
if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)
opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
ret = match_set(info->match_set.index, skb, par, &opt,
info->match_set.flags & IPSET_INV_MATCH);
if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
return ret;
if (!match_counter0(opt.ext.packets, &info->packets))
return 0;
return match_counter0(opt.ext.bytes, &info->bytes);
}
#define set_match_v3_checkentry set_match_v1_checkentry
#define set_match_v3_destroy set_match_v1_destroy
/* Revision 4 match */
static bool
match_counter(u64 counter, const struct ip_set_counter_match *info)
{
switch (info->op) {
case IPSET_COUNTER_NONE:
return true;
case IPSET_COUNTER_EQ:
return counter == info->value;
case IPSET_COUNTER_NE:
return counter != info->value;
case IPSET_COUNTER_LT:
return counter < info->value;
case IPSET_COUNTER_GT:
return counter > info->value;
}
return false;
}
static bool
set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v4 *info = par->matchinfo;
ADT_OPT(opt, par->family, info->match_set.dim,
info->match_set.flags, info->flags, UINT_MAX);
int ret;
if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)
opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
@ -197,8 +243,8 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
return match_counter(opt.ext.bytes, &info->bytes);
}
#define set_match_v3_checkentry set_match_v1_checkentry
#define set_match_v3_destroy set_match_v1_destroy
#define set_match_v4_checkentry set_match_v1_checkentry
#define set_match_v4_destroy set_match_v1_destroy
/* Revision 0 interface: backward compatible with netfilter/iptables */
@ -573,6 +619,27 @@ static struct xt_match set_matches[] __read_mostly = {
.destroy = set_match_v3_destroy,
.me = THIS_MODULE
},
/* new revision for counters support: update, match */
{
.name = "set",
.family = NFPROTO_IPV4,
.revision = 4,
.match = set_match_v4,
.matchsize = sizeof(struct xt_set_info_match_v4),
.checkentry = set_match_v4_checkentry,
.destroy = set_match_v4_destroy,
.me = THIS_MODULE
},
{
.name = "set",
.family = NFPROTO_IPV6,
.revision = 4,
.match = set_match_v4,
.matchsize = sizeof(struct xt_set_info_match_v4),
.checkentry = set_match_v4_checkentry,
.destroy = set_match_v4_destroy,
.me = THIS_MODULE
},
};
static struct xt_target set_targets[] __read_mostly = {