
netfilter: fix the race when initializing nf_ct_expect_hash_rnd

Since nf_ct_expect_dst_hash() may be called without nf_conntrack_lock
held, nf_ct_expect_hash_rnd should be initialized in an atomic way.

In this patch, we use nf_conntrack_hash_rnd instead of
nf_ct_expect_hash_rnd.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Changli Gao 2011-01-05 04:23:23 +00:00 committed by David S. Miller
parent 6623e3b24a
commit f682cefa5a
3 changed files with 22 additions and 20 deletions
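The patch makes the lazy seed initialization safe without holding nf_conntrack_lock by relying on a compare-and-swap: every racing caller draws a candidate seed, only the first cmpxchg() of 0 -> candidate wins, and 0 stays reserved to mean "not initialized yet". Below is a minimal user-space sketch of the same pattern, assuming C11 atomics and a toy random_u32() in place of the kernel's cmpxchg() and get_random_bytes(); the names seed and init_seed() are illustrative, not from the patch.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* 0 is reserved to mean "seed not initialized yet", mirroring the patch. */
static _Atomic uint32_t seed;

/* Illustrative stand-in for get_random_bytes(); not cryptographic. */
static uint32_t random_u32(void)
{
	uint32_t r;

	do {
		r = (uint32_t)rand() ^ ((uint32_t)rand() << 16);
	} while (!r);	/* never hand out 0, it means "uninitialized" */
	return r;
}

static void init_seed(void)
{
	uint32_t expected = 0;
	uint32_t candidate = random_u32();

	/* Only the first caller to swap 0 -> candidate wins; racing
	 * callers keep the winner's value, so every later hash uses
	 * the same seed even without a lock. */
	atomic_compare_exchange_strong(&seed, &expected, candidate);
}

int main(void)
{
	if (atomic_load(&seed) == 0)
		init_seed();
	printf("seed = %#x\n", (unsigned)atomic_load(&seed));
	return 0;
}

Whichever caller loses the race simply discards its candidate and reads the winner's value, so all hash computations after this point agree on one seed.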

include/net/netfilter/nf_conntrack.h

@@ -298,6 +298,8 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
+extern unsigned int nf_conntrack_hash_rnd;
+void init_nf_conntrack_hash_rnd(void);
 #define NF_CT_STAT_INC(net, count) \
 	__this_cpu_inc((net)->ct.stat->count)

net/netfilter/nf_conntrack_core.c

@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
-static unsigned int nf_conntrack_hash_rnd __read_mostly;
+unsigned int nf_conntrack_hash_rnd __read_mostly;
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 {
@@ -596,6 +596,21 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 	return dropped;
 }
+void init_nf_conntrack_hash_rnd(void)
+{
+	unsigned int rand;
+	/*
+	 * Why not initialize nf_conntrack_rnd in a "init()" function ?
+	 * Because there isn't enough entropy when system initializing,
+	 * and we initialize it as late as possible.
+	 */
+	do {
+		get_random_bytes(&rand, sizeof(rand));
+	} while (!rand);
+	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+}
 static struct nf_conn *
 __nf_conntrack_alloc(struct net *net, u16 zone,
 		     const struct nf_conntrack_tuple *orig,
@@ -605,18 +620,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 	struct nf_conn *ct;
 	if (unlikely(!nf_conntrack_hash_rnd)) {
-		unsigned int rand;
-		/*
-		 * Why not initialize nf_conntrack_rnd in a "init()" function ?
-		 * Because there isn't enough entropy when system initializing,
-		 * and we initialize it as late as possible.
-		 */
-		do {
-			get_random_bytes(&rand, sizeof(rand));
-		} while (!rand);
-		cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+		init_nf_conntrack_hash_rnd();
 		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
 		hash = hash_conntrack_raw(orig, zone);
 	}

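One subtlety in the __nf_conntrack_alloc() hunk above: the tuple hash handed in was computed while nf_conntrack_hash_rnd could still have been 0, so once init_nf_conntrack_hash_rnd() has run the hash is recomputed with the real seed; otherwise the new entry would be filed in a bucket that later, properly seeded lookups would never search. A toy sketch of that init-then-rehash rule follows, where hash32(), init_seed(), TABLE_SIZE and the fixed seed value are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 16u

static uint32_t seed;	/* 0 means "seed not initialized yet" */

/* Toy mixing hash; stands in for hash_conntrack_raw(). */
static uint32_t hash32(uint32_t key, uint32_t s)
{
	uint32_t h = key ^ s;

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h;
}

static void init_seed(void)
{
	if (!seed)
		seed = 0x9e3779b9u;	/* stand-in for a random draw */
}

int main(void)
{
	uint32_t key = 42;
	uint32_t hash = hash32(key, seed);	/* may have hashed with seed == 0 */

	if (!seed) {
		init_seed();
		/* rehash, as __nf_conntrack_alloc() does: the entry must land
		 * in the bucket that seeded lookups will search later on */
		hash = hash32(key, seed);
	}
	printf("key %u -> bucket %u\n", (unsigned)key, (unsigned)(hash % TABLE_SIZE));
	return 0;
}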
net/netfilter/nf_conntrack_expect.c

@@ -32,9 +32,7 @@
 unsigned int nf_ct_expect_hsize __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
-static unsigned int nf_ct_expect_hash_rnd __read_mostly;
 unsigned int nf_ct_expect_max __read_mostly;
-static int nf_ct_expect_hash_rnd_initted __read_mostly;
 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
@@ -77,15 +75,13 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
 {
 	unsigned int hash;
-	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
-		get_random_bytes(&nf_ct_expect_hash_rnd,
-				 sizeof(nf_ct_expect_hash_rnd));
-		nf_ct_expect_hash_rnd_initted = 1;
+	if (unlikely(!nf_conntrack_hash_rnd)) {
+		init_nf_conntrack_hash_rnd();
 	}
 	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
 		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
-		      (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
+		      (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
 	return ((u64)hash * nf_ct_expect_hsize) >> 32;
 }
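The final line of nf_ct_expect_dst_hash() maps the 32-bit jhash result onto a bucket index without a modulo: multiplying the hash by the table size and keeping the upper 32 bits of the 64-bit product yields a value in [0, nf_ct_expect_hsize). A small stand-alone sketch of that multiply-and-shift reduction, with bucket_of() and the sample values made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash onto [0, hsize) by multiply-and-shift, as in
 * nf_ct_expect_dst_hash(); avoids a division on the hot path. */
static uint32_t bucket_of(uint32_t hash, uint32_t hsize)
{
	return (uint32_t)(((uint64_t)hash * hsize) >> 32);
}

int main(void)
{
	uint32_t hsize = 256;	/* example table size */
	uint32_t hashes[] = { 0x0u, 0x7fffffffu, 0xdeadbeefu, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
		printf("hash %#010x -> bucket %u\n",
		       (unsigned)hashes[i], (unsigned)bucket_of(hashes[i], hsize));
	return 0;
}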