xfrm: state: use atomic_inc_not_zero to increment refcount

Once xfrm_state_lookup_byaddr() no longer acquires the state lock, another
CPU might be freeing the state entry at the same time.

To detect this we use atomic_inc_not_zero(); if the increment fails we
signal -EAGAIN to the caller, since our result was stale.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
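
For readers new to the pattern, here is a minimal, self-contained userspace sketch of the "increment only if not zero" idea that xfrm_state_hold_rcu() relies on. It emulates the kernel's atomic_inc_not_zero() with C11 atomics; struct obj and obj_hold() are invented names for illustration and are not part of this patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;	/* 0 means the object is already being freed */
};

/* Take a reference unless the refcount has already dropped to zero. */
static bool obj_hold(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old != 0) {
		/* On failure the CAS reloads 'old', so the loop sees a fresh value. */
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* lost the race: another thread is freeing the object */
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	printf("hold on live object:  %d\n", obj_hold(&o));	/* prints 1 */
	atomic_store(&o.refcnt, 0);				/* object now dying */
	printf("hold on dying object: %d\n", obj_hold(&o));	/* prints 0 */
	return 0;
}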

@@ -37,6 +37,11 @@
 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
 
+static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
+{
+	return atomic_inc_not_zero(&x->refcnt);
+}
+
 static inline unsigned int xfrm_dst_hash(struct net *net,
 					 const xfrm_address_t *daddr,
 					 const xfrm_address_t *saddr,
@@ -668,7 +673,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
 		if ((mark & x->mark.m) != x->mark.v)
 			continue;
-		xfrm_state_hold(x);
+		if (!xfrm_state_hold_rcu(x))
+			continue;
 		return x;
 	}
@@ -692,7 +698,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
 		if ((mark & x->mark.m) != x->mark.v)
 			continue;
-		xfrm_state_hold(x);
+		if (!xfrm_state_hold_rcu(x))
+			continue;
 		return x;
 	}
@@ -871,10 +878,14 @@ found:
 		}
 	}
 out:
-	if (x)
-		xfrm_state_hold(x);
-	else
+	if (x) {
+		if (!xfrm_state_hold_rcu(x)) {
+			*err = -EAGAIN;
+			x = NULL;
+		}
+	} else {
 		*err = acquire_in_progress ? -EAGAIN : error;
+	}
 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 	if (to_put)
 		xfrm_state_put(to_put);
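
The patch only changes the lookup side; how a caller reacts to the stale-result signal is outside this diff. Below is a hypothetical userspace sketch of the convention the commit message describes: a lookup that refuses to return a dying entry and instead reports -EAGAIN so the caller can retry. All names here (struct state, do_lookup(), lookup_state()) are made up for illustration and do not appear in the kernel.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct state { int dying; };

/* Pretend lookup: refuses to hand back an entry that is being torn down. */
static struct state *do_lookup(struct state *candidate, int *err)
{
	if (candidate->dying) {
		*err = -EAGAIN;		/* result was stale; caller may retry */
		return NULL;
	}
	*err = 0;
	return candidate;
}

/* Caller-side convention: retry a bounded number of times on -EAGAIN. */
static struct state *lookup_state(struct state *candidate)
{
	struct state *x;
	int err, tries = 3;

	do {
		x = do_lookup(candidate, &err);
	} while (!x && err == -EAGAIN && --tries);

	return x;
}

int main(void)
{
	struct state live = { .dying = 0 };
	struct state dead = { .dying = 1 };

	printf("live entry:  %s\n", lookup_state(&live) ? "found" : "not found");
	printf("dying entry: %s\n", lookup_state(&dead) ? "found" : "not found");
	return 0;
}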