pkt_sched: gen_estimator: add a new lock

gen_kill_estimator() / gen_new_estimator() are not always called with
RTNL held.

net/netfilter/xt_RATEEST.c is one user of this API that does not hold
RTNL, so random corruption can occur when "tc" and "iptables" run
concurrently.

Add a new fine-grained lock instead of trying to use RTNL in netfilter.
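
The rule introduced here, shown as a minimal hedged sketch (the helper
example_insert() is illustrative only, not part of the patch): every
access to the est_root rb-tree and the elist[] timer lists must now be
made under est_tree_lock, whether or not the caller holds RTNL.

	/* Sketch only: any access to est_root must hold est_tree_lock. */
	static DEFINE_SPINLOCK(est_tree_lock);

	static void example_insert(struct gen_estimator *est)
	{
		spin_lock(&est_tree_lock);
		gen_add_node(est);	/* rb-tree insert into est_root */
		spin_unlock(&est_tree_lock);
	}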

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ae638c47dc
parent 597a264b1a
Author: Eric Dumazet, 2010-06-08 23:39:10 +00:00
Committer: David S. Miller

net/core/gen_estimator.c

@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
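
With est_tree_lock in place, a caller that does not run under RTNL can
tear down its estimator safely. A hedged sketch of such a destroy path
(struct xt_rateest_example and xt_rateest_put_example() are illustrative
names, not the actual xt_RATEEST.c code):

	/* Illustrative only: a destroy path similar to xt_RATEEST's, which
	 * runs without RTNL.  gen_kill_estimator() now serializes against
	 * concurrent "tc" operations via est_tree_lock instead of relying
	 * on rtnl_mutex.
	 */
	struct xt_rateest_example {
		struct gnet_stats_basic_packed	bstats;
		struct gnet_stats_rate_est	rstats;
	};

	static void xt_rateest_put_example(struct xt_rateest_example *est)
	{
		gen_kill_estimator(&est->bstats, &est->rstats);
		kfree(est);
	}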