net_sched: change tcf_del_walker() to take idrinfo->lock

The action API was changed to work with actions and action_idr in a
concurrency-safe manner; however, tcf_del_walker() still uses actions
without taking a reference or idrinfo->lock first, and deletes them
directly, disregarding a possible concurrent delete.

Change tcf_del_walker() to take idrinfo->lock while iterating over actions
and use the new tcf_idr_release_unsafe() to release them while holding the
lock.
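
A plausible reading of why a dedicated lock-held helper is needed: the
regular release path, __tcf_idr_release(), takes idrinfo->lock internally
on the final put, so it cannot simply be reused once the walker itself
holds that lock. Below is a minimal, illustrative sketch of that split --
the act_table/act_entry names are hypothetical, not the kernel structures,
and the 3-argument idr_for_each_entry_ul() form matches this kernel series:

#include <linux/atomic.h>
#include <linux/idr.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct act_entry {
	refcount_t refcnt;
	atomic_t bindcnt;
	u32 index;
};

struct act_table {
	spinlock_t lock;	/* protects idr and entry teardown */
	struct idr idr;
};

/* Lock-held variant: the caller owns tbl->lock for the whole iteration,
 * so this helper must not try to take the lock again.
 */
static int act_entry_release_locked(struct act_table *tbl, struct act_entry *e)
{
	lockdep_assert_held(&tbl->lock);

	if (atomic_read(&e->bindcnt) > 0)
		return -EPERM;		/* still bound, do not delete */

	if (refcount_dec_and_test(&e->refcnt)) {
		idr_remove(&tbl->idr, e->index);
		/* actual teardown/free of the entry would go here */
		return 1;		/* deleted */
	}
	return 0;			/* another holder keeps it alive */
}

/* Flush-style walker: hold the lock across the whole IDR walk so a
 * concurrent delete cannot free entries underneath the iteration.
 */
static void act_table_flush(struct act_table *tbl)
{
	struct act_entry *e;
	unsigned long id;

	spin_lock(&tbl->lock);
	idr_for_each_entry_ul(&tbl->idr, e, id)
		act_entry_release_locked(tbl, e);
	spin_unlock(&tbl->lock);
}

This mirrors what the act_api.c hunk below does with tcf_idr_release_unsafe().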

Also, the blocking function fl_hw_destroy_tmplt() could be called when we
put a filter chain, so defer it to a work queue.
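
In the patch this deferral uses the tc helpers, struct rcu_work and
tcf_queue_work(), as the cls_flower hunks below show. As a rough,
self-contained illustration of the same idea using only the generic
workqueue API (hypothetical foo_tmplt names, and without the
RCU-grace-period step that rcu_work adds), the blocking teardown moves
into a work handler that runs from a kworker, while the put path merely
queues it:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_tmplt {
	struct work_struct work;
	/* ... state whose teardown may sleep (e.g. driver offload calls) ... */
};

/* Runs in process context from a kworker, so blocking calls are allowed. */
static void foo_tmplt_destroy_work(struct work_struct *work)
{
	struct foo_tmplt *t = container_of(work, struct foo_tmplt, work);

	/* blocking hardware teardown would go here */
	kfree(t);
}

/* May be called from a context that must not sleep, so only queue work. */
static void foo_tmplt_destroy(struct foo_tmplt *t)
{
	INIT_WORK(&t->work, foo_tmplt_destroy_work);
	schedule_work(&t->work);
}

The key property is that schedule_work() never sleeps, so the put path
stays safe in atomic context, while the handler is free to block.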

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
[xiyou.wangcong@gmail.com: heavily modify the code and changelog]
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Vlad Buslov 2018-09-19 16:37:29 -07:00 committed by David S. Miller
parent 947e326c45
commit ec3ed293e7
2 changed files with 30 additions and 3 deletions

net/sched/act_api.c

@@ -246,6 +246,20 @@ nla_put_failure:
 	goto done;
 }
 
+static int tcf_idr_release_unsafe(struct tc_action *p)
+{
+	if (atomic_read(&p->tcfa_bindcnt) > 0)
+		return -EPERM;
+
+	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
+		tcf_action_cleanup(p);
+		return ACT_P_DELETED;
+	}
+
+	return 0;
+}
+
 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 			  const struct tc_action_ops *ops)
 {
@@ -262,15 +276,19 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	if (nla_put_string(skb, TCA_KIND, ops->kind))
 		goto nla_put_failure;
 
+	spin_lock(&idrinfo->lock);
 	idr_for_each_entry_ul(idr, p, id) {
-		ret = __tcf_idr_release(p, false, true);
+		ret = tcf_idr_release_unsafe(p);
 		if (ret == ACT_P_DELETED) {
 			module_put(ops->owner);
 			n_i++;
 		} else if (ret < 0) {
+			spin_unlock(&idrinfo->lock);
 			goto nla_put_failure;
 		}
 	}
+	spin_unlock(&idrinfo->lock);
+
 	if (nla_put_u32(skb, TCA_FCNT, n_i))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);

net/sched/cls_flower.c

@@ -79,6 +79,7 @@ struct fl_flow_tmplt {
 	struct fl_flow_key mask;
 	struct flow_dissector dissector;
 	struct tcf_chain *chain;
+	struct rcu_work rwork;
 };
 
 struct cls_fl_head {
@@ -1437,12 +1438,20 @@ errout_tb:
 	return ERR_PTR(err);
 }
 
+static void fl_tmplt_destroy_work(struct work_struct *work)
+{
+	struct fl_flow_tmplt *tmplt = container_of(to_rcu_work(work),
+						 struct fl_flow_tmplt, rwork);
+
+	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
+	kfree(tmplt);
+}
+
 static void fl_tmplt_destroy(void *tmplt_priv)
 {
 	struct fl_flow_tmplt *tmplt = tmplt_priv;
 
-	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
-	kfree(tmplt);
+	tcf_queue_work(&tmplt->rwork, fl_tmplt_destroy_work);
 }
 
 static int fl_dump_key_val(struct sk_buff *skb,