
net: sched: take reference to action dev before calling offloads

In order to remove the dependency on rtnl lock when calling the hardware
offload API, take a reference to the action mirred dev when initializing
the flow_action structure in tc_setup_flow_action(). Implement function
tc_cleanup_flow_action() and use it to release the device after the
hardware offload API is done using it.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Vlad Buslov 2019-08-26 16:45:04 +03:00 committed by David S. Miller
parent 9838b20a7f
commit 5a6ff4b13d
3 changed files with 36 additions and 0 deletions
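The pattern introduced here pairs tc_setup_flow_action(), which now takes a dev_hold() on every mirred/redirect device it records, with tc_cleanup_flow_action(), which drops those references once the offload callbacks have finished. Below is a minimal, hypothetical sketch of how a classifier is expected to pair the two calls, loosely modeled on the cls_flower changes in this commit; the function name example_offload_filter() and its parameters are illustrative only and are not part of the patch.

/* Hypothetical caller sketch (not part of this patch). */
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int example_offload_filter(struct tcf_block *block,
				  struct tcf_proto *tp,
				  struct tcf_exts *exts,
				  u32 *flags, unsigned int *in_hw_count,
				  bool skip_sw)
{
	struct flow_cls_offload cls_flower = {};
	int err;

	/* A real caller (e.g. fl_hw_replace_filter()) also fills in
	 * cls_flower.common, cls_flower.command and cls_flower.cookie. */
	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(exts));
	if (!cls_flower.rule)
		return -ENOBUFS;

	/* Translate tc actions into flow_action entries; after this patch
	 * this also holds a reference on each mirred/redirect device. */
	err = tc_setup_flow_action(&cls_flower.rule->action, exts, true);
	if (err)
		goto out_free;

	/* Drivers may dereference the action devices here without relying
	 * on rtnl lock, because the references above keep them alive. */
	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, flags, in_hw_count, true);

	/* Drop the device references once the callbacks are done. */
	tc_cleanup_flow_action(&cls_flower.rule->action);
out_free:
	kfree(cls_flower.rule);
	return err;
}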

include/net/pkt_cls.h

@@ -505,6 +505,8 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
 
 int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held);
+void tc_cleanup_flow_action(struct flow_action *flow_action);
+
 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,

net/sched/cls_api.c

@@ -3265,6 +3265,27 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
 }
 EXPORT_SYMBOL(tc_setup_cb_reoffload);
 
+void tc_cleanup_flow_action(struct flow_action *flow_action)
+{
+	struct flow_action_entry *entry;
+	int i;
+
+	flow_action_for_each(i, entry, flow_action) {
+		switch (entry->id) {
+		case FLOW_ACTION_REDIRECT:
+		case FLOW_ACTION_MIRRED:
+		case FLOW_ACTION_REDIRECT_INGRESS:
+		case FLOW_ACTION_MIRRED_INGRESS:
+			if (entry->dev)
+				dev_put(entry->dev);
+			break;
+		default:
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(tc_cleanup_flow_action);
+
 int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held)
 {
@@ -3294,15 +3315,23 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 		} else if (is_tcf_mirred_egress_redirect(act)) {
 			entry->id = FLOW_ACTION_REDIRECT;
 			entry->dev = tcf_mirred_dev(act);
+			if (entry->dev)
+				dev_hold(entry->dev);
 		} else if (is_tcf_mirred_egress_mirror(act)) {
 			entry->id = FLOW_ACTION_MIRRED;
 			entry->dev = tcf_mirred_dev(act);
+			if (entry->dev)
+				dev_hold(entry->dev);
 		} else if (is_tcf_mirred_ingress_redirect(act)) {
 			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
 			entry->dev = tcf_mirred_dev(act);
+			if (entry->dev)
+				dev_hold(entry->dev);
 		} else if (is_tcf_mirred_ingress_mirror(act)) {
 			entry->id = FLOW_ACTION_MIRRED_INGRESS;
 			entry->dev = tcf_mirred_dev(act);
+			if (entry->dev)
+				dev_hold(entry->dev);
 		} else if (is_tcf_vlan(act)) {
 			switch (tcf_vlan_action(act)) {
 			case TCA_VLAN_ACT_PUSH:
@@ -3410,6 +3439,9 @@ err_out:
 	if (!rtnl_held)
 		rtnl_unlock();
 
+	if (err)
+		tc_cleanup_flow_action(flow_action);
+
 	return err;
 }
 EXPORT_SYMBOL(tc_setup_flow_action);

net/sched/cls_flower.c

@@ -465,6 +465,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 
 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, true);
+	tc_cleanup_flow_action(&cls_flower.rule->action);
 	kfree(cls_flower.rule);
 
 	if (err) {
@@ -1838,6 +1839,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
					    TC_SETUP_CLSFLOWER, &cls_flower,
					    cb_priv, &f->flags,
					    &f->in_hw_count);
+		tc_cleanup_flow_action(&cls_flower.rule->action);
 		kfree(cls_flower.rule);
 
 		if (err) {