nfp: bpf: don't allow changing MTU above BPF offload limit when active

When BPF offload is active we may need to restrict MTU changes
beyond the limitations of the kernel XDP datapath.
Allow the BPF code to veto an MTU change.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Commit: ccbdc596f4 (parent: c4f7730be5)
Author: Jakub Kicinski, 2018-01-10 12:25:57 +0000
Committed by: Daniel Borkmann
4 changed files with 46 additions and 0 deletions

drivers/net/ethernet/netronome/nfp/bpf/main.c

@@ -194,6 +194,24 @@ static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
 	return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
 }
 
+static int
+nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+	struct nfp_net *nn = netdev_priv(netdev);
+	unsigned int max_mtu;
+
+	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+		return 0;
+
+	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+	if (new_mtu > max_mtu) {
+		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
+			max_mtu);
+		return -EBUSY;
+	}
+	return 0;
+}
+
 static int
 nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
 			      u32 length)
@@ -311,6 +329,8 @@ const struct nfp_app_type app_bpf = {
 	.init		= nfp_bpf_init,
 	.clean		= nfp_bpf_clean,
 
+	.change_mtu	= nfp_bpf_change_mtu,
+
 	.extra_cap	= nfp_bpf_extra_cap,
 
 	.vnic_alloc	= nfp_bpf_vnic_alloc,

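The limit computation above deserves a note: NFP_NET_CFG_BPF_INL_MTU appears
to report the firmware's per-packet buffer budget in 64-byte units, with
32 bytes reserved as overhead (the units are a firmware ABI detail; the
arithmetic below is what the driver actually does). A worked example, with
an illustrative register value:

	/* Sketch of the check in nfp_bpf_change_mtu(); the register
	 * value 24 is made up for illustration.
	 */
	unsigned int max_mtu = 24 * 64 - 32;	/* = 1504 */

	/* new_mtu = 1500 -> allowed (hook returns 0)    */
	/* new_mtu = 9000 -> vetoed, hook returns -EBUSY */
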
drivers/net/ethernet/netronome/nfp/nfp_app.h

@@ -82,6 +82,8 @@ extern const struct nfp_app_type app_flower;
  * @repr_clean:	representor about to be unregistered
  * @repr_open:	representor netdev open callback
  * @repr_stop:	representor netdev stop callback
+ * @change_mtu:	MTU change on a netdev has been requested (veto-only, change
+ *		is not guaranteed to be committed)
  * @start:	start application logic
  * @stop:	stop application logic
  * @ctrl_msg_rx:    control message handler
@@ -120,6 +122,9 @@ struct nfp_app_type {
 	int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
 	int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
 
+	int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
+			  int new_mtu);
+
 	int (*start)(struct nfp_app *app);
 	void (*stop)(struct nfp_app *app);
 
@@ -242,6 +247,14 @@ nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
 	app->type->repr_clean(app, netdev);
 }
 
+static inline int
+nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+	if (!app || !app->type->change_mtu)
+		return 0;
+	return app->type->change_mtu(app, netdev, new_mtu);
+}
+
 static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
 {
 	app->ctrl = ctrl;

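One detail worth calling out in the helper: when there is no app, or the app
type does not implement .change_mtu, it returns 0 and the change proceeds, so
existing app types need no updates. As a sketch of how another app type could
opt in to the veto, here is a hypothetical hook that merely caps the MTU
(app_foo and nfp_foo_change_mtu are made-up names, not part of this commit):

	static int
	nfp_foo_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
	{
		/* Returning non-zero vetoes the change; 0 lets it commit. */
		if (new_mtu > 4096)
			return -EINVAL;
		return 0;
	}

	const struct nfp_app_type app_foo = {
		/* ... other callbacks ... */
		.change_mtu	= nfp_foo_change_mtu,
	};
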
drivers/net/ethernet/netronome/nfp/nfp_net_common.c

@@ -3049,6 +3049,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	struct nfp_net_dp *dp;
+	int err;
+
+	err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+	if (err)
+		return err;
 
 	dp = nfp_net_clone_dp(nn);
 	if (!dp)
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c

@@ -186,6 +186,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
 	return -EINVAL;
 }
 
+static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct nfp_repr *repr = netdev_priv(netdev);
+
+	return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+}
+
 static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +247,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
 	.ndo_open		= nfp_repr_open,
 	.ndo_stop		= nfp_repr_stop,
 	.ndo_start_xmit		= nfp_repr_xmit,
+	.ndo_change_mtu		= nfp_repr_change_mtu,
 	.ndo_get_stats64	= nfp_repr_get_stats64,
 	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
 	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
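
With both the vNIC and representor ndo hooked up, the veto surfaces to
userspace as an ordinary errno from the netlink MTU path. Something like the
following would be the expected interaction while an offloaded program is
loaded (device name and output are illustrative, not captured from hardware):

	# ip link set dev ens4np0 mtu 9000
	RTNETLINK answers: Device or resource busy

and the kernel log carries the nn_info() message naming the supported maximum.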