
Merge branch 'net-support-binding-vlan-dev-link-state-to-vlan-member-bridge-ports'

Mike Manning says:

====================
net: support binding vlan dev link state to vlan member bridge ports

For vlan filtering on bridges, the bridge may also have vlan devices
as upper devices. For switches, these are used to provide L3 packet
processing for ports that are members of a given vlan.

While it is correct that the admin state for these vlan devices is
either set directly for the device or inherited from the lower device,
the link state is also transferred from the lower device. So this is
always up if the bridge is in admin up state and there is at least one
bridge port that is up, regardless of the vlan that the port is in.

The link state of the vlan device may need to track only the state of
the subset of ports that are also members of the corresponding vlan,
rather than that of all ports.

This series provides an optional vlan flag so that the link state of
the vlan device is only up if there is at least one bridge port that is
up AND is a member of the corresponding vlan.

v2:
   - Address review comments from Nikolay Aleksandrov
     in patches 3 & 4 and add patch 5 to address bridge link down due to STP
v3:
   - Address review comment from Nikolay Aleksandrov
     in patch 4 so as to remove unnecessary inline #ifdef
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-04-19 13:58:17 -07:00
commit 1ab839281c
7 changed files with 272 additions and 21 deletions
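
As a usage sketch (not part of this series): userspace opts in per vlan device via the existing IFLA_VLAN_FLAGS rtnetlink attribute, whose payload is struct ifla_vlan_flags from the uapi header changed below. The helper name here is made up for illustration, and the surrounding RTM_NEWLINK/netlink-socket plumbing is omitted.

        /* Hypothetical helper: request bridge binding for a vlan device by
         * setting the new bit in the struct ifla_vlan_flags payload carried
         * under IFLA_VLAN_FLAGS in an RTM_NEWLINK request.  Only the new bit
         * is added to the mask, so the other flag bits keep their values.
         */
        #include <linux/if_vlan.h>

        static void vlan_flags_request_bridge_binding(struct ifla_vlan_flags *fl)
        {
                fl->flags |= VLAN_FLAG_BRIDGE_BINDING;
                fl->mask  |= VLAN_FLAG_BRIDGE_BINDING;
        }

With a new enough iproute2 the same request should look roughly like "ip link add link br0 name br0.10 type vlan id 10 bridge_binding on", though the exact option name depends on the iproute2 version in use.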

--- a/include/uapi/linux/if_vlan.h
+++ b/include/uapi/linux/if_vlan.h

@@ -32,10 +32,11 @@ enum vlan_ioctl_cmds {
 };

 enum vlan_flags {
-        VLAN_FLAG_REORDER_HDR   = 0x1,
-        VLAN_FLAG_GVRP          = 0x2,
-        VLAN_FLAG_LOOSE_BINDING = 0x4,
-        VLAN_FLAG_MVRP          = 0x8,
+        VLAN_FLAG_REORDER_HDR           = 0x1,
+        VLAN_FLAG_GVRP                  = 0x2,
+        VLAN_FLAG_LOOSE_BINDING         = 0x4,
+        VLAN_FLAG_MVRP                  = 0x8,
+        VLAN_FLAG_BRIDGE_BINDING        = 0x10,
 };

 enum vlan_name_types {

--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c

@@ -75,6 +75,14 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg,
         return 0;
 }

+static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
+                                            struct net_device *dev,
+                                            struct vlan_dev_priv *vlan)
+{
+        if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+                netif_stacked_transfer_operstate(rootdev, dev);
+}
+
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
         struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -180,7 +188,7 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
         /* Account for reference in struct vlan_dev_priv */
         dev_hold(real_dev);

-        netif_stacked_transfer_operstate(real_dev, dev);
+        vlan_stacked_transfer_operstate(real_dev, dev, vlan);
         linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

         /* So, got the sucker initialized, now lets place
@@ -399,7 +407,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
         case NETDEV_CHANGE:
                 /* Propagate real device state to vlan devices */
                 vlan_group_for_each_dev(grp, i, vlandev)
-                        netif_stacked_transfer_operstate(dev, vlandev);
+                        vlan_stacked_transfer_operstate(dev, vlandev,
+                                                        vlan_dev_priv(vlandev));
                 break;

         case NETDEV_CHANGEADDR:
@@ -446,7 +455,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                 dev_close_many(&close_list, false);

                 list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
-                        netif_stacked_transfer_operstate(dev, vlandev);
+                        vlan_stacked_transfer_operstate(dev, vlandev,
+                                                        vlan_dev_priv(vlandev));
                         list_del_init(&vlandev->close_list);
                 }
                 list_del(&close_list);
@@ -463,7 +473,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                         if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                 dev_change_flags(vlandev, flgs | IFF_UP,
                                                  extack);
-                        netif_stacked_transfer_operstate(dev, vlandev);
+                        vlan_stacked_transfer_operstate(dev, vlandev, vlan);
                 }
                 break;

--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c

@@ -223,7 +223,8 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
         u32 old_flags = vlan->flags;

         if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-                     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
+                     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
+                     VLAN_FLAG_BRIDGE_BINDING))
                 return -EINVAL;

         vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -296,7 +297,8 @@ static int vlan_dev_open(struct net_device *dev)
         if (vlan->flags & VLAN_FLAG_MVRP)
                 vlan_mvrp_request_join(dev);

-        if (netif_carrier_ok(real_dev))
+        if (netif_carrier_ok(real_dev) &&
+            !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
                 netif_carrier_on(dev);
         return 0;

@@ -326,7 +328,8 @@ static int vlan_dev_stop(struct net_device *dev)
         if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
                 dev_uc_del(real_dev, dev->dev_addr);

-        netif_carrier_off(dev);
+        if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+                netif_carrier_off(dev);
         return 0;
 }

@@ -550,7 +553,8 @@ static const struct net_device_ops vlan_netdev_ops;

 static int vlan_dev_init(struct net_device *dev)
 {
-        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+        struct net_device *real_dev = vlan->real_dev;

         netif_carrier_off(dev);

@@ -561,6 +565,9 @@ static int vlan_dev_init(struct net_device *dev)
                                           (1<<__LINK_STATE_DORMANT))) |
                       (1<<__LINK_STATE_PRESENT);

+        if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING)
+                dev->state |= (1 << __LINK_STATE_NOCARRIER);
+
         dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
                            NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
                            NETIF_F_GSO_ENCAP_ALL |
@@ -591,8 +598,7 @@ static int vlan_dev_init(struct net_device *dev)
 #endif

         dev->needed_headroom = real_dev->needed_headroom;
-        if (vlan_hw_offload_capable(real_dev->features,
-                                    vlan_dev_priv(dev)->vlan_proto)) {
+        if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) {
                 dev->header_ops      = &vlan_passthru_header_ops;
                 dev->hard_header_len = real_dev->hard_header_len;
         } else {
@@ -606,8 +612,8 @@ static int vlan_dev_init(struct net_device *dev)

         vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));

-        vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
-        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
+        vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+        if (!vlan->vlan_pcpu_stats)
                 return -ENOMEM;

         return 0;

--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c

@@ -84,7 +84,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[],
                 flags = nla_data(data[IFLA_VLAN_FLAGS]);
                 if ((flags->flags & flags->mask) &
                     ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-                      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP)) {
+                      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
+                      VLAN_FLAG_BRIDGE_BINDING)) {
                         NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN flags");
                         return -EINVAL;
                 }

--- a/net/bridge/br.c
+++ b/net/bridge/br.c

@@ -40,10 +40,13 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
         bool changed_addr;
         int err;

-        /* register of bridge completed, add sysfs entries */
-        if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
-                br_sysfs_addbr(dev);
-                return NOTIFY_DONE;
+        if (dev->priv_flags & IFF_EBRIDGE) {
+                if (event == NETDEV_REGISTER) {
+                        /* register of bridge completed, add sysfs entries */
+                        br_sysfs_addbr(dev);
+                        return NOTIFY_DONE;
+                }
+                br_vlan_bridge_event(dev, event, ptr);
         }

         /* not a port of a bridge */
@@ -126,6 +129,8 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
                 break;
         }

+        br_vlan_port_event(p, event);
+
         /* Events that may cause spanning tree to refresh */
         if (!notified && (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
                           event == NETDEV_CHANGE || event == NETDEV_DOWN))

--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h

@@ -321,6 +321,7 @@ enum net_bridge_opts {
         BROPT_MTU_SET_BY_USER,
         BROPT_VLAN_STATS_PER_PORT,
         BROPT_NO_LL_LEARN,
+        BROPT_VLAN_BRIDGE_BINDING,
 };

 struct net_bridge {
@@ -895,6 +896,9 @@ int nbp_vlan_init(struct net_bridge_port *port, struct netlink_ext_ack *extack);
 int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
 void br_vlan_get_stats(const struct net_bridge_vlan *v,
                        struct br_vlan_stats *stats);
+void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
+void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+                          void *ptr);

 static inline struct net_bridge_vlan_group *br_vlan_group(
         const struct net_bridge *br)
@@ -1078,6 +1082,16 @@ static inline void br_vlan_get_stats(const struct net_bridge_vlan *v,
                                      struct br_vlan_stats *stats)
 {
 }
+
+static inline void br_vlan_port_event(struct net_bridge_port *p,
+                                      unsigned long event)
+{
+}
+
+static inline void br_vlan_bridge_event(struct net_device *dev,
+                                        unsigned long event, void *ptr)
+{
+}
 #endif

 struct nf_br_ops {

--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c

@@ -7,6 +7,8 @@
 #include "br_private.h"
 #include "br_private_tunnel.h"

+static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
+
 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
                               const void *ptr)
 {
@@ -293,6 +295,9 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,

         __vlan_add_list(v);
         __vlan_add_flags(v, flags);
+        if (p)
+                nbp_vlan_set_vlan_dev_state(p, v->vid);
+
 out:
         return err;

@@ -357,6 +362,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
                 rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
                                        br_vlan_rht_params);
                 __vlan_del_list(v);
+                nbp_vlan_set_vlan_dev_state(p, v->vid);
                 call_rcu(&v->rcu, nbp_vlan_rcu_free);
         }

@@ -1264,3 +1270,211 @@ int br_vlan_get_info(const struct net_device *dev, u16 vid,
         return 0;
 }
 EXPORT_SYMBOL_GPL(br_vlan_get_info);
+
+static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
+{
+        return is_vlan_dev(dev) &&
+               !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
+}
+
+static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
+                                        __always_unused void *data)
+{
+        return br_vlan_is_bind_vlan_dev(dev);
+}
+
+static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
+{
+        int found;
+
+        rcu_read_lock();
+        found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
+                                              NULL);
+        rcu_read_unlock();
+
+        return !!found;
+}
+
+struct br_vlan_bind_walk_data {
+        u16 vid;
+        struct net_device *result;
+};
+
+static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
+                                          void *data_in)
+{
+        struct br_vlan_bind_walk_data *data = data_in;
+        int found = 0;
+
+        if (br_vlan_is_bind_vlan_dev(dev) &&
+            vlan_dev_priv(dev)->vlan_id == data->vid) {
+                data->result = dev;
+                found = 1;
+        }
+
+        return found;
+}
+
+static struct net_device *
+br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
+{
+        struct br_vlan_bind_walk_data data = {
+                .vid = vid,
+        };
+
+        rcu_read_lock();
+        netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
+                                      &data);
+        rcu_read_unlock();
+
+        return data.result;
+}
+
+static bool br_vlan_is_dev_up(const struct net_device *dev)
+{
+        return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
+}
+
+static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
+                                       struct net_device *vlan_dev)
+{
+        u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
+        struct net_bridge_vlan_group *vg;
+        struct net_bridge_port *p;
+        bool has_carrier = false;
+
+        if (!netif_carrier_ok(br->dev)) {
+                netif_carrier_off(vlan_dev);
+                return;
+        }
+
+        list_for_each_entry(p, &br->port_list, list) {
+                vg = nbp_vlan_group(p);
+                if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
+                        has_carrier = true;
+                        break;
+                }
+        }
+
+        if (has_carrier)
+                netif_carrier_on(vlan_dev);
+        else
+                netif_carrier_off(vlan_dev);
+}
+
+static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
+{
+        struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+        struct net_bridge_vlan *vlan;
+        struct net_device *vlan_dev;
+
+        list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+                vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
+                                                           vlan->vid);
+                if (vlan_dev) {
+                        if (br_vlan_is_dev_up(p->dev)) {
+                                if (netif_carrier_ok(p->br->dev))
+                                        netif_carrier_on(vlan_dev);
+                        } else {
+                                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
+                        }
+                }
+        }
+}
+
+static void br_vlan_upper_change(struct net_device *dev,
+                                 struct net_device *upper_dev,
+                                 bool linking)
+{
+        struct net_bridge *br = netdev_priv(dev);
+
+        if (!br_vlan_is_bind_vlan_dev(upper_dev))
+                return;
+
+        if (linking) {
+                br_vlan_set_vlan_dev_state(br, upper_dev);
+                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
+        } else {
+                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
+                              br_vlan_has_upper_bind_vlan_dev(dev));
+        }
+}
+
+struct br_vlan_link_state_walk_data {
+        struct net_bridge *br;
+};
+
+static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
+                                        void *data_in)
+{
+        struct br_vlan_link_state_walk_data *data = data_in;
+
+        if (br_vlan_is_bind_vlan_dev(vlan_dev))
+                br_vlan_set_vlan_dev_state(data->br, vlan_dev);
+
+        return 0;
+}
+
+static void br_vlan_link_state_change(struct net_device *dev,
+                                      struct net_bridge *br)
+{
+        struct br_vlan_link_state_walk_data data = {
+                .br = br
+        };
+
+        rcu_read_lock();
+        netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
+                                      &data);
+        rcu_read_unlock();
+}
+
+/* Must be protected by RTNL. */
+static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
+{
+        struct net_device *vlan_dev;
+
+        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
+                return;
+
+        vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
+        if (vlan_dev)
+                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
+}
+
+/* Must be protected by RTNL. */
+void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+                          void *ptr)
+{
+        struct netdev_notifier_changeupper_info *info;
+        struct net_bridge *br;
+
+        switch (event) {
+        case NETDEV_CHANGEUPPER:
+                info = ptr;
+                br_vlan_upper_change(dev, info->upper_dev, info->linking);
+                break;
+
+        case NETDEV_CHANGE:
+        case NETDEV_UP:
+                br = netdev_priv(dev);
+                if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
+                        return;
+                br_vlan_link_state_change(dev, br);
+                break;
+        }
+}
+
+/* Must be protected by RTNL. */
+void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
+{
+        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
+                return;
+
+        switch (event) {
+        case NETDEV_CHANGE:
+        case NETDEV_DOWN:
+        case NETDEV_UP:
+                br_vlan_set_all_vlan_dev_state(p);
+                break;
+        }
+}