
Merge branch 'sja1105-fixes'

Vladimir Oltean says:

====================
Fixes for SJA1105 DSA: FDBs, Learning and PTP

This is an assortment of functional fixes for the sja1105 switch driver
targeted for the "net" tree (although they apply on net-next just as
well).

Patch 1/5 ("net: dsa: sja1105: Fix broken learning with vlan_filtering
disabled") repairs a breakage introduced in the early development stages
of the driver: adding support for traffic from the CPU broke "normal"
(DMAC-based) frame forwarding - there was connectivity through the
switch only because all frames were flooded.
I debated whether this patch qualifies as a fix, since it puts the
switch into a mode it has never operated in before (aka SVL). But
"normal" forwarding did work before the "Traffic support for SJA1105
DSA driver" patchset, and arguably this patch should have been part of
that. Also, it would be strange for this feature to be broken in the
5.2 LTS. (A short sketch of what SVL means for the FDB lookup follows
the sign-off below.)

Patch 2/5 ("net: dsa: sja1105: Use the LOCKEDS bit for SJA1105 E/T as
well") is a simplification of a previous FDB-related patch that is
currently in the 5.3 rc's.

Patches 3/5 - 5/5 fix various crashes found while running linuxptp over
the switch ports for extended periods of time, or in conjunction with
other error conditions. The commits being fixed were all introduced in
5.2.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
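
As a rough illustration of the SVL point made above for patch 1/5: the
sketch below is plain userspace C rather than driver code (the struct
and helper names are invented), and shows how masking the VLAN ID out
of the FDB lookup key lets a DMAC learned against one port's private
dsa_8021q pvid still match frames that arrive with a different pvid -
which is what restores DMAC-based forwarding when vlan_filtering is
off.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical FDB entry, loosely modeled on sja1105_l2_lookup_entry. */
struct fdb_entry {
        uint8_t  macaddr[6];
        uint16_t vlanid;
        uint16_t mask_vlanid;   /* 0xfff in IVL mode, 0 in SVL mode */
};

/* A frame matches the entry if the DMAC matches and the VID matches
 * under the entry's mask. With mask_vlanid == 0 (SVL), the VID is
 * ignored and the DMAC alone decides the forwarding destination.
 */
static bool fdb_match(const struct fdb_entry *e, const uint8_t *dmac,
                      uint16_t vid)
{
        if (memcmp(e->macaddr, dmac, 6))
                return false;
        return (e->vlanid & e->mask_vlanid) == (vid & e->mask_vlanid);
}

int main(void)
{
        const uint8_t dmac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
        /* Entry learned on a port whose dsa_8021q pvid is, say, 1040. */
        struct fdb_entry ivl = { .vlanid = 1040, .mask_vlanid = 0xfff };
        struct fdb_entry svl = { .vlanid = 1040, .mask_vlanid = 0 };

        memcpy(ivl.macaddr, dmac, 6);
        memcpy(svl.macaddr, dmac, 6);

        /* A frame from another port carries that port's own pvid (1041
         * here): the IVL-style entry misses, so the switch would flood;
         * the SVL-style entry hits, giving normal DMAC forwarding.
         */
        printf("IVL: %d, SVL: %d\n", fdb_match(&ivl, dmac, 1041),
               fdb_match(&svl, dmac, 1041));
        return 0;
}

Compiled with any C99 compiler, this prints "IVL: 0, SVL: 1".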
David S. Miller 2019-08-06 14:37:02 -07:00
commit feac1d6802
4 changed files with 75 additions and 98 deletions

@@ -277,6 +277,18 @@ sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                         SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
 }
+static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+                                                    enum packing_op op)
+{
+        struct sja1105_l2_lookup_entry *entry = entry_ptr;
+        u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+        const int size = SJA1105_SIZE_DYN_CMD;
+        sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+        return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
 static void
 sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
                                  enum packing_op op)
@@ -477,7 +489,7 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
 /* SJA1105E/T: First generation */
 struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
         [BLK_IDX_L2_LOOKUP] = {
-                .entry_packing = sja1105et_l2_lookup_entry_packing,
+                .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
                 .cmd_packing = sja1105et_l2_lookup_cmd_packing,
                 .access = (OP_READ | OP_WRITE | OP_DEL),
                 .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,

@@ -218,7 +218,7 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
                 /* This selects between Independent VLAN Learning (IVL) and
                  * Shared VLAN Learning (SVL)
                  */
-                .shared_learn = false,
+                .shared_learn = true,
                 /* Don't discard management traffic based on ENFPORT -
                  * we don't perform SMAC port enforcement anyway, so
                  * what we are setting here doesn't matter.
@@ -1092,8 +1092,13 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
         l2_lookup.vlanid = vid;
         l2_lookup.iotag = SJA1105_S_TAG;
         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-        l2_lookup.mask_vlanid = VLAN_VID_MASK;
-        l2_lookup.mask_iotag = BIT(0);
+        if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+                l2_lookup.mask_vlanid = VLAN_VID_MASK;
+                l2_lookup.mask_iotag = BIT(0);
+        } else {
+                l2_lookup.mask_vlanid = 0;
+                l2_lookup.mask_iotag = 0;
+        }
         l2_lookup.destports = BIT(port);
         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1150,8 +1155,13 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
         l2_lookup.vlanid = vid;
         l2_lookup.iotag = SJA1105_S_TAG;
         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
-        l2_lookup.mask_vlanid = VLAN_VID_MASK;
-        l2_lookup.mask_iotag = BIT(0);
+        if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+                l2_lookup.mask_vlanid = VLAN_VID_MASK;
+                l2_lookup.mask_iotag = BIT(0);
+        } else {
+                l2_lookup.mask_vlanid = 0;
+                l2_lookup.mask_iotag = 0;
+        }
         l2_lookup.destports = BIT(port);
         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1181,60 +1191,31 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
                            const unsigned char *addr, u16 vid)
 {
         struct sja1105_private *priv = ds->priv;
-        u16 rx_vid, tx_vid;
-        int rc, i;
-        if (dsa_port_is_vlan_filtering(&ds->ports[port]))
-                return priv->info->fdb_add_cmd(ds, port, addr, vid);
-        /* Since we make use of VLANs even when the bridge core doesn't tell us
-         * to, translate these FDB entries into the correct dsa_8021q ones.
-         * The basic idea (also repeats for removal below) is:
-         * - Each of the other front-panel ports needs to be able to forward a
-         *   pvid-tagged (aka tagged with their rx_vid) frame that matches this
-         *   DMAC.
-         * - The CPU port (aka the tx_vid of this port) needs to be able to
-         *   send a frame matching this DMAC to the specified port.
-         * For a better picture see net/dsa/tag_8021q.c.
+        /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
+         * so the switch still does some VLAN processing internally.
+         * But Shared VLAN Learning (SVL) is also active, and it will take
+         * care of autonomous forwarding between the unique pvid's of each
+         * port. Here we just make sure that users can't add duplicate FDB
+         * entries when in this mode - the actual VID doesn't matter except
+         * for what gets printed in 'bridge fdb show'. In the case of zero,
+         * no VID gets printed at all.
          */
-        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
-                if (i == port)
-                        continue;
-                if (i == dsa_upstream_port(priv->ds, port))
-                        continue;
+        if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+                vid = 0;
-                rx_vid = dsa_8021q_rx_vid(ds, i);
-                rc = priv->info->fdb_add_cmd(ds, port, addr, rx_vid);
-                if (rc < 0)
-                        return rc;
-        }
-        tx_vid = dsa_8021q_tx_vid(ds, port);
-        return priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
+        return priv->info->fdb_add_cmd(ds, port, addr, vid);
 }
 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
                            const unsigned char *addr, u16 vid)
 {
         struct sja1105_private *priv = ds->priv;
-        u16 rx_vid, tx_vid;
-        int rc, i;
-        if (dsa_port_is_vlan_filtering(&ds->ports[port]))
-                return priv->info->fdb_del_cmd(ds, port, addr, vid);
+        if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+                vid = 0;
-        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
-                if (i == port)
-                        continue;
-                if (i == dsa_upstream_port(priv->ds, port))
-                        continue;
-                rx_vid = dsa_8021q_rx_vid(ds, i);
-                rc = priv->info->fdb_del_cmd(ds, port, addr, rx_vid);
-                if (rc < 0)
-                        return rc;
-        }
-        tx_vid = dsa_8021q_tx_vid(ds, port);
-        return priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
+        return priv->info->fdb_del_cmd(ds, port, addr, vid);
 }
 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
@@ -1273,39 +1254,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
                         continue;
                 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
-                /* On SJA1105 E/T, the switch doesn't implement the LOCKEDS
-                 * bit, so it doesn't tell us whether a FDB entry is static
-                 * or not.
-                 * But, of course, we can find out - we're the ones who added
-                 * it in the first place.
-                 */
-                if (priv->info->device_id == SJA1105E_DEVICE_ID ||
-                    priv->info->device_id == SJA1105T_DEVICE_ID) {
-                        int match;
-                        match = sja1105_find_static_fdb_entry(priv, port,
-                                                              &l2_lookup);
-                        l2_lookup.lockeds = (match >= 0);
-                }
-                /* We need to hide the dsa_8021q VLANs from the user. This
-                 * basically means hiding the duplicates and only showing
-                 * the pvid that is supposed to be active in standalone and
-                 * non-vlan_filtering modes (aka 1).
-                 * - For statically added FDB entries (bridge fdb add), we
-                 *   can convert the TX VID (coming from the CPU port) into the
-                 *   pvid and ignore the RX VIDs of the other ports.
-                 * - For dynamically learned FDB entries, a single entry with
-                 *   no duplicates is learned - that which has the real port's
-                 *   pvid, aka RX VID.
-                 */
-                if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
-                        if (l2_lookup.vlanid == tx_vid ||
-                            l2_lookup.vlanid == rx_vid)
-                                l2_lookup.vlanid = 1;
-                        else
-                                continue;
-                }
+                /* We need to hide the dsa_8021q VLANs from the user. */
+                if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+                        l2_lookup.vlanid = 0;
                 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
         }
         return 0;
@@ -1597,6 +1548,7 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
  */
 static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
 {
+        struct sja1105_l2_lookup_params_entry *l2_lookup_params;
         struct sja1105_general_params_entry *general_params;
         struct sja1105_private *priv = ds->priv;
         struct sja1105_table *table;
@@ -1625,6 +1577,28 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
         general_params->incl_srcpt1 = enabled;
         general_params->incl_srcpt0 = enabled;
+        /* VLAN filtering => independent VLAN learning.
+         * No VLAN filtering => shared VLAN learning.
+         *
+         * In shared VLAN learning mode, untagged traffic still gets
+         * pvid-tagged, and the FDB table gets populated with entries
+         * containing the "real" (pvid or from VLAN tag) VLAN ID.
+         * However the switch performs a masked L2 lookup in the FDB,
+         * effectively only looking up a frame's DMAC (and not VID) for the
+         * forwarding decision.
+         *
+         * This is extremely convenient for us, because in modes with
+         * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
+         * each front panel port. This is good for identification but breaks
+         * learning badly - the VID of the learnt FDB entry is unique, aka
+         * no frames coming from any other port are going to have it. So
+         * for forwarding purposes, this is as though learning was broken
+         * (all frames get flooded).
+         */
+        table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+        l2_lookup_params = table->entries;
+        l2_lookup_params->shared_learn = !enabled;
         rc = sja1105_static_config_reload(priv);
         if (rc)
                 dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1754,6 +1728,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
         cancel_work_sync(&priv->tagger_data.rxtstamp_work);
         skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+        sja1105_ptp_clock_unregister(priv);
+        sja1105_static_config_free(&priv->static_config);
 }
 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
@@ -2211,9 +2187,7 @@ static int sja1105_remove(struct spi_device *spi)
 {
         struct sja1105_private *priv = spi_get_drvdata(spi);
-        sja1105_ptp_clock_unregister(priv);
         dsa_unregister_switch(priv->ds);
-        sja1105_static_config_free(&priv->static_config);
         return 0;
 }
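
The two sja1105_main.c hunks above follow the usual teardown rule:
release resources only after their users are gone. Unregistering the
PTP clock and freeing the static config now happen from the DSA
.teardown callback, at a well-defined point inside
dsa_unregister_switch(), instead of straddling it in the SPI remove
path. A minimal userspace sketch of the hazard being avoided, with
invented names and for illustration only:

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins: a "switch" with a config blob and active users. */
struct config { int vlan_aware; };

struct sw {
        struct config *cfg;
        int users;              /* e.g. ports not yet unregistered */
};

static void unregister_users(struct sw *s)
{
        /* While being unregistered, users may still look at s->cfg. */
        while (s->users) {
                printf("user %d reads cfg->vlan_aware = %d\n",
                       s->users, s->cfg->vlan_aware);
                s->users--;
        }
}

int main(void)
{
        struct sw s = { .cfg = calloc(1, sizeof(*s.cfg)), .users = 2 };

        if (!s.cfg)
                return 1;
        s.cfg->vlan_aware = 1;

        /* Buggy order (conceptually what the old remove path risked):
         * free(s.cfg); unregister_users(&s);  -> use-after-free.
         * Fixed order: unregister every user first, then free what
         * they were using.
         */
        unregister_users(&s);
        free(s.cfg);
        return 0;
}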

@@ -369,16 +369,15 @@ int sja1105_ptp_clock_register(struct sja1105_private *priv)
                 .mult = SJA1105_CC_MULT,
         };
         mutex_init(&priv->ptp_lock);
-        INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
-        schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
         priv->ptp_caps = sja1105_ptp_caps;
         priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
         if (IS_ERR_OR_NULL(priv->clock))
                 return PTR_ERR(priv->clock);
+        INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
+        schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
         return sja1105_ptp_reset(priv);
 }
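
The sja1105_ptp.c hunk applies the same ordering discipline to deferred
work: the overflow-check work is now initialized and scheduled only
after ptp_clock_register() has succeeded, so an error return no longer
leaves a periodic job pending against a clock that was never created.
A rough userspace analogue, with invented names and a thread standing
in for the kernel's delayed work:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Invented stand-in for the registered clock the periodic job needs. */
struct fake_clock { long ticks; };

static struct fake_clock *g_clock;

/* Periodic job, analogous to the PTP overflow check. */
static void *poller(void *arg)
{
        (void)arg;
        for (int i = 0; i < 3; i++) {
                printf("poll: ticks=%ld\n", g_clock->ticks);
                usleep(1000);
        }
        return NULL;
}

static int clock_register(int fail)
{
        if (fail)
                return -1;      /* registration failed, nothing to poll */
        g_clock = calloc(1, sizeof(*g_clock));
        return g_clock ? 0 : -1;
}

int main(void)
{
        pthread_t thr;

        /* Buggy order: starting the poller here, before clock_register(),
         * would let it run and dereference a clock that may never be
         * created if registration fails.
         */
        if (clock_register(/* fail = */ 0))
                return 1;

        /* Fixed order: start the periodic work only once the resource
         * it uses actually exists.
         */
        pthread_create(&thr, NULL, poller, NULL);
        pthread_join(thr, NULL);
        free(g_clock);
        return 0;
}

(Build with -pthread.)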

@@ -165,6 +165,7 @@ static struct sk_buff
                                     "Expected meta frame, is %12llx "
                                     "in the DSA master multicast filter?\n",
                                     SJA1105_META_DMAC);
+                        kfree_skb(sp->data->stampable_skb);
                 }
                 /* Hold a reference to avoid dsa_switch_rcv
@@ -211,17 +212,8 @@ static struct sk_buff
                  * for further processing up the network stack.
                  */
                 kfree_skb(skb);
-                skb = skb_copy(stampable_skb, GFP_ATOMIC);
-                if (!skb) {
-                        dev_err_ratelimited(dp->ds->dev,
-                                            "Failed to copy stampable skb\n");
-                        spin_unlock(&sp->data->meta_lock);
-                        return NULL;
-                }
+                skb = stampable_skb;
                 sja1105_transfer_meta(skb, meta);
-                /* The cached copy will be freed now */
-                skb_unref(stampable_skb);
                 spin_unlock(&sp->data->meta_lock);
         }
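
The two tag_sja1105.c hunks amount to a one-slot cache discipline for
the stampable skb: when a newly cached candidate replaces an old one,
the old one is now freed instead of leaked, and when the cached skb is
finally consumed it is handed over directly instead of being copied
(with the copy's error path then needing to be unwound). A small
userspace sketch of that pattern, with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for an skb: a heap object freed exactly once. */
struct buf { char payload[32]; };

/* One-slot cache, in the spirit of the tagger's stampable_skb. */
static struct buf *stampable;

/* Cache a new candidate. If one is already cached it can no longer be
 * consumed, so free it instead of leaking it (the first fix).
 */
static void cache_stampable(struct buf *b)
{
        free(stampable);
        stampable = b;
}

/* Consume the cached buffer. Rather than copying it and then dropping
 * the original, transfer ownership of the pointer to the caller (the
 * second fix): no allocation that can fail, no extra bookkeeping.
 */
static struct buf *take_stampable(void)
{
        struct buf *b = stampable;

        stampable = NULL;
        return b;
}

int main(void)
{
        struct buf *a = calloc(1, sizeof(*a));
        struct buf *b = calloc(1, sizeof(*b));

        if (!a || !b) {
                free(a);
                free(b);
                return 1;
        }
        strcpy(a->payload, "first");
        strcpy(b->payload, "second");

        cache_stampable(a);
        cache_stampable(b);     /* "first" is freed here, not leaked */

        struct buf *out = take_stampable();

        printf("%s\n", out->payload);   /* prints "second" */
        free(out);
        return 0;
}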