1
0
Fork 0

net: aquantia: add support of rx-vlan-filter offload

Since it uses the same NIC table as the rx-flow vlan filter, the rx-flow
vlan filter accepts only vlans that are present on the interface when
rx-vlan-filter is on.

Signed-off-by: Dmitry Bogdanov <dmitry.bogdanov@aquantia.com>
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
hifive-unleashed-5.1
Dmitry Bogdanov 2018-11-12 15:46:09 +00:00 committed by David S. Miller
parent 9a8cac4b4d
commit 7975d2aff5
7 changed files with 197 additions and 18 deletions

View File

@ -157,6 +157,14 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
return -EINVAL;
}
if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
(!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
aq_nic->active_vlans))) {
netdev_err(aq_nic->ndev,
"ethtool: unknown vlan-id specified");
return -EINVAL;
}
if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
netdev_err(aq_nic->ndev,
"ethtool: queue number must be in range [0, %d]",
@ -331,26 +339,108 @@ static int aq_add_del_fether(struct aq_nic_s *aq_nic,
return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
}
static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
{
int i;
for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
if (aq_vlans[i].enable &&
aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
aq_vlans[i].vlan_id == vlan) {
return true;
}
}
return false;
}
/* Function rebuilds array of vlan filters so that filters with assigned
 * queue have a precedence over just vlans on the interface.
 *
 * Entries that are enabled AND have a queue (ethtool rx-flow rules) are
 * kept untouched; every remaining slot is refilled from @active_vlans
 * (the bitmap of vlans currently registered on the netdev), skipping ids
 * already claimed by a queue-assigned rule.  Leftover slots are disabled.
 */
static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
			     unsigned long *active_vlans,
			     struct aq_rx_filter_vlan *aq_vlans)
{
	bool vlan_busy = false;
	/* -1 so the first find_next_bit() starts scanning at bit 0 */
	int vlan = -1;
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		/* Slot holds a queue-assigned rule — preserve it as-is. */
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
			continue;
		do {
			vlan = find_next_bit(active_vlans,
					     VLAN_N_VID,
					     vlan + 1);
			if (vlan == VLAN_N_VID) {
				/* Bitmap exhausted: clear this slot.  The
				 * `continue` re-tests the do/while condition,
				 * which is now false, so the inner loop exits
				 * and outer iterations keep clearing slots.
				 */
				aq_vlans[i].enable = 0U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = 0;
				continue;
			}

			/* Skip ids already covered by a queue-assigned rule;
			 * otherwise install the vlan without a queue.
			 */
			vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
			if (!vlan_busy) {
				aq_vlans[i].enable = 1U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = vlan;
			}
		} while (vlan_busy && vlan != VLAN_N_VID);
	}
}
/* Program one vlan rx-flow rule (ethtool location -> filter slot) into the
 * shadow table @aq_vlans.  On delete (@add == false) the slot is simply
 * cleared; on add the vlan id is deduplicated and the slot is filled with
 * id + target queue.  Always returns 0.
 */
static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_vlan *aq_vlans, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
	/* ethtool locations for vlan rules start at AQ_RX_FIRST_LOC_FVLANID */
	int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
	int i;

	memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));

	if (!add)
		return 0;

	/* remove vlan if it was in table without queue assignment */
	/* NOTE(review): the loop disables ANY entry with a matching vlan_id,
	 * not only queue-less ones as the comment above says — presumably no
	 * two queue-assigned rules share a vlan id; verify against
	 * aq_check_approve_fvlan.
	 */
	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].vlan_id ==
		    (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
			aq_vlans[i].enable = false;
		}
	}

	/* Install the rule: vlan id masked to 12 bits, queue from the low
	 * bits of ring_cookie (0x1F mask — hardware queue index width).
	 */
	aq_vlans[location].location = location;
	aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
				     & VLAN_VID_MASK;
	aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
	aq_vlans[location].enable = 1U;

	return 0;
}
/* Delete the ethtool rx-flow vlan rule matching @vlan_id, if one exists.
 *
 * Returns the result of aq_del_rxnfc_rule() on a match, -ENOENT otherwise.
 *
 * Fixes vs. original:
 *  - mask vlan_tci with VLAN_VID_MASK before comparing: the table is
 *    populated with the masked id (see aq_set_data_fvlan), so a tci that
 *    carries PCP/DEI bits would never match the raw comparison;
 *  - zero-initialize the on-stack ethtool_rxnfc so aq_del_rxnfc_rule()
 *    never sees uninitialized fields beyond fs.location.
 */
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if ((be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) &
		     VLAN_VID_MASK) == vlan_id)
			break;
	}
	/* rule is NULL when the loop ran to completion without a match */
	if (rule && (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) &
		     VLAN_VID_MASK) == vlan_id) {
		struct ethtool_rxnfc cmd = { 0 };

		cmd.fs.location = rule->aq_fsp.location;
		return aq_del_rxnfc_rule(aq_nic, &cmd);
	}

	return -ENOENT;
}
static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr, bool add)
{
@ -725,14 +815,62 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
{
const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
return -EOPNOTSUPP;
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
hweight += hweight_long(aq_nic->active_vlans[i]);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
return err;
}
err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
);
if (err)
return err;
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (hweight < AQ_VLAN_MAX_FILTERS)
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
/* otherwise left in promiscue mode */
}
return err;
}
/* Turn off rx-vlan-filter offload: forget all active vlans, clear the
 * queue-less entries from the shadow table, disable HW vlan filtering
 * (promiscuous for vlans) and reprogram the table so only queue-assigned
 * rx-flow rules remain.
 *
 * Fix vs. original: the HW-ops capability checks are performed BEFORE the
 * driver state is mutated, so an unsupported hardware path no longer wipes
 * active_vlans and rebuilds the table only to bail out with -EOPNOTSUPP.
 *
 * Returns 0 on success or a negative errno from the HW ops.
 */
int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int err = 0;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
	if (err)
		return err;
	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
					   );
	return err;
}

View File

@ -27,8 +27,10 @@ int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
u32 *rule_locs);
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
#endif /* AQ_FILTERS_H */

View File

@ -27,6 +27,7 @@
#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
#define AQ_VLAN_MAX_FILTERS \
(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
/* NIC H/W capabilities */
struct aq_hw_caps_s {

View File

@ -114,6 +114,13 @@ static int aq_ndev_set_features(struct net_device *ndev,
goto err_exit;
}
}
if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
err = aq_filters_vlan_offload_off(aq_nic);
if (unlikely(err))
goto err_exit;
}
}
aq_cfg->features = features;
@ -162,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
aq_nic_set_multicast_list(aq_nic, ndev);
}
/* .ndo_vlan_rx_add_vid: record @vid as active and resync the HW filters. */
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
				  u16 vid)
{
	struct aq_nic_s *nic = netdev_priv(ndev);

	if (!nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	set_bit(vid, nic->active_vlans);

	return aq_filters_vlans_update(nic);
}
/* .ndo_vlan_rx_kill_vid: drop @vid from the active set.  If no explicit
 * rx-flow rule existed for it (-ENOENT), resync the plain vlan filters;
 * if a rule was deleted, the delete path already reprogrammed hardware.
 */
static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
				   u16 vid)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	int err;

	if (!nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	clear_bit(vid, nic->active_vlans);

	err = aq_del_fvlan_by_vlan(nic, vid);
	if (err == -ENOENT)
		return aq_filters_vlans_update(nic);

	return 0;
}
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@ -169,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features
.ndo_set_features = aq_ndev_set_features,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};

View File

@ -84,8 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
cfg->vlan_id = 0U;
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */

View File

@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
u32 mtu;
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
@ -98,6 +97,8 @@ struct aq_nic_s {
u32 count;
u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
/* Bitmask of currently assigned vlans from linux */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct pci_dev *pdev;
unsigned int msix_entry_mask;

View File

@ -42,7 +42,8 @@
NETIF_F_SG | \
NETIF_F_TSO | \
NETIF_F_LRO | \
NETIF_F_NTUPLE, \
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@ -320,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
if (cfg->vlan_id) {
hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
hw_atl_rpf_vlan_untagged_act_set(self, 1U);
hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
} else {
hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
}
// Always accept untagged packets
hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
hw_atl_rpf_vlan_untagged_act_set(self, 1U);
/* Rx Interrupts */
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
@ -1074,6 +1066,14 @@ static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
/* Enable/disable the HW vlan filter.  When the filter is disabled we fall
 * back to vlan-promiscuous mode so no tagged traffic is dropped.
 *
 * Fix vs. original: triple negation `!!!enable` replaced with the
 * equivalent `!enable`; comment typos corrected.
 */
static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
	/* set promisc in case of disabling the vlan filter */
	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);

	return aq_hw_err_from_flags(self);
}
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@ -1102,6 +1102,7 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
.hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
.hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
.hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,