
Merge branch 'netcp-next'

Murali Karicheri says:

====================
netcp: enhancements and minor fixes

This series is for net-next. It propagates enhancements and minor
bug fixes from the internal version of the driver to keep the upstream
driver in sync. Please review and apply if this looks good.

Tested on all of K2HK/E/L boards with nfs rootfs.
Test logs below
K2HK-EVM: http://pastebin.ubuntu.com/23754106/
k2L-EVM: http://pastebin.ubuntu.com/23754143/
K2E-EVM: http://pastebin.ubuntu.com/23754159/

History:
  v1 - dropped 1/10 and 2/10 of v0 based on comments from Rob as
       they need more work before submission
  v0 - Initial version
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-01-07 21:03:51 -05:00
commit 82e4869da3
6 changed files with 292 additions and 53 deletions

View File

@ -1,5 +1,5 @@
/*
* Texas Instruments 3-Port Ethernet Switch Address Lookup Engine
* Texas Instruments N-Port Ethernet Switch Address Lookup Engine
*
* Copyright (C) 2012 Texas Instruments
*
@ -27,11 +27,14 @@
#define BITMASK(bits) (BIT(bits) - 1)
#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff)
#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask))
#define ALE_VERSION_MINOR(rev) (rev & 0xff)
#define ALE_VERSION_1R3 0x0103
#define ALE_VERSION_1R4 0x0104
/* ALE Registers */
#define ALE_IDVER 0x00
#define ALE_STATUS 0x04
#define ALE_CONTROL 0x08
#define ALE_PRESCALE 0x10
#define ALE_UNKNOWNVLAN 0x18
@ -39,6 +42,13 @@
#define ALE_TABLE 0x34
#define ALE_PORTCTL 0x40
/* ALE NetCP NU switch specific Registers */
#define ALE_UNKNOWNVLAN_MEMBER 0x90
#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD 0x94
#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD 0x98
#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
#define ALE_TABLE_WRITE BIT(31)
#define ALE_TYPE_FREE 0
@ -51,6 +61,10 @@
#define ALE_UCAST_OUI 2
#define ALE_UCAST_TOUCHED 3
#define ALE_TABLE_SIZE_MULTIPLIER 1024
#define ALE_STATUS_SIZE_MASK 0x1f
#define ALE_TABLE_SIZE_DEFAULT 64
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
int idx;
@ -84,20 +98,34 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
#define DEFINE_ALE_FIELD1(name, start) \
static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \
{ \
return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \
u32 bits) \
{ \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
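For reference, here is roughly what the new DEFINE_ALE_FIELD1(port_mask, 66) invocation expands to; unlike DEFINE_ALE_FIELD, the field width is supplied by the caller at run time instead of being fixed in the macro (a sketch that relies on the cpsw_ale_get_field()/cpsw_ale_set_field() helpers above):

/* Approximate expansion of DEFINE_ALE_FIELD1(port_mask, 66) */
static inline int cpsw_ale_get_port_mask(u32 *ale_entry, u32 bits)
{
	return cpsw_ale_get_field(ale_entry, 66, bits);
}

static inline void cpsw_ale_set_port_mask(u32 *ale_entry, u32 value, u32 bits)
{
	cpsw_ale_set_field(ale_entry, 66, bits, value);
}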
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(vlan_id, 48, 12)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD1(port_mask, 66)
DEFINE_ALE_FIELD(super, 65, 1)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD1(port_num, 66)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(vlan_untag_force, 24, 3)
DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3)
DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3)
DEFINE_ALE_FIELD(vlan_member_list, 0, 3)
DEFINE_ALE_FIELD1(vlan_untag_force, 24)
DEFINE_ALE_FIELD1(vlan_reg_mcast, 16)
DEFINE_ALE_FIELD1(vlan_unreg_mcast, 8)
DEFINE_ALE_FIELD1(vlan_member_list, 0)
DEFINE_ALE_FIELD(mcast, 40, 1)
/* ALE NetCP nu switch specific */
DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3)
DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3)
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
@ -223,14 +251,16 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
{
int mask;
mask = cpsw_ale_get_port_mask(ale_entry);
mask = cpsw_ale_get_port_mask(ale_entry,
ale->port_mask_bits);
if ((mask & port_mask) == 0)
return; /* ports dont intersect, not interested */
mask &= ~port_mask;
/* free if only remaining port is host port */
if (mask)
cpsw_ale_set_port_mask(ale_entry, mask);
cpsw_ale_set_port_mask(ale_entry, mask,
ale->port_mask_bits);
else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
@ -291,7 +321,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
cpsw_ale_set_port_num(ale_entry, port);
cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits);
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
@ -338,9 +368,11 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
mask = cpsw_ale_get_port_mask(ale_entry);
mask = cpsw_ale_get_port_mask(ale_entry,
ale->port_mask_bits);
port_mask |= mask;
cpsw_ale_set_port_mask(ale_entry, port_mask);
cpsw_ale_set_port_mask(ale_entry, port_mask,
ale->port_mask_bits);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@ -367,7 +399,8 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
cpsw_ale_read(ale, idx, ale_entry);
if (port_mask)
cpsw_ale_set_port_mask(ale_entry, port_mask);
cpsw_ale_set_port_mask(ale_entry, port_mask,
ale->port_mask_bits);
else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
@ -376,6 +409,21 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
}
EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
/* ALE NetCP NU switch specific vlan functions */
static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
int reg_mcast, int unreg_mcast)
{
int idx;
/* Set VLAN registered multicast flood mask */
idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry);
writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
/* Set VLAN unregistered multicast flood mask */
idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
}
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast)
{
@ -389,10 +437,16 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
cpsw_ale_set_vlan_id(ale_entry, vid);
cpsw_ale_set_vlan_untag_force(ale_entry, untag);
cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
cpsw_ale_set_vlan_member_list(ale_entry, port);
cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits);
if (!ale->params.nu_switch_ale) {
cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
ale->vlan_field_bits);
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
ale->vlan_field_bits);
} else {
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@ -418,7 +472,8 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
if (port_mask)
cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
ale->vlan_field_bits);
else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
@ -446,12 +501,15 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
if (type != ALE_TYPE_VLAN)
continue;
unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
unreg_mcast =
cpsw_ale_get_vlan_unreg_mcast(ale_entry,
ale->vlan_field_bits);
if (allmulti)
unreg_mcast |= 1;
else
unreg_mcast &= ~1;
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
ale->vlan_field_bits);
cpsw_ale_write(ale, idx, ale_entry);
}
}
@ -464,7 +522,7 @@ struct ale_control_info {
int bits;
};
static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
[ALE_ENABLE] = {
.name = "enable",
.offset = ALE_CONTROL,
@ -721,11 +779,83 @@ static void cpsw_ale_timer(unsigned long arg)
void cpsw_ale_start(struct cpsw_ale *ale)
{
u32 rev;
u32 rev, ale_entries;
rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n",
ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev));
if (!ale->params.major_ver_mask)
ale->params.major_ver_mask = 0xff;
ale->version =
(ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
ALE_VERSION_MINOR(rev);
dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
ALE_VERSION_MINOR(rev));
if (!ale->params.ale_entries) {
ale_entries =
__raw_readl(ale->params.ale_regs + ALE_STATUS) &
ALE_STATUS_SIZE_MASK;
/* ALE available on newer NetCP switches has introduced
* a register, ALE_STATUS, to indicate the size of ALE
* table which shows the size as a multiple of 1024 entries.
* For these, params.ale_entries will be set to zero. So
* read the register and update the value of ale_entries.
* ALE table on NetCP lite, is much smaller and is indicated
* by a value of zero in ALE_STATUS. So use a default value
* of ALE_TABLE_SIZE_DEFAULT for this. Caller is expected
* to set the value of ale_entries for all other versions
* of ALE.
*/
if (!ale_entries)
ale_entries = ALE_TABLE_SIZE_DEFAULT;
else
ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
ale->params.ale_entries = ale_entries;
}
dev_info(ale->params.dev,
"ALE Table size %ld\n", ale->params.ale_entries);
/* set default bits for existing h/w */
ale->port_mask_bits = 3;
ale->port_num_bits = 2;
ale->vlan_field_bits = 3;
/* Set defaults override for ALE on NetCP NU switch and for version
* 1R3
*/
if (ale->params.nu_switch_ale) {
/* Separate registers for unknown vlan configuration.
* Also there are N bits, where N is number of ale
* ports and shift value should be 0
*/
ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits =
ale->params.ale_ports;
ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset =
ALE_UNKNOWNVLAN_MEMBER;
ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits =
ale->params.ale_ports;
ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0;
ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset =
ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD;
ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits =
ale->params.ale_ports;
ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0;
ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset =
ALE_UNKNOWNVLAN_REG_MCAST_FLOOD;
ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits =
ale->params.ale_ports;
ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
ale->port_mask_bits = ale->params.ale_ports;
ale->port_num_bits = ale->params.ale_ports - 1;
ale->vlan_field_bits = ale->params.ale_ports;
} else if (ale->version == ALE_VERSION_1R3) {
ale->port_mask_bits = ale->params.ale_ports;
ale->port_num_bits = 3;
ale->vlan_field_bits = ale->params.ale_ports;
}
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
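As a rough numeric illustration of the new table-size discovery (a sketch only, not part of the patch): the low five bits of ALE_STATUS report the table size in units of 1024 entries, and a reading of zero identifies the small NetCP-lite ALE.

/* Sketch of the derivation performed in cpsw_ale_start() above */
u32 sz = __raw_readl(ale->params.ale_regs + ALE_STATUS) & ALE_STATUS_SIZE_MASK;
u32 entries = sz ? sz * ALE_TABLE_SIZE_MULTIPLIER	/* e.g. 2 -> 2048 entries */
		 : ALE_TABLE_SIZE_DEFAULT;		/* NetCP lite -> 64 entries */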

View File

@ -1,5 +1,5 @@
/*
* Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs
* Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs
*
* Copyright (C) 2012 Texas Instruments
*
@ -21,6 +21,16 @@ struct cpsw_ale_params {
unsigned long ale_ageout; /* in secs */
unsigned long ale_entries;
unsigned long ale_ports;
/* NU Switch has specific handling as number of bits in ALE entries
* are different than other versions of ALE. Also there are specific
* registers for unknown vlan specific fields. So use nu_switch_ale
* to identify this hardware.
*/
bool nu_switch_ale;
/* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So
* pass it from caller.
*/
u32 major_ver_mask;
};
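A minimal sketch of how a NU-switch caller is expected to populate these new fields before cpsw_ale_create(); the values mirror the gbe_probe() hunk later in this commit, and ale_entries is left at zero so that cpsw_ale_start() sizes the table from ALE_STATUS (the dev and ale_regs members already exist in this structure):

	struct cpsw_ale_params ale_params;

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev		= gbe_dev->dev;
	ale_params.ale_regs	= gbe_dev->ale_reg;
	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_ports	= gbe_dev->ale_ports;
	ale_params.major_ver_mask = 0x7;	/* NU ALE: 3-bit major version field */
	ale_params.nu_switch_ale  = true;
	gbe_dev->ale = cpsw_ale_create(&ale_params);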
struct cpsw_ale {
@ -28,6 +38,11 @@ struct cpsw_ale {
struct timer_list timer;
unsigned long ageout;
int allmulti;
u32 version;
/* These bits are different on NetCP NU Switch ALE */
u32 port_mask_bits;
u32 port_num_bits;
u32 vlan_field_bits;
};
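These widths feed the DEFINE_ALE_FIELD1 accessors shown in the cpsw_ale.c hunks above, roughly as follows (sketch):

	mask = cpsw_ale_get_port_mask(ale_entry, ale->port_mask_bits);
	cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits);
	cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);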
enum cpsw_ale_control {

View File

@ -23,6 +23,7 @@
#include <linux/netdevice.h>
#include <linux/soc/ti/knav_dma.h>
#include <linux/u64_stats_sync.h>
/* Maximum Ethernet frame size supported by Keystone switch */
#define NETCP_MAX_FRAME_SIZE 9504
@ -68,6 +69,20 @@ struct netcp_addr {
struct list_head node;
};
struct netcp_stats {
struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp;
u64 rx_packets;
u64 rx_bytes;
u32 rx_errors;
u32 rx_dropped;
struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp;
u64 tx_packets;
u64 tx_bytes;
u32 tx_errors;
u32 tx_dropped;
};
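The two seqcounts keep the 64-bit packet and byte counters consistent on 32-bit systems; the hot-path update pattern used later in this commit looks roughly like the sketch below, while readers use u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() as in netcp_get_stats(). The 32-bit error and drop counters are updated outside the seqcount.

	u64_stats_update_begin(&netcp->stats.syncp_rx);
	netcp->stats.rx_packets++;
	netcp->stats.rx_bytes += skb->len;
	u64_stats_update_end(&netcp->stats.syncp_rx);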
struct netcp_intf {
struct device *dev;
struct device *ndev_dev;
@ -87,6 +102,11 @@ struct netcp_intf {
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
#define ETH_SW_CAN_REMOVE_ETH_FCS BIT(0)
u32 hw_cap;
/* 64-bit netcp stats */
struct netcp_stats stats;
void *rx_channel;
const char *dma_chan_name;
@ -115,6 +135,7 @@ struct netcp_packet {
struct sk_buff *skb;
__le32 *epib;
u32 *psdata;
u32 eflags;
unsigned int psdata_len;
struct netcp_intf *netcp;
struct netcp_tx_pipe *tx_pipe;

View File

@ -122,6 +122,13 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
*ndesc = le32_to_cpu(desc->next_desc);
}
static void get_desc_info(u32 *desc_info, u32 *pkt_info,
struct knav_dma_desc *desc)
{
*desc_info = le32_to_cpu(desc->desc_info);
*pkt_info = le32_to_cpu(desc->packet_info);
}
static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
/* No Endian conversion needed as this data is untouched by hw */
@ -622,6 +629,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
struct netcp_stats *rx_stats = &netcp->stats;
struct knav_dma_desc *desc;
unsigned int dma_sz;
dma_addr_t dma;
@ -635,16 +643,17 @@ static void netcp_empty_rx_queue(struct netcp_intf *netcp)
if (unlikely(!desc)) {
dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
__func__);
netcp->ndev->stats.rx_errors++;
rx_stats->rx_errors++;
continue;
}
netcp_free_rx_desc_chain(netcp, desc);
netcp->ndev->stats.rx_dropped++;
rx_stats->rx_dropped++;
}
}
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
struct netcp_stats *rx_stats = &netcp->stats;
unsigned int dma_sz, buf_len, org_buf_len;
struct knav_dma_desc *desc, *ndesc;
unsigned int pkt_sz = 0, accum_sz;
@ -653,6 +662,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
struct netcp_packet p_info;
struct sk_buff *skb;
void *org_buf_ptr;
u32 tmp;
dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
if (!dma_desc)
@ -724,21 +734,27 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
knav_pool_desc_put(netcp->rx_pool, ndesc);
}
/* Free the primary descriptor */
knav_pool_desc_put(netcp->rx_pool, desc);
/* check for packet len and warn */
if (unlikely(pkt_sz != accum_sz))
dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
pkt_sz, accum_sz);
/* Remove ethernet FCS from the packet */
__pskb_trim(skb, skb->len - ETH_FCS_LEN);
/* Newer version of the Ethernet switch can trim the Ethernet FCS
* from the packet and is indicated in hw_cap. So trim it only for
* older h/w
*/
if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
__pskb_trim(skb, skb->len - ETH_FCS_LEN);
/* Call each of the RX hooks */
p_info.skb = skb;
skb->dev = netcp->ndev;
p_info.rxtstamp_complete = false;
get_desc_info(&tmp, &p_info.eflags, desc);
p_info.epib = desc->epib;
p_info.psdata = (u32 __force *)desc->psdata;
p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
KNAV_DMA_DESC_EFLAGS_MASK);
list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
int ret;
@ -747,14 +763,20 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
if (unlikely(ret)) {
dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
rx_hook->order, ret);
netcp->ndev->stats.rx_errors++;
/* Free the primary descriptor */
rx_stats->rx_dropped++;
knav_pool_desc_put(netcp->rx_pool, desc);
dev_kfree_skb(skb);
return 0;
}
}
/* Free the primary descriptor */
knav_pool_desc_put(netcp->rx_pool, desc);
netcp->ndev->stats.rx_packets++;
netcp->ndev->stats.rx_bytes += skb->len;
u64_stats_update_begin(&rx_stats->syncp_rx);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
u64_stats_update_end(&rx_stats->syncp_rx);
/* push skb up the stack */
skb->protocol = eth_type_trans(skb, netcp->ndev);
@ -763,7 +785,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
free_desc:
netcp_free_rx_desc_chain(netcp, desc);
netcp->ndev->stats.rx_errors++;
rx_stats->rx_errors++;
return 0;
}
@ -994,6 +1016,7 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
unsigned int budget)
{
struct netcp_stats *tx_stats = &netcp->stats;
struct knav_dma_desc *desc;
struct netcp_tx_cb *tx_cb;
struct sk_buff *skb;
@ -1008,7 +1031,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
if (unlikely(!desc)) {
dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
netcp->ndev->stats.tx_errors++;
tx_stats->tx_errors++;
continue;
}
@ -1019,7 +1042,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
netcp_free_tx_desc_chain(netcp, desc, dma_sz);
if (!skb) {
dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
netcp->ndev->stats.tx_errors++;
tx_stats->tx_errors++;
continue;
}
@ -1036,8 +1059,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
netif_wake_subqueue(netcp->ndev, subqueue);
}
netcp->ndev->stats.tx_packets++;
netcp->ndev->stats.tx_bytes += skb->len;
u64_stats_update_begin(&tx_stats->syncp_tx);
tx_stats->tx_packets++;
tx_stats->tx_bytes += skb->len;
u64_stats_update_end(&tx_stats->syncp_tx);
dev_kfree_skb(skb);
pkts++;
}
@ -1212,9 +1237,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
/* psdata points to both native-endian and device-endian data */
__le32 *psdata = (void __force *)p_info.psdata;
memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
p_info.psdata_len);
set_words(p_info.psdata, p_info.psdata_len, psdata);
set_words((u32 *)psdata +
(KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
p_info.psdata_len, psdata);
tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
KNAV_DMA_DESC_PSLEN_SHIFT;
}
@ -1258,6 +1283,7 @@ out:
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_stats *tx_stats = &netcp->stats;
int subqueue = skb_get_queue_mapping(skb);
struct knav_dma_desc *desc;
int desc_count, ret = 0;
@ -1273,7 +1299,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If we get here, the skb has already been dropped */
dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
ret);
ndev->stats.tx_dropped++;
tx_stats->tx_dropped++;
return ret;
}
skb->len = NETCP_MIN_PACKET_SIZE;
@ -1301,7 +1327,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
drop:
ndev->stats.tx_dropped++;
tx_stats->tx_dropped++;
if (desc)
netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
dev_kfree_skb(skb);
@ -1883,12 +1909,46 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return 0;
}
static struct rtnl_link_stats64 *
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_stats *p = &netcp->stats;
u64 rxpackets, rxbytes, txpackets, txbytes;
unsigned int start;
do {
start = u64_stats_fetch_begin_irq(&p->syncp_rx);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
do {
start = u64_stats_fetch_begin_irq(&p->syncp_tx);
txpackets = p->tx_packets;
txbytes = p->tx_bytes;
} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
stats->rx_packets = rxpackets;
stats->rx_bytes = rxbytes;
stats->tx_packets = txpackets;
stats->tx_bytes = txbytes;
/* The following are stored as 32 bit */
stats->rx_errors = p->rx_errors;
stats->rx_dropped = p->rx_dropped;
stats->tx_dropped = p->tx_dropped;
return stats;
}
static const struct net_device_ops netcp_netdev_ops = {
.ndo_open = netcp_ndo_open,
.ndo_stop = netcp_ndo_stop,
.ndo_start_xmit = netcp_ndo_start_xmit,
.ndo_set_rx_mode = netcp_set_rx_mode,
.ndo_do_ioctl = netcp_ndo_ioctl,
.ndo_get_stats64 = netcp_get_stats,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
@ -1935,6 +1995,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
INIT_LIST_HEAD(&netcp->txhook_list_head);
INIT_LIST_HEAD(&netcp->rxhook_list_head);
INIT_LIST_HEAD(&netcp->addr_list);
u64_stats_init(&netcp->stats.syncp_rx);
u64_stats_init(&netcp->stats.syncp_tx);
netcp->netcp_device = netcp_device;
netcp->dev = netcp_device->device;
netcp->ndev = ndev;

View File

@ -81,7 +81,6 @@
#define GBENU_CPTS_OFFSET 0x1d000
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_NUM_ALE_ENTRIES 1024
#define GBENU_SGMII_MODULE_SIZE 0x100
/* 10G Ethernet SS defines */
@ -103,7 +102,7 @@
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
#define XGBE10_NUM_ALE_ENTRIES 1024
#define XGBE10_NUM_ALE_ENTRIES 2048
#define GBE_TIMER_INTERVAL (HZ / 2)
@ -122,6 +121,7 @@
#define MACSL_FULLDUPLEX BIT(0)
#define GBE_CTL_P0_ENABLE BIT(2)
#define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13)
#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
#define GBE_STATS_CD_SEL BIT(28)
@ -2821,7 +2821,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
struct netcp_intf *netcp = netdev_priv(ndev);
struct gbe_slave *slave = gbe_intf->slave;
int port_num = slave->port_num;
u32 reg;
u32 reg, val;
int ret;
reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
@ -2851,7 +2851,12 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
/* Control register */
writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
val = GBE_CTL_P0_ENABLE;
if (IS_SS_ID_MU(gbe_dev)) {
val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
}
writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
/* All statistics enabled and STAT AB visible by default */
writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
@ -2930,7 +2935,9 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
}
slave->open = false;
slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
(slave->link_interface == XGMII_LINK_MAC_PHY))
slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
if (slave->link_interface >= XGMII_LINK_MAC_PHY)
@ -3433,7 +3440,6 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBENU_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
@ -3601,7 +3607,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
ale_params.ale_entries = gbe_dev->ale_entries;
ale_params.ale_ports = gbe_dev->ale_ports;
if (IS_SS_ID_MU(gbe_dev)) {
ale_params.major_ver_mask = 0x7;
ale_params.nu_switch_ale = true;
}
gbe_dev->ale = cpsw_ale_create(&ale_params);
if (!gbe_dev->ale) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");

View File

@ -41,6 +41,8 @@
#define KNAV_DMA_DESC_RETQ_SHIFT 0
#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4)
#define KNAV_DMA_DESC_EFLAGS_SHIFT 20
#define KNAV_DMA_NUM_EPIB_WORDS 4
#define KNAV_DMA_NUM_PS_WORDS 16
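For context, a sketch of how the RX path in the netcp_core.c hunk above uses these two defines to extract the error flags from the descriptor's packet_info word before passing them to the RX hooks via p_info.eflags:

	u32 pkt_info = le32_to_cpu(desc->packet_info);
	u32 eflags = (pkt_info >> KNAV_DMA_DESC_EFLAGS_SHIFT) &	/* bits 23:20 */
		     KNAV_DMA_DESC_EFLAGS_MASK;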