
Merge branch 'Lantiq-Intel-vrx200-support'

Hauke Mehrtens says:

====================
Add support for Lantiq / Intel vrx200 network

This adds basic support for the GSWIP (Gigabit Switch) found in the
VRX200 SoC.
There are different versions of this IP core used in different SoCs, but
this driver has currently only been tested on the VRX200 SoC line; for
other SoCs it will probably need some adaptations to work.

I also plan to add layer 2 offloading to the DSA driver and later also
layer 3 offloading, which is supported by the PPE HW block.

All these patches should go through the net-next tree.

This depends on the patch "MIPS: lantiq: dma: add dev pointer" which
should go into 4.19.

Changes since v2:
 * Send patch "MIPS: lantiq: dma: add dev pointer" separately
 * all: removed return in register write functions
 * switch: uses phylink
 * switch: uses hardware MDIO auto polling
 * switch: use usleep_range() in MDIO busy check
 * switch: configure MDIO bus to 2.5 MHz
 * switch: disable xMII link when it is not used
 * Ethernet: use NAPI for TX cleanups
 * Ethernet: enable clock in open callback
 * Ethernet: improve skb allocation
 * Ethernet: use net_dev->stats

Changes since v1:
 * Add "MIPS: lantiq: dma: add dev pointer"
 * checkpatch fixes in all patches
 * Added binding documentation
 * use the readx_poll_timeout() function and the ETIMEDOUT error code
 * integrate GPHY firmware loading into DSA driver
 * renamed to NET_DSA_LANTIQ_GSWIP
 * removed some unneeded casts
 * added of_device_id.data information about the detected switch
 * fixed John's email address
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-09-13 08:14:34 -07:00
commit d03790f55a
19 changed files with 2202 additions and 8 deletions


@@ -0,0 +1,141 @@
Lantiq GSWIP Ethernet switches
==================================
Required properties for GSWIP core:
- compatible : "lantiq,xrx200-gswip" for the embedded GSWIP in the
xRX200 SoC
- reg : memory range of the GSWIP core registers
: memory range of the GSWIP MDIO registers
: memory range of the GSWIP MII registers
See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of
additional required and optional properties.
Required properties for MDIO bus:
- compatible : "lantiq,xrx200-mdio" for the MDIO bus inside the GSWIP
core of the xRX200 SoC and the PHYs connected to it.
See Documentation/devicetree/bindings/net/mdio.txt for a list of additional
required and optional properties.
Required properties for GPHY firmware loading:
- compatible : "lantiq,gphy-fw" and "lantiq,xrx200-gphy-fw",
"lantiq,xrx200a1x-gphy-fw", "lantiq,xrx200a2x-gphy-fw",
"lantiq,xrx300-gphy-fw", or "lantiq,xrx330-gphy-fw"
for the loading of the firmware into the embedded
GPHY core of the SoC.
- lantiq,rcu : reference to the rcu syscon
The GPHY firmware loader has a list of GPHY entries, one for each
embedded GPHY
- reg : Offset of the GPHY firmware register in the RCU
register range
- resets : list of resets of the embedded GPHY
- reset-names : list of names of the resets
Example:
Ethernet switch on the VRX200 SoC:
gswip: gswip@E108000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "lantiq,xrx200-gswip";
reg = < 0xE108000 0x3000 /* switch */
0xE10B100 0x70 /* mdio */
0xE10B1D8 0x30 /* mii */
>;
dsa,member = <0 0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
label = "lan3";
phy-mode = "rgmii";
phy-handle = <&phy0>;
};
port@1 {
reg = <1>;
label = "lan4";
phy-mode = "rgmii";
phy-handle = <&phy1>;
};
port@2 {
reg = <2>;
label = "lan2";
phy-mode = "internal";
phy-handle = <&phy11>;
};
port@4 {
reg = <4>;
label = "lan1";
phy-mode = "internal";
phy-handle = <&phy13>;
};
port@5 {
reg = <5>;
label = "wan";
phy-mode = "rgmii";
phy-handle = <&phy5>;
};
port@6 {
reg = <0x6>;
label = "cpu";
ethernet = <&eth0>;
};
};
mdio@0 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "lantiq,xrx200-mdio";
reg = <0>;
phy0: ethernet-phy@0 {
reg = <0x0>;
};
phy1: ethernet-phy@1 {
reg = <0x1>;
};
phy5: ethernet-phy@5 {
reg = <0x5>;
};
phy11: ethernet-phy@11 {
reg = <0x11>;
};
phy13: ethernet-phy@13 {
reg = <0x13>;
};
};
gphy-fw {
compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw";
lantiq,rcu = <&rcu0>;
gphy@20 {
reg = <0x20>;
resets = <&reset0 31 30>;
reset-names = "gphy";
};
gphy@68 {
reg = <0x68>;
resets = <&reset0 29 28>;
reset-names = "gphy";
};
};
};


@@ -0,0 +1,21 @@
Lantiq xRX200 GSWIP PMAC Ethernet driver
==================================
Required properties:
- compatible : "lantiq,xrx200-net" for the PMAC of the embedded
: GSWIP in the xRX200
- reg : memory range of the PMAC core inside of the GSWIP core
- interrupts : TX and RX DMA interrupts. Use interrupt-names "tx" for
: the TX interrupt and "rx" for the RX interrupt.
Example:
eth0: eth@E10B308 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "lantiq,xrx200-net";
reg = <0xE10B308 0x30>;
interrupts = <73>, <72>;
interrupt-names = "tx", "rx";
};


@@ -8167,6 +8167,15 @@ S: Maintained
F: net/l3mdev
F: include/net/l3mdev.h
LANTIQ / INTEL Ethernet drivers
M: Hauke Mehrtens <hauke@hauke-m.de>
L: netdev@vger.kernel.org
S: Maintained
F: net/dsa/tag_gswip.c
F: drivers/net/ethernet/lantiq_xrx200.c
F: drivers/net/dsa/lantiq_pce.h
F: drivers/net/dsa/lantiq_gswip.c
LANTIQ MIPS ARCHITECTURE
M: John Crispin <john@phrozen.org>
L: linux-mips@linux-mips.org


@@ -106,7 +106,6 @@ ltq_dma_open(struct ltq_dma_channel *ch)
spin_lock_irqsave(&ltq_dma_lock, flag);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
spin_unlock_irqrestore(&ltq_dma_lock, flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_open);


@@ -505,7 +505,7 @@ void __init ltq_soc_init(void)
clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
} else if (of_machine_is_compatible("lantiq,ar10")) {
@@ -513,11 +513,11 @@ void __init ltq_soc_init(void)
ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH |
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
PMU_PPE_DP | PMU_PPE_TC);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
clkdev_add_pmu("1f203020.gphy", NULL, 1, 0, PMU_GPHY);
clkdev_add_pmu("1f203068.gphy", NULL, 1, 0, PMU_GPHY);
clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
@@ -536,12 +536,12 @@ void __init ltq_soc_init(void)
clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
clkdev_add_pmu("1e108000.eth", NULL, 0, 0,
clkdev_add_pmu("1e10b308.eth", NULL, 0, 0,
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
clkdev_add_pmu("1f203020.gphy", NULL, 0, 0, PMU_GPHY);
clkdev_add_pmu("1f203068.gphy", NULL, 0, 0, PMU_GPHY);
clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);


@@ -23,6 +23,14 @@ config NET_DSA_LOOP
This enables support for a fake mock-up switch chip which
exercises the DSA APIs.
config NET_DSA_LANTIQ_GSWIP
tristate "Lantiq / Intel GSWIP"
depends on NET_DSA
select NET_DSA_TAG_GSWIP
---help---
This enables support for the Lantiq / Intel GSWIP 2.1 found in
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
tristate "Mediatek MT7530 Ethernet switch support"
depends on NET_DSA


@@ -5,6 +5,7 @@ obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
ifdef CONFIG_NET_DSA_LOOP
obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
endif
obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o

File diff suppressed because it is too large


@@ -0,0 +1,153 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PCE microcode extracted from UGW 7.1.1 switch api
*
* Copyright (c) 2012, 2014, 2015 Lantiq Deutschland GmbH
* Copyright (C) 2012 John Crispin <john@phrozen.org>
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
enum {
OUT_MAC0 = 0,
OUT_MAC1,
OUT_MAC2,
OUT_MAC3,
OUT_MAC4,
OUT_MAC5,
OUT_ETHTYP,
OUT_VTAG0,
OUT_VTAG1,
OUT_ITAG0,
OUT_ITAG1, /*10 */
OUT_ITAG2,
OUT_ITAG3,
OUT_IP0,
OUT_IP1,
OUT_IP2,
OUT_IP3,
OUT_SIP0,
OUT_SIP1,
OUT_SIP2,
OUT_SIP3, /*20*/
OUT_SIP4,
OUT_SIP5,
OUT_SIP6,
OUT_SIP7,
OUT_DIP0,
OUT_DIP1,
OUT_DIP2,
OUT_DIP3,
OUT_DIP4,
OUT_DIP5, /*30*/
OUT_DIP6,
OUT_DIP7,
OUT_SESID,
OUT_PROT,
OUT_APP0,
OUT_APP1,
OUT_IGMP0,
OUT_IGMP1,
OUT_IPOFF, /*39*/
OUT_NONE = 63,
};
/* parser's microcode length type */
#define INSTR 0
#define IPV6 1
#define LENACCU 2
/* parser's microcode flag type */
enum {
FLAG_ITAG = 0,
FLAG_VLAN,
FLAG_SNAP,
FLAG_PPPOE,
FLAG_IPV6,
FLAG_IPV6FL,
FLAG_IPV4,
FLAG_IGMP,
FLAG_TU,
FLAG_HOP,
FLAG_NN1, /*10 */
FLAG_NN2,
FLAG_END,
FLAG_NO, /*13*/
};
struct gswip_pce_microcode {
u16 val_3;
u16 val_2;
u16 val_1;
u16 val_0;
};
#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
{ val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
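/* Worked example (editor's sketch, not part of the original file): the
 * VLAN entry below,
 * MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
 * expands with OUT_VTAG0 = 7, INSTR = 0 and FLAG_VLAN = 1 to the four
 * 16-bit words { 0x8100, 0xFFFF, 0x0871, 0x0200 }:
 * val_1 = (2 << 10) | (7 << 4) | (2 >> 1) = 0x0871 and
 * val_0 = ((2 & 1) << 15) | (0 << 13) | (1 << 9) | (0 << 8) = 0x0200,
 * i.e. val_3 carries the match value, val_2 the mask, and val_1/val_0
 * the packed next state, output field, length, type, flags and ipv4_len.
 */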
static const struct gswip_pce_microcode gswip_pce_microcode[] = {
/* value mask ns fields L type flags ipv4_len */
MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0600, 0x0600, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0300, 0xFF00, 41, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0),
MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
MC_ENTRY(0x1100, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0600, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
MC_ENTRY(0x0000, 0x0000, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x00E0, 35, OUT_NONE, 0, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
MC_ENTRY(0x0000, 0x0000, 40, OUT_PROT, 1, IPV6, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 40, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
};


@@ -108,6 +108,13 @@ config LANTIQ_ETOP
---help---
Support for the MII0 inside the Lantiq SoC
config LANTIQ_XRX200
tristate "Lantiq / Intel xRX200 PMAC network driver"
depends on SOC_TYPE_XWAY
---help---
Support for the PMAC of the Gigabit switch (GSWIP) inside the
Lantiq / Intel VRX200 VDSL SoC
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"


@@ -49,6 +49,7 @@ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/


@@ -432,6 +432,7 @@ ltq_etop_open(struct net_device *dev)
if (!IS_TX(i) && (!IS_RX(i)))
continue;
ltq_dma_open(&ch->dma);
ltq_dma_enable_irq(&ch->dma);
napi_enable(&ch->napi);
}
phy_start(dev->phydev);


@@ -0,0 +1,564 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Lantiq / Intel PMAC driver for XRX200 SoCs
*
* Copyright (C) 2010 Lantiq Deutschland
* Copyright (C) 2012 John Crispin <john@phrozen.org>
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <xway_dma.h>
/* DMA */
#define XRX200_DMA_DATA_LEN 0x600
#define XRX200_DMA_RX 0
#define XRX200_DMA_TX 1
/* cpu port mac */
#define PMAC_RX_IPG 0x0024
#define PMAC_RX_IPG_MASK 0xf
#define PMAC_HD_CTL 0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD BIT(0)
/* Add VLAN tag to Packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2 BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC BIT(10)
struct xrx200_chan {
int tx_free;
struct napi_struct napi;
struct ltq_dma_channel dma;
struct sk_buff *skb[LTQ_DESC_NUM];
struct xrx200_priv *priv;
};
struct xrx200_priv {
struct clk *clk;
struct xrx200_chan chan_tx;
struct xrx200_chan chan_rx;
struct net_device *net_dev;
struct device *dev;
__iomem void *pmac_reg;
};
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
return __raw_readl(priv->pmac_reg + offset);
}
static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
__raw_writel(val, priv->pmac_reg + offset);
}
static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
u32 offset)
{
u32 val = xrx200_pmac_r32(priv, offset);
val &= ~(clear);
val |= set;
xrx200_pmac_w32(priv, val, offset);
}
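/* Editor's note (usage sketch): xrx200_pmac_mask() is a plain
 * read-modify-write helper. For example, the probe code below calls
 * xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);
 * which reads PMAC_RX_IPG, clears the bits in PMAC_RX_IPG_MASK (0xf)
 * and then sets the IPG field to 0xb.
 */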
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
int i;
for (i = 0; i < LTQ_DESC_NUM; i++) {
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
XRX200_DMA_DATA_LEN;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
}
static int xrx200_open(struct net_device *net_dev)
{
struct xrx200_priv *priv = netdev_priv(net_dev);
int err;
/* enable clock gate */
err = clk_prepare_enable(priv->clk);
if (err)
return err;
napi_enable(&priv->chan_tx.napi);
ltq_dma_open(&priv->chan_tx.dma);
ltq_dma_enable_irq(&priv->chan_tx.dma);
napi_enable(&priv->chan_rx.napi);
ltq_dma_open(&priv->chan_rx.dma);
/* The boot loader does not always deactivate the receiving of frames
* on the ports and then some packets queue up in the PPE buffers.
* They already passed the PMAC so they do not have the tags
configured here. Read these packets here and drop them.
* The HW should have written them into memory after 10us
*/
usleep_range(20, 40);
xrx200_flush_dma(&priv->chan_rx);
ltq_dma_enable_irq(&priv->chan_rx.dma);
netif_wake_queue(net_dev);
return 0;
}
static int xrx200_close(struct net_device *net_dev)
{
struct xrx200_priv *priv = netdev_priv(net_dev);
netif_stop_queue(net_dev);
napi_disable(&priv->chan_rx.napi);
ltq_dma_close(&priv->chan_rx.dma);
napi_disable(&priv->chan_tx.napi);
ltq_dma_close(&priv->chan_tx.dma);
clk_disable_unprepare(priv->clk);
return 0;
}
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
XRX200_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev,
ch->dma.desc_base[ch->dma.desc].addr))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ret = -ENOMEM;
goto skip;
}
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
XRX200_DMA_DATA_LEN;
return ret;
}
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
struct xrx200_priv *priv = ch->priv;
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
struct sk_buff *skb = ch->skb[ch->dma.desc];
int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
struct net_device *net_dev = priv->net_dev;
int ret;
ret = xrx200_alloc_skb(ch);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
if (ret) {
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
}
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, net_dev);
netif_receive_skb(skb);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
return 0;
}
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
struct xrx200_chan *ch = container_of(napi,
struct xrx200_chan, napi);
int rx = 0;
int ret;
while (rx < budget) {
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
ret = xrx200_hw_receive(ch);
if (ret)
return ret;
rx++;
} else {
break;
}
}
if (rx < budget) {
napi_complete(&ch->napi);
ltq_dma_enable_irq(&ch->dma);
}
return rx;
}
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
struct xrx200_chan *ch = container_of(napi,
struct xrx200_chan, napi);
struct net_device *net_dev = ch->priv->net_dev;
int pkts = 0;
int bytes = 0;
while (pkts < budget) {
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
struct sk_buff *skb = ch->skb[ch->tx_free];
pkts++;
bytes += skb->len;
ch->skb[ch->tx_free] = NULL;
consume_skb(skb);
memset(&ch->dma.desc_base[ch->tx_free], 0,
sizeof(struct ltq_dma_desc));
ch->tx_free++;
ch->tx_free %= LTQ_DESC_NUM;
} else {
break;
}
}
net_dev->stats.tx_packets += pkts;
net_dev->stats.tx_bytes += bytes;
netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
if (pkts < budget) {
napi_complete(&ch->napi);
ltq_dma_enable_irq(&ch->dma);
}
return pkts;
}
static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
struct xrx200_priv *priv = netdev_priv(net_dev);
struct xrx200_chan *ch = &priv->chan_tx;
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
u32 byte_offset;
dma_addr_t mapping;
int len;
skb->dev = net_dev;
if (skb_put_padto(skb, ETH_ZLEN)) {
net_dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
len = skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
netdev_err(net_dev, "tx ring full\n");
netif_stop_queue(net_dev);
return NETDEV_TX_BUSY;
}
ch->skb[ch->dma.desc] = skb;
mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, mapping)))
goto err_drop;
/* dma needs to start on a 16 byte aligned address */
byte_offset = mapping % 16;
desc->addr = mapping - byte_offset;
/* Make sure the address is written before we give it to HW */
wmb();
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
if (ch->dma.desc == ch->tx_free)
netif_stop_queue(net_dev);
netdev_sent_queue(net_dev, len);
return NETDEV_TX_OK;
err_drop:
dev_kfree_skb(skb);
net_dev->stats.tx_dropped++;
net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
static const struct net_device_ops xrx200_netdev_ops = {
.ndo_open = xrx200_open,
.ndo_stop = xrx200_close,
.ndo_start_xmit = xrx200_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
};
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
struct xrx200_chan *ch = ptr;
ltq_dma_disable_irq(&ch->dma);
ltq_dma_ack_irq(&ch->dma);
napi_schedule(&ch->napi);
return IRQ_HANDLED;
}
static int xrx200_dma_init(struct xrx200_priv *priv)
{
struct xrx200_chan *ch_rx = &priv->chan_rx;
struct xrx200_chan *ch_tx = &priv->chan_tx;
int ret = 0;
int i;
ltq_dma_init_port(DMA_PORT_ETOP);
ch_rx->dma.nr = XRX200_DMA_RX;
ch_rx->dma.dev = priv->dev;
ch_rx->priv = priv;
ltq_dma_alloc_rx(&ch_rx->dma);
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
ch_rx->dma.desc++) {
ret = xrx200_alloc_skb(ch_rx);
if (ret)
goto rx_free;
}
ch_rx->dma.desc = 0;
ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
"xrx200_net_rx", &priv->chan_rx);
if (ret) {
dev_err(priv->dev, "failed to request RX irq %d\n",
ch_rx->dma.irq);
goto rx_ring_free;
}
ch_tx->dma.nr = XRX200_DMA_TX;
ch_tx->dma.dev = priv->dev;
ch_tx->priv = priv;
ltq_dma_alloc_tx(&ch_tx->dma);
ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
"xrx200_net_tx", &priv->chan_tx);
if (ret) {
dev_err(priv->dev, "failed to request TX irq %d\n",
ch_tx->dma.irq);
goto tx_free;
}
return ret;
tx_free:
ltq_dma_free(&ch_tx->dma);
rx_ring_free:
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++) {
if (priv->chan_rx.skb[i])
dev_kfree_skb_any(priv->chan_rx.skb[i]);
}
rx_free:
ltq_dma_free(&ch_rx->dma);
return ret;
}
static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
int i;
ltq_dma_free(&priv->chan_tx.dma);
ltq_dma_free(&priv->chan_rx.dma);
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++)
dev_kfree_skb_any(priv->chan_rx.skb[i]);
}
static int xrx200_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct resource *res;
struct xrx200_priv *priv;
struct net_device *net_dev;
const u8 *mac;
int err;
/* alloc the network device */
net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
if (!net_dev)
return -ENOMEM;
priv = netdev_priv(net_dev);
priv->net_dev = net_dev;
priv->dev = dev;
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
net_dev->max_mtu = XRX200_DMA_DATA_LEN;
/* load the memory ranges */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "failed to get resources\n");
return -ENOENT;
}
priv->pmac_reg = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->pmac_reg)) {
dev_err(dev, "failed to request and remap io ranges\n");
return PTR_ERR(priv->pmac_reg);
}
priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
if (priv->chan_rx.dma.irq < 0) {
dev_err(dev, "failed to get RX IRQ, %i\n",
priv->chan_rx.dma.irq);
return -ENOENT;
}
priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
if (priv->chan_tx.dma.irq < 0) {
dev_err(dev, "failed to get TX IRQ, %i\n",
priv->chan_tx.dma.irq);
return -ENOENT;
}
/* get the clock */
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get clock\n");
return PTR_ERR(priv->clk);
}
mac = of_get_mac_address(np);
if (mac && is_valid_ether_addr(mac))
ether_addr_copy(net_dev->dev_addr, mac);
else
eth_hw_addr_random(net_dev);
/* bring up the dma engine and IP core */
err = xrx200_dma_init(priv);
if (err)
return err;
/* set IPG to 12 */
xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);
/* enable status header, enable CRC */
xrx200_pmac_mask(priv, 0,
PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
PMAC_HD_CTL);
/* setup NAPI */
netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
platform_set_drvdata(pdev, priv);
err = register_netdev(net_dev);
if (err)
goto err_uninit_dma;
return 0;
err_uninit_dma:
xrx200_hw_cleanup(priv);
return err;
}
static int xrx200_remove(struct platform_device *pdev)
{
struct xrx200_priv *priv = platform_get_drvdata(pdev);
struct net_device *net_dev = priv->net_dev;
/* free stack related instances */
netif_stop_queue(net_dev);
netif_napi_del(&priv->chan_tx.napi);
netif_napi_del(&priv->chan_rx.napi);
/* remove the actual device */
unregister_netdev(net_dev);
/* shut down hardware */
xrx200_hw_cleanup(priv);
return 0;
}
static const struct of_device_id xrx200_match[] = {
{ .compatible = "lantiq,xrx200-net" },
{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);
static struct platform_driver xrx200_driver = {
.probe = xrx200_probe,
.remove = xrx200_remove,
.driver = {
.name = "lantiq,xrx200-net",
.of_match_table = xrx200_match,
},
};
module_platform_driver(xrx200_driver);
MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");


@@ -35,6 +35,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_BRCM_PREPEND,
DSA_TAG_PROTO_DSA,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_GSWIP,
DSA_TAG_PROTO_KSZ,
DSA_TAG_PROTO_LAN9303,
DSA_TAG_PROTO_MTK,


@@ -38,6 +38,9 @@ config NET_DSA_TAG_DSA
config NET_DSA_TAG_EDSA
bool
config NET_DSA_TAG_GSWIP
bool
config NET_DSA_TAG_KSZ
bool


@@ -9,6 +9,7 @@ dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM_PREPEND) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
dsa_core-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o


@@ -52,6 +52,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_EDSA
[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_GSWIP
[DSA_TAG_PROTO_GSWIP] = &gswip_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ
[DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
#endif


@@ -206,6 +206,9 @@ extern const struct dsa_device_ops dsa_netdev_ops;
/* tag_edsa.c */
extern const struct dsa_device_ops edsa_netdev_ops;
/* tag_gswip.c */
extern const struct dsa_device_ops gswip_netdev_ops;
/* tag_ksz.c */
extern const struct dsa_device_ops ksz_netdev_ops;

net/dsa/tag_gswip.c (new file, mode 100644, 109 lines)

@@ -0,0 +1,109 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Intel / Lantiq GSWIP V2.0 PMAC tag support
*
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/dsa.h>
#include "dsa_priv.h"
#define GSWIP_TX_HEADER_LEN 4
/* special tag in TX path header */
/* Byte 0 */
#define GSWIP_TX_SLPID_SHIFT 0 /* source port ID */
#define GSWIP_TX_SLPID_CPU 2
#define GSWIP_TX_SLPID_APP1 3
#define GSWIP_TX_SLPID_APP2 4
#define GSWIP_TX_SLPID_APP3 5
#define GSWIP_TX_SLPID_APP4 6
#define GSWIP_TX_SLPID_APP5 7
/* Byte 1 */
#define GSWIP_TX_CRCGEN_DIS BIT(7)
#define GSWIP_TX_DPID_SHIFT 0 /* destination group ID */
#define GSWIP_TX_DPID_ELAN 0
#define GSWIP_TX_DPID_EWAN 1
#define GSWIP_TX_DPID_CPU 2
#define GSWIP_TX_DPID_APP1 3
#define GSWIP_TX_DPID_APP2 4
#define GSWIP_TX_DPID_APP3 5
#define GSWIP_TX_DPID_APP4 6
#define GSWIP_TX_DPID_APP5 7
/* Byte 2 */
#define GSWIP_TX_PORT_MAP_EN BIT(7)
#define GSWIP_TX_PORT_MAP_SEL BIT(6)
#define GSWIP_TX_LRN_DIS BIT(5)
#define GSWIP_TX_CLASS_EN BIT(4)
#define GSWIP_TX_CLASS_SHIFT 0
#define GSWIP_TX_CLASS_MASK GENMASK(3, 0)
/* Byte 3 */
#define GSWIP_TX_DPID_EN BIT(0)
#define GSWIP_TX_PORT_MAP_SHIFT 1
#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1)
#define GSWIP_RX_HEADER_LEN 8
/* special tag in RX path header */
/* Byte 7 */
#define GSWIP_RX_SPPID_SHIFT 4
#define GSWIP_RX_SPPID_MASK GENMASK(6, 4)
static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int err;
u8 *gswip_tag;
err = skb_cow_head(skb, GSWIP_TX_HEADER_LEN);
if (err)
return NULL;
skb_push(skb, GSWIP_TX_HEADER_LEN);
gswip_tag = skb->data;
gswip_tag[0] = GSWIP_TX_SLPID_CPU;
gswip_tag[1] = GSWIP_TX_DPID_ELAN;
gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL;
gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK;
gswip_tag[3] |= GSWIP_TX_DPID_EN;
return skb;
}
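/* Editor's sketch, derived from the defines above: a frame sent out on
 * switch port 2 (dp->index == 2) gets the four inserted tag bytes
 * gswip_tag[0] = 0x02 (GSWIP_TX_SLPID_CPU)
 * gswip_tag[1] = 0x00 (GSWIP_TX_DPID_ELAN)
 * gswip_tag[2] = 0xc0 (GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL)
 * gswip_tag[3] = 0x09 (BIT(2 + GSWIP_TX_PORT_MAP_SHIFT) | GSWIP_TX_DPID_EN)
 */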
static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt)
{
int port;
u8 *gswip_tag;
if (unlikely(!pskb_may_pull(skb, GSWIP_RX_HEADER_LEN)))
return NULL;
gswip_tag = skb->data - ETH_HLEN;
/* Get source port information */
port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev)
return NULL;
/* remove GSWIP tag */
skb_pull_rcsum(skb, GSWIP_RX_HEADER_LEN);
return skb;
}
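/* Editor's sketch: a frame received from switch port 5 carries 0x5 in
 * bits 6:4 of byte 7 of the RX header, so
 * (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT == 5
 * and dsa_master_find_slave() hands the skb to the slave netdev
 * registered for port 5.
 */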
const struct dsa_device_ops gswip_netdev_ops = {
.xmit = gswip_tag_xmit,
.rcv = gswip_tag_rcv,
};