
Merge tag 'mvebu-dt64-5.4-2' of git://git.infradead.org/linux-mvebu into arm/late

mvebu dt64 for 5.4 (part 2)

Add support for Turris Mox board (Armada 3720 SoC based)

* tag 'mvebu-dt64-5.4-2' of git://git.infradead.org/linux-mvebu: (53 commits)
  arm64: dts: marvell: add DTS for Turris Mox
  dt-bindings: marvell: document Turris Mox compatible
  arm64: dts: marvell: armada-37xx: add SPI CS1 pinctrl
  arm64: dts: marvell: Add cpu clock node on Armada 7K/8K
  arm64: dts: marvell: Convert 7k/8k usb-phy properties to phy-supply
  arm64: dts: marvell: Add 7k/8k PHYs in PCIe nodes
  arm64: dts: marvell: Add 7k/8k PHYs in USB3 nodes
  arm64: dts: marvell: Add 7k/8k per-port PHYs in SATA nodes
  arm64: dts: marvell: Add CP110 COMPHY clocks
  arm64: dts: marvell: armada-37xx: add mailbox node
  dt-bindings: gpio: Document GPIOs via Moxtet bus
  drivers: gpio: Add support for GPIOs over Moxtet bus
  bus: moxtet: Add sysfs and debugfs documentation
  dt-bindings: bus: Document moxtet bus binding
  bus: Add support for Moxtet bus
  reset: Add support for resets provided by SCMI
  firmware: arm_scmi: Add RESET protocol in SCMI v2.0
  dt-bindings: arm: Extend SCMI to support new reset protocol
  firmware: arm_scmi: Make use SCMI v2.0 fastchannel for performance protocol
  firmware: arm_scmi: Add discovery of SCMI v2.0 performance fastchannels
  ...

Link: https://lore.kernel.org/r/87h85two0r.fsf@FE-laptop
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Arnd Bergmann 2019-09-04 17:28:46 +02:00
commit 49826a68b5
53 changed files with 3550 additions and 617 deletions

View File

@ -0,0 +1,23 @@
What: /sys/kernel/debug/moxtet/input
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Description: (R) Read input from the shift registers, in hexadecimal.
Returns N+1 bytes, where N is the number of Moxtet connected
modules. The first byte is from the CPU board itself.
Example: 101214
10: CPU board with SD card
12: 2 = PCIe module, 1 = IRQ not active
14: 4 = Peridot module, 1 = IRQ not active
What: /sys/kernel/debug/moxtet/output
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Description: (RW) Read last written value to the shift registers, in
hexadecimal, or write values to the shift registers, also
in hexadecimal.
Example: 0102
01: 01 was last written, or is to be written, to the
first module's shift register
02: the same for second module
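
A minimal userspace sketch, assuming only the path and nibble layout documented above (cf. the 101214 example): the low nibble of each byte after the first is the module ID, and bit 4 is the "IRQ not active" flag for the module's first interrupt line.

/* Hypothetical decoder for /sys/kernel/debug/moxtet/input. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/moxtet/input", "r");
        char hex[64];
        unsigned int byte;
        int i;

        if (!f || !fgets(hex, sizeof(hex), f)) {
                perror("moxtet input");
                return 1;
        }
        fclose(f);

        for (i = 0; hex[i] && hex[i] != '\n' && hex[i + 1]; i += 2) {
                sscanf(&hex[i], "%2x", &byte);
                if (i == 0)
                        printf("CPU board: 0x%02x\n", byte);
                else
                        printf("module %d: id 0x%x, IRQ %sactive\n", i / 2,
                               byte & 0xf, (byte & 0x10) ? "not " : "");
        }
        return 0;
}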

View File

@ -0,0 +1,17 @@
What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_description
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Description: (R) Moxtet module description. Format: string
What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_id
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Description: (R) Moxtet module ID. Format: %x
What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_name
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Description: (R) Moxtet module name. Format: string
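
A small hypothetical listing tool; nothing beyond the sysfs paths and formats documented above is assumed.

#include <dirent.h>
#include <stdio.h>
#include <string.h>

static void show(const char *dev, const char *attr)
{
        char path[256], val[128] = "";
        FILE *f;

        snprintf(path, sizeof(path), "/sys/bus/moxtet/devices/%s/%s", dev, attr);
        f = fopen(path, "r");
        if (f) {
                if (fgets(val, sizeof(val), f))
                        val[strcspn(val, "\n")] = '\0';
                fclose(f);
        }
        printf("  %s: %s\n", attr, val);
}

int main(void)
{
        DIR *d = opendir("/sys/bus/moxtet/devices");
        struct dirent *e;

        if (!d) {
                perror("/sys/bus/moxtet/devices");
                return 1;
        }
        while ((e = readdir(d))) {
                if (e->d_name[0] == '.')
                        continue;
                printf("%s\n", e->d_name);
                show(e->d_name, "module_id");
                show(e->d_name, "module_name");
                show(e->d_name, "module_description");
        }
        closedir(d);
        return 0;
}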

View File

@ -73,6 +73,16 @@ Required properties:
as used by the firmware. Refer to platform details
for your implementation for the IDs to use.
Reset signal bindings for the reset domains based on SCMI Message Protocol
------------------------------------------------------------
This binding for the SCMI reset domain providers uses the generic reset
signal binding[5].
Required properties:
- #reset-cells : Should be 1. Contains the reset domain ID value used
by SCMI commands.
SRAM and Shared Memory for SCMI
-------------------------------
@ -93,6 +103,7 @@ Required sub-node properties:
[2] Documentation/devicetree/bindings/power/power_domain.txt
[3] Documentation/devicetree/bindings/thermal/thermal.txt
[4] Documentation/devicetree/bindings/sram/sram.txt
[5] Documentation/devicetree/bindings/reset/reset.txt
Example:
@ -152,6 +163,11 @@ firmware {
reg = <0x15>;
#thermal-sensor-cells = <1>;
};
scmi_reset: protocol@16 {
reg = <0x16>;
#reset-cells = <1>;
};
};
};
@ -166,6 +182,7 @@ hdlcd@7ff60000 {
reg = <0 0x7ff60000 0 0x1000>;
clocks = <&scmi_clk 4>;
power-domains = <&scmi_devpd 1>;
resets = <&scmi_reset 10>;
};
thermal-zones {

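The scmi_reset node above plugs into the generic reset framework, so a consumer such as the hdlcd example (resets = <&scmi_reset 10>) only ever sees the common reset API. A hedged, illustrative sketch of such a consumer's probe path:

/* Illustrative only: the SCMI specifics stay hidden behind the
 * reset-controller driver; the consumer just uses <linux/reset.h>.
 */
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_probe(struct platform_device *pdev)
{
        struct reset_control *rst;
        int ret;

        rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        /* Pulse the SCMI-managed reset domain before touching the device. */
        ret = reset_control_reset(rst);
        if (ret)
                return ret;

        return 0;
}
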
View File

@ -48,3 +48,11 @@ avs: avs@11500 {
compatible = "marvell,armada-3700-avs", "syscon";
reg = <0x11500 0x40>;
}
CZ.NIC's Turris Mox SOHO router Device Tree Bindings
----------------------------------------------------
Required root node property:
- compatible: must contain "cznic,turris-mox"

View File

@ -0,0 +1,46 @@
Turris Mox module status and configuration bus (over SPI)
Required properties:
- compatible : Should be "cznic,moxtet"
- #address-cells : Has to be 1
- #size-cells : Has to be 0
- spi-cpol : Required inverted clock polarity
- spi-cpha : Required shifted clock phase
- interrupts : Must contain reference to the shared interrupt line
- interrupt-controller : Required
- #interrupt-cells : Has to be 1
For other required and optional properties of SPI slave nodes please refer to
../spi/spi-bus.txt.
Required properties of subnodes:
- reg : Should be position on the Moxtet bus (how many Moxtet
modules are between this module and CPU module, so
either 0 or a positive integer)
The driver finds the devices connected to the bus by itself, but it may be
needed to reference some of them from other parts of the device tree. In that
case the devices can be defined as subnodes of the moxtet node.
Example:
moxtet@1 {
compatible = "cznic,moxtet";
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
spi-max-frequency = <10000000>;
spi-cpol;
spi-cpha;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&gpiosb>;
interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
moxtet_sfp: gpio@0 {
compatible = "cznic,moxtet-gpio";
gpio-controller;
#gpio-cells = <2>;
reg = <0>;
}
};

View File

@ -0,0 +1,18 @@
Turris Mox Moxtet GPIO expander via Moxtet bus
Required properties:
- compatible : Should be "cznic,moxtet-gpio".
- gpio-controller : Marks the device node as a GPIO controller.
- #gpio-cells : Should be two. For consumer use see gpio.txt.
Other properties are required for a Moxtet bus device, please refer to
Documentation/devicetree/bindings/bus/moxtet.txt.
Example:
moxtet_sfp: gpio@0 {
compatible = "cznic,moxtet-gpio";
gpio-controller;
#gpio-cells = <2>;
reg = <0>;
}
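
Lines of this expander are consumed through the standard GPIO descriptor API. The sketch below is illustrative only; the "tx-disable" con_id mirrors how the SFP node in the Turris Mox DTS wires up &moxtet_sfp, not something this binding mandates.

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct gpio_desc *txdis;

        /* Looks up "tx-disable-gpio(s)" in the consumer's node. */
        txdis = devm_gpiod_get(&pdev->dev, "tx-disable", GPIOD_OUT_LOW);
        if (IS_ERR(txdis))
                return PTR_ERR(txdis);

        /* Accessing a Moxtet GPIO goes over SPI, so it may sleep. */
        gpiod_set_value_cansleep(txdis, 1);
        return 0;
}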

View File

@ -8,6 +8,7 @@ Required properties:
- compatible:
- For i.MX7 SoCs should be "fsl,imx7d-src", "syscon"
- For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon"
- For i.MX8MM SoCs should be "fsl,imx8mm-src", "fsl,imx8mq-src", "syscon"
- reg: should be register base and length as documented in the
datasheet
- interrupts: Should contain SRC interrupt
@ -46,5 +47,6 @@ Example:
For list of all valid reset indices see
<dt-bindings/reset/imx7-reset.h> for i.MX7 and
<dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ
<dt-bindings/reset/imx7-reset.h> for i.MX7,
<dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ and
<dt-bindings/reset/imx8mq-reset.h> for i.MX8MM

View File

@ -0,0 +1,30 @@
Synopsys DesignWare Reset controller
=======================================
Please also refer to reset.txt in this directory for common reset
controller binding usage.
Required properties:
- compatible: should be one of the following.
"snps,dw-high-reset" - for active high configuration
"snps,dw-low-reset" - for active low configuration
- reg: physical base address of the controller and length of memory mapped
region.
- #reset-cells: must be 1.
example:
dw_rst_1: reset-controller@0000 {
compatible = "snps,dw-high-reset";
reg = <0x0000 0x4>;
#reset-cells = <1>;
};
dw_rst_2: reset-controller@1000 {
compatible = "snps,dw-low-reset";
reg = <0x1000 0x8>;
#reset-cells = <1>;
};

View File

@ -18,7 +18,8 @@ Required properties:
- reg : offset and length of the device registers.
- bus-frequency : the clock frequency for QUICC Engine.
- fsl,qe-num-riscs: define how many RISC engines the QE has.
- fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the
- fsl,qe-snums: This property has to be specified as '/bits/ 8' value,
defining the array of serial number (SNUM) values for the virtual
threads.
Optional properties:
@ -34,6 +35,11 @@ Recommended properties
- brg-frequency : the internal clock source frequency for baud-rate
generators in Hz.
Deprecated properties
- fsl,qe-num-snums: define how many serial number(SNUM) the QE can use
for the threads. Use fsl,qe-snums instead to not only specify the
number of snums, but also their values.
Example:
qe@e0100000 {
#address-cells = <1>;
@ -44,6 +50,11 @@ Example:
reg = <e0100000 480>;
brg-frequency = <0>;
bus-frequency = <179A7B00>;
fsl,qe-snums = /bits/ 8 <
0x04 0x05 0x0C 0x0D 0x14 0x15 0x1C 0x1D
0x24 0x25 0x2C 0x2D 0x34 0x35 0x88 0x89
0x98 0x99 0xA8 0xA9 0xB8 0xB9 0xC8 0xC9
0xD8 0xD9 0xE8 0xE9>;
}
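
A hedged sketch of pulling the new byte array out of the device tree with standard OF helpers; the QE driver's actual parsing may differ.

#include <linux/of.h>

static int example_read_snums(struct device_node *np, u8 *snums, int max)
{
        int num;

        num = of_property_count_elems_of_size(np, "fsl,qe-snums", sizeof(u8));
        if (num < 0)
                return num;
        if (num > max)
                return -EINVAL;

        return of_property_read_u8_array(np, "fsl,qe-snums", snums, num);
}
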
* Multi-User RAM (MURAM)

View File

@ -1626,6 +1626,18 @@ F: drivers/clocksource/timer-atlas7.c
N: [^a-z]sirf
X: drivers/gnss
ARM/CZ.NIC TURRIS MOX SUPPORT
M: Marek Behun <marek.behun@nic.cz>
W: http://mox.turris.cz
S: Maintained
F: Documentation/ABI/testing/debugfs-moxtet
F: Documentation/ABI/testing/sysfs-bus-moxtet-devices
F: Documentation/devicetree/bindings/bus/moxtet.txt
F: Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
F: include/linux/moxtet.h
F: drivers/bus/moxtet.c
F: drivers/gpio/gpio-moxtet.c
ARM/EBSA110 MACHINE SUPPORT
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -15577,6 +15589,7 @@ F: drivers/clk/clk-sc[mp]i.c
F: drivers/cpufreq/sc[mp]i-cpufreq.c
F: drivers/firmware/arm_scpi.c
F: drivers/firmware/arm_scmi/
F: drivers/reset/reset-scmi.c
F: include/linux/sc[mp]i_protocol.h
SYSTEM RESET/SHUTDOWN DRIVERS

View File

@ -2,6 +2,7 @@
# Mvebu SoC Family
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-turris-mox.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-uDPU.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-clearfog-gt-8k.dtb

View File

@ -0,0 +1,840 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for CZ.NIC Turris Mox Board
* 2019 by Marek Behun <marek.behun@nic.cz>
*/
/dts-v1/;
#include <dt-bindings/bus/moxtet.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include "armada-372x.dtsi"
/ {
model = "CZ.NIC Turris Mox Board";
compatible = "cznic,turris-mox", "marvell,armada3720",
"marvell,armada3710";
aliases {
spi0 = &spi0;
ethernet1 = &eth1;
};
chosen {
stdout-path = "serial0:115200n8";
};
memory@0 {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
};
leds {
compatible = "gpio-leds";
red {
label = "mox:red:activity";
gpios = <&gpiosb 21 GPIO_ACTIVE_LOW>;
linux,default-trigger = "default-on";
};
};
gpio-keys {
compatible = "gpio-keys";
reset {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpiosb 20 GPIO_ACTIVE_LOW>;
debounce-interval = <60>;
};
};
exp_usb3_vbus: usb3-vbus {
compatible = "regulator-fixed";
regulator-name = "usb3-vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
enable-active-high;
regulator-always-on;
gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>;
};
usb3_phy: usb3-phy {
compatible = "usb-nop-xceiv";
vcc-supply = <&exp_usb3_vbus>;
};
vsdc_reg: vsdc-reg {
compatible = "regulator-gpio";
regulator-name = "vsdc";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
gpios = <&gpiosb 23 GPIO_ACTIVE_HIGH>;
gpios-states = <0>;
states = <1800000 0x1
3300000 0x0>;
enable-active-high;
};
vsdio_reg: vsdio-reg {
compatible = "regulator-gpio";
regulator-name = "vsdio";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
gpios = <&gpiosb 22 GPIO_ACTIVE_HIGH>;
gpios-states = <0>;
states = <1800000 0x1
3300000 0x0>;
enable-active-high;
};
sdhci1_pwrseq: sdhci1-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpionb 19 GPIO_ACTIVE_HIGH>;
status = "okay";
};
sfp: sfp {
compatible = "sff,sfp+";
i2c-bus = <&i2c0>;
los-gpio = <&moxtet_sfp 0 GPIO_ACTIVE_HIGH>;
tx-fault-gpio = <&moxtet_sfp 1 GPIO_ACTIVE_HIGH>;
mod-def0-gpio = <&moxtet_sfp 2 GPIO_ACTIVE_LOW>;
tx-disable-gpio = <&moxtet_sfp 4 GPIO_ACTIVE_HIGH>;
rate-select0-gpio = <&moxtet_sfp 5 GPIO_ACTIVE_HIGH>;
/* enabled by U-Boot if SFP module is present */
status = "disabled";
};
};
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
clock-frequency = <100000>;
status = "okay";
rtc@6f {
compatible = "microchip,mcp7940x";
reg = <0x6f>;
};
};
&pcie_reset_pins {
function = "gpio";
};
&pcie0 {
pinctrl-names = "default";
pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
status = "okay";
max-link-speed = <2>;
reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
phys = <&comphy1 0>;
/* enabled by U-Boot if PCIe module is present */
status = "disabled";
};
&uart0 {
status = "okay";
};
&eth0 {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
phy-mode = "rgmii-id";
phy = <&phy1>;
status = "okay";
};
&eth1 {
phy-mode = "2500base-x";
managed = "in-band-status";
phys = <&comphy0 1>;
};
&sdhci0 {
wp-inverted;
bus-width = <4>;
cd-gpios = <&gpionb 10 GPIO_ACTIVE_HIGH>;
vqmmc-supply = <&vsdc_reg>;
marvell,pad-type = "sd";
status = "okay";
};
&sdhci1 {
pinctrl-names = "default";
pinctrl-0 = <&sdio_pins>;
non-removable;
bus-width = <4>;
marvell,pad-type = "sd";
vqmmc-supply = <&vsdio_reg>;
mmc-pwrseq = <&sdhci1_pwrseq>;
status = "okay";
};
&spi0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi_quad_pins &spi_cs1_pins>;
assigned-clocks = <&nb_periph_clk 7>;
assigned-clock-parents = <&tbg 1>;
assigned-clock-rates = <20000000>;
spi-flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <1>;
partition@0 {
label = "secure-firmware";
reg = <0x0 0x20000>;
};
partition@20000 {
label = "u-boot";
reg = <0x20000 0x160000>;
};
partition@180000 {
label = "u-boot-env";
reg = <0x180000 0x10000>;
};
partition@190000 {
label = "Rescue system";
reg = <0x190000 0x660000>;
};
partition@7f0000 {
label = "dtb";
reg = <0x7f0000 0x10000>;
};
};
};
moxtet: moxtet@1 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "cznic,moxtet";
reg = <1>;
reset-gpios = <&gpiosb 2 GPIO_ACTIVE_LOW>;
spi-max-frequency = <10000000>;
spi-cpol;
spi-cpha;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&gpiosb>;
interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
status = "okay";
moxtet_sfp: gpio@0 {
compatible = "cznic,moxtet-gpio";
gpio-controller;
#gpio-cells = <2>;
reg = <0>;
status = "disabled";
};
};
};
&usb2 {
status = "okay";
};
&usb3 {
status = "okay";
phys = <&comphy2 0>;
usb-phy = <&usb3_phy>;
};
&mdio {
pinctrl-names = "default";
pinctrl-0 = <&smi_pins>;
status = "okay";
phy1: ethernet-phy@1 {
reg = <1>;
};
/* switch nodes are enabled by U-Boot if modules are present */
switch0@10 {
compatible = "marvell,mv88e6190";
reg = <0x10 0>;
dsa,member = <0 0>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(0)>;
status = "disabled";
mdio {
#address-cells = <1>;
#size-cells = <0>;
switch0phy1: switch0phy1@1 {
reg = <0x1>;
};
switch0phy2: switch0phy2@2 {
reg = <0x2>;
};
switch0phy3: switch0phy3@3 {
reg = <0x3>;
};
switch0phy4: switch0phy4@4 {
reg = <0x4>;
};
switch0phy5: switch0phy5@5 {
reg = <0x5>;
};
switch0phy6: switch0phy6@6 {
reg = <0x6>;
};
switch0phy7: switch0phy7@7 {
reg = <0x7>;
};
switch0phy8: switch0phy8@8 {
reg = <0x8>;
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@1 {
reg = <0x1>;
label = "lan1";
phy-handle = <&switch0phy1>;
};
port@2 {
reg = <0x2>;
label = "lan2";
phy-handle = <&switch0phy2>;
};
port@3 {
reg = <0x3>;
label = "lan3";
phy-handle = <&switch0phy3>;
};
port@4 {
reg = <0x4>;
label = "lan4";
phy-handle = <&switch0phy4>;
};
port@5 {
reg = <0x5>;
label = "lan5";
phy-handle = <&switch0phy5>;
};
port@6 {
reg = <0x6>;
label = "lan6";
phy-handle = <&switch0phy6>;
};
port@7 {
reg = <0x7>;
label = "lan7";
phy-handle = <&switch0phy7>;
};
port@8 {
reg = <0x8>;
label = "lan8";
phy-handle = <&switch0phy8>;
};
port@9 {
reg = <0x9>;
label = "cpu";
ethernet = <&eth1>;
phy-mode = "2500base-x";
managed = "in-band-status";
};
switch0port10: port@a {
reg = <0xa>;
label = "dsa";
phy-mode = "2500base-x";
managed = "in-band-status";
link = <&switch1port9 &switch2port9>;
status = "disabled";
};
port-sfp@a {
reg = <0xa>;
label = "sfp";
sfp = <&sfp>;
phy-mode = "sgmii";
managed = "in-band-status";
status = "disabled";
};
};
};
switch0@2 {
compatible = "marvell,mv88e6085";
reg = <0x2 0>;
dsa,member = <0 0>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
status = "disabled";
mdio {
#address-cells = <1>;
#size-cells = <0>;
switch0phy1_topaz: switch0phy1@11 {
reg = <0x11>;
};
switch0phy2_topaz: switch0phy2@12 {
reg = <0x12>;
};
switch0phy3_topaz: switch0phy3@13 {
reg = <0x13>;
};
switch0phy4_topaz: switch0phy4@14 {
reg = <0x14>;
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@1 {
reg = <0x1>;
label = "lan1";
phy-handle = <&switch0phy1_topaz>;
};
port@2 {
reg = <0x2>;
label = "lan2";
phy-handle = <&switch0phy2_topaz>;
};
port@3 {
reg = <0x3>;
label = "lan3";
phy-handle = <&switch0phy3_topaz>;
};
port@4 {
reg = <0x4>;
label = "lan4";
phy-handle = <&switch0phy4_topaz>;
};
port@5 {
reg = <0x5>;
label = "cpu";
phy-mode = "2500base-x";
managed = "in-band-status";
ethernet = <&eth1>;
};
};
};
switch1@11 {
compatible = "marvell,mv88e6190";
reg = <0x11 0>;
dsa,member = <0 1>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(1)>;
status = "disabled";
mdio {
#address-cells = <1>;
#size-cells = <0>;
switch1phy1: switch1phy1@1 {
reg = <0x1>;
};
switch1phy2: switch1phy2@2 {
reg = <0x2>;
};
switch1phy3: switch1phy3@3 {
reg = <0x3>;
};
switch1phy4: switch1phy4@4 {
reg = <0x4>;
};
switch1phy5: switch1phy5@5 {
reg = <0x5>;
};
switch1phy6: switch1phy6@6 {
reg = <0x6>;
};
switch1phy7: switch1phy7@7 {
reg = <0x7>;
};
switch1phy8: switch1phy8@8 {
reg = <0x8>;
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@1 {
reg = <0x1>;
label = "lan9";
phy-handle = <&switch1phy1>;
};
port@2 {
reg = <0x2>;
label = "lan10";
phy-handle = <&switch1phy2>;
};
port@3 {
reg = <0x3>;
label = "lan11";
phy-handle = <&switch1phy3>;
};
port@4 {
reg = <0x4>;
label = "lan12";
phy-handle = <&switch1phy4>;
};
port@5 {
reg = <0x5>;
label = "lan13";
phy-handle = <&switch1phy5>;
};
port@6 {
reg = <0x6>;
label = "lan14";
phy-handle = <&switch1phy6>;
};
port@7 {
reg = <0x7>;
label = "lan15";
phy-handle = <&switch1phy7>;
};
port@8 {
reg = <0x8>;
label = "lan16";
phy-handle = <&switch1phy8>;
};
switch1port9: port@9 {
reg = <0x9>;
label = "dsa";
phy-mode = "2500base-x";
managed = "in-band-status";
link = <&switch0port10>;
};
switch1port10: port@a {
reg = <0xa>;
label = "dsa";
phy-mode = "2500base-x";
managed = "in-band-status";
link = <&switch2port9>;
status = "disabled";
};
port-sfp@a {
reg = <0xa>;
label = "sfp";
sfp = <&sfp>;
phy-mode = "sgmii";
managed = "in-band-status";
status = "disabled";
};
};
};
switch1@2 {
compatible = "marvell,mv88e6085";
reg = <0x2 0>;
dsa,member = <0 1>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
status = "disabled";
mdio {
#address-cells = <1>;
#size-cells = <0>;
switch1phy1_topaz: switch1phy1@11 {
reg = <0x11>;
};
switch1phy2_topaz: switch1phy2@12 {
reg = <0x12>;
};
switch1phy3_topaz: switch1phy3@13 {
reg = <0x13>;
};
switch1phy4_topaz: switch1phy4@14 {
reg = <0x14>;
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@1 {
reg = <0x1>;
label = "lan9";
phy-handle = <&switch1phy1_topaz>;
};
port@2 {
reg = <0x2>;
label = "lan10";
phy-handle = <&switch1phy2_topaz>;
};
port@3 {
reg = <0x3>;
label = "lan11";
phy-handle = <&switch1phy3_topaz>;
};
port@4 {
reg = <0x4>;
label = "lan12";
phy-handle = <&switch1phy4_topaz>;
};
port@5 {
reg = <0x5>;
label = "dsa";
phy-mode = "2500base-x";
managed = "in-band-status";
link = <&switch0port10>;
};
};
};
switch2@12 {
compatible = "marvell,mv88e6190";
reg = <0x12 0>;
dsa,member = <0 2>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(2)>;
status = "disabled";
mdio {
#address-cells = <1>;
#size-cells = <0>;
switch2phy1: switch2phy1@1 {
reg = <0x1>;
};
switch2phy2: switch2phy2@2 {
reg = <0x2>;
};
switch2phy3: switch2phy3@3 {
reg = <0x3>;
};
switch2phy4: switch2phy4@4 {
reg = <0x4>;
};
switch2phy5: switch2phy5@5 {
reg = <0x5>;
};
switch2phy6: switch2phy6@6 {
reg = <0x6>;
};
switch2phy7: switch2phy7@7 {
reg = <0x7>;
};
switch2phy8: switch2phy8@8 {
reg = <0x8>;
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@1 {
reg = <0x1>;
label = "lan17";
phy-handle = <&switch2phy1>;
};
port@2 {
reg = <0x2>;
label = "lan18";
phy-handle = <&switch2phy2>;
};
port@3 {
reg = <0x3>;
label = "lan19";
phy-handle = <&switch2phy3>;
};
port@4 {
reg = <0x4>;
label = "lan20";
phy-handle = <&switch2phy4>;
};
port@5 {
reg = <0x5>;
label = "lan21";
phy-handle = <&switch2phy5>;
};
port@6 {
reg = <0x6>;
label = "lan22";
phy-handle = <&switch2phy6>;
};
port@7 {
reg = <0x7>;
label = "lan23";
phy-handle = <&switch2phy7>;
};
port@8 {
reg = <0x8>;
label = "lan24";
phy-handle = <&switch2phy8>;
};
switch2port9: port@9 {
reg = <0x9>;
label = "dsa";
phy-mode = "2500base-x";
managed = "in-band-status";
link = <&switch1port10 &switch0port10>;
};
port-sfp@a {
reg = <0xa>;
label = "sfp";
sfp = <&sfp>;
phy-mode = "sgmii";
managed = "in-band-status";
status = "disabled";
};
};
};
switch2@2 {
compatible = "marvell,mv88e6085";
reg = <0x2 0>;
dsa,member = <0 2>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
status = "disabled";
mdio {
#address-cells = <1>;
#size-cells = <0>;
switch2phy1_topaz: switch2phy1@11 {
reg = <0x11>;
};
switch2phy2_topaz: switch2phy2@12 {
reg = <0x12>;
};
switch2phy3_topaz: switch2phy3@13 {
reg = <0x13>;
};
switch2phy4_topaz: switch2phy4@14 {
reg = <0x14>;
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@1 {
reg = <0x1>;
label = "lan17";
phy-handle = <&switch2phy1_topaz>;
};
port@2 {
reg = <0x2>;
label = "lan18";
phy-handle = <&switch2phy2_topaz>;
};
port@3 {
reg = <0x3>;
label = "lan19";
phy-handle = <&switch2phy3_topaz>;
};
port@4 {
reg = <0x4>;
label = "lan20";
phy-handle = <&switch2phy4_topaz>;
};
port@5 {
reg = <0x5>;
label = "dsa";
phy-mode = "2500base-x";
managed = "in-band-status";
link = <&switch1port10 &switch0port10>;
};
};
};
};

View File

@ -215,6 +215,11 @@
function = "spi";
};
spi_cs1_pins: spi-cs1-pins {
groups = "spi_cs1";
function = "spi";
};
i2c1_pins: i2c1-pins {
groups = "i2c1";
function = "i2c";
@ -419,6 +424,13 @@
clocks = <&nb_periph_clk 15>;
};
rwtm: mailbox@b0000 {
compatible = "marvell,armada-3700-rwtm-mailbox";
reg = <0xb0000 0x100>;
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
};
sdhci1: sdhci@d0000 {
compatible = "marvell,armada-3700-sdhci",
"marvell,sdhci-xenon";

View File

@ -73,16 +73,6 @@
gpio = <&expander0 1 GPIO_ACTIVE_HIGH>;
vin-supply = <&cp0_exp_usb3_1_current_regulator>;
};
cp0_usb3_0_phy: cp0-usb3-0-phy {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp0_reg_usb3_0_vbus>;
};
cp0_usb3_1_phy: cp0-usb3-1-phy {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp0_reg_usb3_1_vbus>;
};
};
&i2c0 {
@ -124,6 +114,8 @@
&cp0_pcie2 {
status = "okay";
phys = <&cp0_comphy5 2>;
phy-names = "cp0-pcie2-x1-phy";
};
&cp0_i2c0 {
@ -219,15 +211,36 @@
&cp0_sata0 {
status = "okay";
sata-port@1 {
phys = <&cp0_comphy3 1>;
phy-names = "cp0-sata0-1-phy";
};
};
&cp0_comphy1 {
cp0_usbh0_con: connector {
compatible = "usb-a-connector";
phy-supply = <&cp0_reg_usb3_0_vbus>;
};
};
&cp0_usb3_0 {
usb-phy = <&cp0_usb3_0_phy>;
phys = <&cp0_comphy1 0>;
phy-names = "cp0-usb3h0-comphy";
status = "okay";
};
&cp0_comphy4 {
cp0_usbh1_con: connector {
compatible = "usb-a-connector";
phy-supply = <&cp0_reg_usb3_1_vbus>;
};
};
&cp0_usb3_1 {
usb-phy = <&cp0_usb3_1_phy>;
phys = <&cp0_comphy4 1>;
phy-names = "cp0-usb3h1-comphy";
status = "okay";
};

View File

@ -51,11 +51,6 @@
status = "okay";
};
usb3h0_phy: usb3_phy0 {
compatible = "usb-nop-xceiv";
vcc-supply = <&v_5v0_usb3_hst_vbus>;
};
sfp_cp0_eth0: sfp-cp0-eth0 {
compatible = "sff,sfp";
i2c-bus = <&cp0_i2c1>;
@ -243,6 +238,8 @@
pinctrl-names = "default";
pinctrl-0 = <&cp0_pci0_reset_pins &cp0_wlan_disable_pins>;
reset-gpios = <&cp0_gpio2 0 GPIO_ACTIVE_LOW>;
phys = <&cp0_comphy0 0>;
phy-names = "cp0-pcie0-x1-phy";
status = "okay";
};
@ -348,6 +345,11 @@
&cp1_sata0 {
pinctrl-0 = <&cp0_pci1_reset_pins>;
status = "okay";
sata-port@1 {
phys = <&cp1_comphy0 1>;
phy-names = "cp1-sata0-1-phy";
};
};
&cp1_mdio {
@ -467,7 +469,15 @@
};
};
&cp1_comphy2 {
cp1_usbh0_con: connector {
compatible = "usb-a-connector";
phy-supply = <&v_5v0_usb3_hst_vbus>;
};
};
&cp1_usb3_0 {
usb-phy = <&usb3h0_phy>;
phys = <&cp1_comphy2 0>;
phy-names = "cp1-usb3h0-comphy";
status = "okay";
};

View File

@ -54,11 +54,6 @@
vcc-supply = <&cp0_reg_usb3_0_vbus>;
};
cp0_usb3_1_phy: cp0-usb3-1-phy {
compatible = "usb-nop-xceiv";
vcc-supply = <&cp0_reg_usb3_1_vbus>;
};
cp1_reg_usb3_0_vbus: cp1-usb3-0-vbus {
compatible = "regulator-fixed";
regulator-name = "cp1-usb3h0-vbus";
@ -108,11 +103,15 @@
/* CON6 on CP0 expansion */
&cp0_pcie0 {
phys = <&cp0_comphy0 0>;
phy-names = "cp0-pcie0-x1-phy";
status = "okay";
};
/* CON5 on CP0 expansion */
&cp0_pcie2 {
phys = <&cp0_comphy5 2>;
phy-names = "cp0-pcie2-x1-phy";
status = "okay";
};
@ -143,6 +142,15 @@
/* CON4 on CP0 expansion */
&cp0_sata0 {
status = "okay";
sata-port@0 {
phys = <&cp0_comphy1 0>;
phy-names = "cp0-sata0-0-phy";
};
sata-port@1 {
phys = <&cp0_comphy3 1>;
phy-names = "cp0-sata0-1-phy";
};
};
/* CON9 on CP0 expansion */
@ -151,9 +159,17 @@
status = "okay";
};
&cp0_comphy4 {
cp0_usbh1_con: connector {
compatible = "usb-a-connector";
phy-supply = <&cp0_reg_usb3_1_vbus>;
};
};
/* CON10 on CP0 expansion */
&cp0_usb3_1 {
usb-phy = <&cp0_usb3_1_phy>;
phys = <&cp0_comphy4 1>;
phy-names = "cp0-usb3h1-comphy";
status = "okay";
};
@ -187,16 +203,22 @@
/* CON6 on CP1 expansion */
&cp1_pcie0 {
phys = <&cp1_comphy0 0>;
phy-names = "cp1-pcie0-x1-phy";
status = "okay";
};
/* CON7 on CP1 expansion */
&cp1_pcie1 {
phys = <&cp1_comphy4 1>;
phy-names = "cp1-pcie1-x1-phy";
status = "okay";
};
/* CON5 on CP1 expansion */
&cp1_pcie2 {
phys = <&cp1_comphy5 2>;
phy-names = "cp1-pcie2-x1-phy";
status = "okay";
};
@ -273,6 +295,15 @@
/* CON4 on CP1 expansion */
&cp1_sata0 {
status = "okay";
sata-port@0 {
phys = <&cp1_comphy1 0>;
phy-names = "cp1-sata0-0-phy";
};
sata-port@1 {
phys = <&cp1_comphy3 1>;
phy-names = "cp1-sata0-1-phy";
};
};
/* CON9 on CP1 expansion */

View File

@ -61,11 +61,6 @@
status = "okay";
};
usb3h0_phy: usb3_phy0 {
compatible = "usb-nop-xceiv";
vcc-supply = <&v_5v0_usb3_hst_vbus>;
};
sfp_eth0: sfp-eth0 {
/* CON15,16 - CPM lane 4 */
compatible = "sff,sfp";
@ -186,6 +181,10 @@
reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>;
ranges = <0x81000000 0x0 0xf9010000 0x0 0xf9010000 0x0 0x10000
0x82000000 0x0 0xc0000000 0x0 0xc0000000 0x0 0x20000000>;
phys = <&cp0_comphy0 0>, <&cp0_comphy1 0>,
<&cp0_comphy2 0>, <&cp0_comphy3 0>;
phy-names = "cp0-pcie0-x4-lane0-phy", "cp0-pcie0-x4-lane1-phy",
"cp0-pcie0-x4-lane2-phy", "cp0-pcie0-x4-lane3-phy";
status = "okay";
};
@ -239,8 +238,13 @@
};
&cp0_sata0 {
/* CPM Lane 0 - U29 */
status = "okay";
/* CPM Lane 5 - U29 */
sata-port@1 {
phys = <&cp0_comphy5 1>;
phy-names = "cp0-sata0-1-phy";
};
};
&cp0_sdhci0 {
@ -324,9 +328,19 @@
};
&cp1_sata0 {
/* CPS Lane 1 - U32 */
/* CPS Lane 3 - U31 */
status = "okay";
/* CPS Lane 1 - U32 */
sata-port@0 {
phys = <&cp1_comphy1 0>;
phy-names = "cp1-sata0-0-phy";
};
/* CPS Lane 3 - U31 */
sata-port@1 {
phys = <&cp1_comphy3 1>;
phy-names = "cp1-sata0-1-phy";
};
};
&cp1_spi1 {
@ -341,8 +355,16 @@
};
};
&cp1_comphy2 {
cp1_usbh0_con: connector {
compatible = "usb-a-connector";
phy-supply = <&v_5v0_usb3_hst_vbus>;
};
};
&cp1_usb3_0 {
/* CPS Lane 2 - CON7 */
usb-phy = <&usb3h0_phy>;
phys = <&cp1_comphy2 0>;
phy-names = "cp1-usb3h0-comphy";
status = "okay";
};

View File

@ -21,6 +21,7 @@
reg = <0x000>;
enable-method = "psci";
#cooling-cells = <2>;
clocks = <&cpu_clk 0>;
};
cpu1: cpu@1 {
device_type = "cpu";
@ -28,6 +29,7 @@
reg = <0x001>;
enable-method = "psci";
#cooling-cells = <2>;
clocks = <&cpu_clk 0>;
};
cpu2: cpu@100 {
device_type = "cpu";
@ -35,6 +37,7 @@
reg = <0x100>;
enable-method = "psci";
#cooling-cells = <2>;
clocks = <&cpu_clk 1>;
};
cpu3: cpu@101 {
device_type = "cpu";
@ -42,7 +45,7 @@
reg = <0x101>;
enable-method = "psci";
#cooling-cells = <2>;
clocks = <&cpu_clk 1>;
};
};
};

View File

@ -280,6 +280,13 @@
#address-cells = <1>;
#size-cells = <1>;
cpu_clk: clock-cpu@278 {
compatible = "marvell,ap806-cpu-clock";
clocks = <&ap_clk 0>, <&ap_clk 1>;
#clock-cells = <1>;
reg = <0x278 0xa30>;
};
ap_thermal: thermal-sensor@80 {
compatible = "marvell,armada-ap806-thermal";
reg = <0x80 0x10>;

View File

@ -133,6 +133,9 @@
compatible = "marvell,comphy-cp110";
reg = <0x120000 0x6000>;
marvell,system-controller = <&CP110_LABEL(syscon0)>;
clocks = <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 6>,
<&CP110_LABEL(clk) 1 18>;
clock-names = "mg_clk", "mg_core_clk", "axi_clk";
#address-cells = <1>;
#size-cells = <0>;
@ -306,7 +309,17 @@
interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&CP110_LABEL(clk) 1 15>,
<&CP110_LABEL(clk) 1 16>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
sata-port@0 {
reg = <0>;
};
sata-port@1 {
reg = <1>;
};
};
CP110_LABEL(xor0): xor@6a0000 {

View File

@ -29,6 +29,16 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
config MOXTET
tristate "CZ.NIC Turris Mox module configuration bus"
depends on SPI_MASTER && OF
help
Say yes here to add support for the module configuration bus found
on CZ.NIC's Turris Mox. This is needed for the ability to discover
the order in which the modules are connected and to get/set some of
their settings. For example the GPIOs on Mox SFP module are
configured through this bus.
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
depends on ARM64 && (ARCH_HISI || COMPILE_TEST)

View File

@ -8,6 +8,7 @@ obj-$(CONFIG_ARM_CCI) += arm-cci.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/

View File

@ -0,0 +1,886 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Turris Mox module configuration bus driver
*
* Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
*/
#include <dt-bindings/bus/moxtet.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moxtet.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/spi/spi.h>
/*
* @name: module name for sysfs
* @hwirq_base: base index for IRQ for this module (-1 if no IRQs)
* @nirqs: how many interrupts does the shift register provide
* @desc: module description for kernel log
*/
static const struct {
const char *name;
int hwirq_base;
int nirqs;
const char *desc;
} mox_module_table[] = {
/* do not change order of this array! */
{ NULL, 0, 0, NULL },
{ "sfp", -1, 0, "MOX D (SFP cage)" },
{ "pci", MOXTET_IRQ_PCI, 1, "MOX B (Mini-PCIe)" },
{ "topaz", MOXTET_IRQ_TOPAZ, 1, "MOX C (4 port switch)" },
{ "peridot", MOXTET_IRQ_PERIDOT(0), 1, "MOX E (8 port switch)" },
{ "usb3", MOXTET_IRQ_USB3, 2, "MOX F (USB 3.0)" },
{ "pci-bridge", -1, 0, "MOX G (Mini-PCIe bridge)" },
};
static inline bool mox_module_known(unsigned int id)
{
return id >= TURRIS_MOX_MODULE_FIRST && id <= TURRIS_MOX_MODULE_LAST;
}
static inline const char *mox_module_name(unsigned int id)
{
if (mox_module_known(id))
return mox_module_table[id].name;
else
return "unknown";
}
#define DEF_MODULE_ATTR(name, fmt, ...) \
static ssize_t \
module_##name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
struct moxtet_device *mdev = to_moxtet_device(dev); \
return sprintf(buf, (fmt), __VA_ARGS__); \
} \
static DEVICE_ATTR_RO(module_##name)
DEF_MODULE_ATTR(id, "0x%x\n", mdev->id);
DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id));
DEF_MODULE_ATTR(description, "%s\n",
mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc
: "");
static struct attribute *moxtet_dev_attrs[] = {
&dev_attr_module_id.attr,
&dev_attr_module_name.attr,
&dev_attr_module_description.attr,
NULL,
};
static const struct attribute_group moxtet_dev_group = {
.attrs = moxtet_dev_attrs,
};
static const struct attribute_group *moxtet_dev_groups[] = {
&moxtet_dev_group,
NULL,
};
static int moxtet_match(struct device *dev, struct device_driver *drv)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet_driver *tdrv = to_moxtet_driver(drv);
const enum turris_mox_module_id *t;
if (of_driver_match_device(dev, drv))
return 1;
if (!tdrv->id_table)
return 0;
for (t = tdrv->id_table; *t; ++t)
if (*t == mdev->id)
return 1;
return 0;
}
struct bus_type moxtet_bus_type = {
.name = "moxtet",
.dev_groups = moxtet_dev_groups,
.match = moxtet_match,
};
EXPORT_SYMBOL_GPL(moxtet_bus_type);
int __moxtet_register_driver(struct module *owner,
struct moxtet_driver *mdrv)
{
mdrv->driver.owner = owner;
mdrv->driver.bus = &moxtet_bus_type;
return driver_register(&mdrv->driver);
}
EXPORT_SYMBOL_GPL(__moxtet_register_driver);
static int moxtet_dev_check(struct device *dev, void *data)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet_device *new_dev = data;
if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id &&
mdev->idx == new_dev->idx)
return -EBUSY;
return 0;
}
static void moxtet_dev_release(struct device *dev)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
put_device(mdev->moxtet->dev);
kfree(mdev);
}
static struct moxtet_device *
moxtet_alloc_device(struct moxtet *moxtet)
{
struct moxtet_device *dev;
if (!get_device(moxtet->dev))
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
put_device(moxtet->dev);
return NULL;
}
dev->moxtet = moxtet;
dev->dev.parent = moxtet->dev;
dev->dev.bus = &moxtet_bus_type;
dev->dev.release = moxtet_dev_release;
device_initialize(&dev->dev);
return dev;
}
static int moxtet_add_device(struct moxtet_device *dev)
{
static DEFINE_MUTEX(add_mutex);
int ret;
if (dev->idx >= TURRIS_MOX_MAX_MODULES || dev->id > 0xf)
return -EINVAL;
dev_set_name(&dev->dev, "moxtet-%s.%u", mox_module_name(dev->id),
dev->idx);
mutex_lock(&add_mutex);
ret = bus_for_each_dev(&moxtet_bus_type, NULL, dev,
moxtet_dev_check);
if (ret)
goto done;
ret = device_add(&dev->dev);
if (ret < 0)
dev_err(dev->moxtet->dev, "can't add %s, status %d\n",
dev_name(dev->moxtet->dev), ret);
done:
mutex_unlock(&add_mutex);
return ret;
}
static int __unregister(struct device *dev, void *null)
{
if (dev->of_node) {
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_put(dev->of_node);
}
device_unregister(dev);
return 0;
}
static struct moxtet_device *
of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc)
{
struct moxtet_device *dev;
u32 val;
int ret;
dev = moxtet_alloc_device(moxtet);
if (!dev) {
dev_err(moxtet->dev,
"Moxtet device alloc error for %pOF\n", nc);
return ERR_PTR(-ENOMEM);
}
ret = of_property_read_u32(nc, "reg", &val);
if (ret) {
dev_err(moxtet->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, ret);
goto err_put;
}
dev->idx = val;
if (dev->idx >= TURRIS_MOX_MAX_MODULES) {
dev_err(moxtet->dev, "%pOF Moxtet address 0x%x out of range\n",
nc, dev->idx);
ret = -EINVAL;
goto err_put;
}
dev->id = moxtet->modules[dev->idx];
if (!dev->id) {
dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc,
dev->idx);
ret = -ENODEV;
goto err_put;
}
of_node_get(nc);
dev->dev.of_node = nc;
ret = moxtet_add_device(dev);
if (ret) {
dev_err(moxtet->dev,
"Moxtet device register error for %pOF\n", nc);
of_node_put(nc);
goto err_put;
}
return dev;
err_put:
put_device(&dev->dev);
return ERR_PTR(ret);
}
static void of_register_moxtet_devices(struct moxtet *moxtet)
{
struct moxtet_device *dev;
struct device_node *nc;
if (!moxtet->dev->of_node)
return;
for_each_available_child_of_node(moxtet->dev->of_node, nc) {
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
dev = of_register_moxtet_device(moxtet, nc);
if (IS_ERR(dev)) {
dev_warn(moxtet->dev,
"Failed to create Moxtet device for %pOF\n",
nc);
of_node_clear_flag(nc, OF_POPULATED);
}
}
}
static void
moxtet_register_devices_from_topology(struct moxtet *moxtet)
{
struct moxtet_device *dev;
int i, ret;
for (i = 0; i < moxtet->count; ++i) {
dev = moxtet_alloc_device(moxtet);
if (!dev) {
dev_err(moxtet->dev, "Moxtet device %u alloc error\n",
i);
continue;
}
dev->idx = i;
dev->id = moxtet->modules[i];
ret = moxtet_add_device(dev);
if (ret && ret != -EBUSY) {
put_device(&dev->dev);
dev_err(moxtet->dev,
"Moxtet device %u register error: %i\n", i,
ret);
}
}
}
/*
* @nsame: how many modules with same id are already in moxtet->modules
*/
static int moxtet_set_irq(struct moxtet *moxtet, int idx, int id, int nsame)
{
int i, first;
struct moxtet_irqpos *pos;
first = mox_module_table[id].hwirq_base +
nsame * mox_module_table[id].nirqs;
if (first + mox_module_table[id].nirqs > MOXTET_NIRQS)
return -EINVAL;
for (i = 0; i < mox_module_table[id].nirqs; ++i) {
pos = &moxtet->irq.position[first + i];
pos->idx = idx;
pos->bit = i;
moxtet->irq.exists |= BIT(first + i);
}
return 0;
}
static int moxtet_find_topology(struct moxtet *moxtet)
{
u8 buf[TURRIS_MOX_MAX_MODULES];
int cnts[TURRIS_MOX_MODULE_LAST];
int i, ret;
memset(cnts, 0, sizeof(cnts));
ret = spi_read(to_spi_device(moxtet->dev), buf, TURRIS_MOX_MAX_MODULES);
if (ret < 0)
return ret;
if (buf[0] == TURRIS_MOX_CPU_ID_EMMC) {
dev_info(moxtet->dev, "Found MOX A (eMMC CPU) module\n");
} else if (buf[0] == TURRIS_MOX_CPU_ID_SD) {
dev_info(moxtet->dev, "Found MOX A (CPU) module\n");
} else {
dev_err(moxtet->dev, "Invalid Turris MOX A CPU module 0x%02x\n",
buf[0]);
return -ENODEV;
}
moxtet->count = 0;
for (i = 1; i < TURRIS_MOX_MAX_MODULES; ++i) {
int id;
if (buf[i] == 0xff)
break;
id = buf[i] & 0xf;
moxtet->modules[i-1] = id;
++moxtet->count;
if (mox_module_known(id)) {
dev_info(moxtet->dev, "Found %s module\n",
mox_module_table[id].desc);
if (moxtet_set_irq(moxtet, i-1, id, cnts[id]++) < 0)
dev_err(moxtet->dev,
" Cannot set IRQ for module %s\n",
mox_module_table[id].desc);
} else {
dev_warn(moxtet->dev,
"Unknown Moxtet module found (ID 0x%02x)\n",
id);
}
}
return 0;
}
static int moxtet_spi_read(struct moxtet *moxtet, u8 *buf)
{
struct spi_transfer xfer = {
.rx_buf = buf,
.tx_buf = moxtet->tx,
.len = moxtet->count + 1
};
int ret;
mutex_lock(&moxtet->lock);
ret = spi_sync_transfer(to_spi_device(moxtet->dev), &xfer, 1);
mutex_unlock(&moxtet->lock);
return ret;
}
int moxtet_device_read(struct device *dev)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet *moxtet = mdev->moxtet;
u8 buf[TURRIS_MOX_MAX_MODULES];
int ret;
if (mdev->idx >= moxtet->count)
return -EINVAL;
ret = moxtet_spi_read(moxtet, buf);
if (ret < 0)
return ret;
return buf[mdev->idx + 1] >> 4;
}
EXPORT_SYMBOL_GPL(moxtet_device_read);
int moxtet_device_write(struct device *dev, u8 val)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet *moxtet = mdev->moxtet;
int ret;
if (mdev->idx >= moxtet->count)
return -EINVAL;
mutex_lock(&moxtet->lock);
moxtet->tx[moxtet->count - mdev->idx] = val;
ret = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
moxtet->count + 1);
mutex_unlock(&moxtet->lock);
return ret;
}
EXPORT_SYMBOL_GPL(moxtet_device_write);
int moxtet_device_written(struct device *dev)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
struct moxtet *moxtet = mdev->moxtet;
if (mdev->idx >= moxtet->count)
return -EINVAL;
return moxtet->tx[moxtet->count - mdev->idx];
}
EXPORT_SYMBOL_GPL(moxtet_device_written);
#ifdef CONFIG_DEBUG_FS
static int moxtet_debug_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return nonseekable_open(inode, file);
}
static ssize_t input_read(struct file *file, char __user *buf, size_t len,
loff_t *ppos)
{
struct moxtet *moxtet = file->private_data;
u8 bin[TURRIS_MOX_MAX_MODULES];
u8 hex[sizeof(bin) * 2 + 1];
int ret, n;
ret = moxtet_spi_read(moxtet, bin);
if (ret < 0)
return ret;
n = moxtet->count + 1;
bin2hex(hex, bin, n);
hex[2*n] = '\n';
return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1);
}
static const struct file_operations input_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = input_read,
.llseek = no_llseek,
};
static ssize_t output_read(struct file *file, char __user *buf, size_t len,
loff_t *ppos)
{
struct moxtet *moxtet = file->private_data;
u8 hex[TURRIS_MOX_MAX_MODULES * 2 + 1];
u8 *p = hex;
int i;
mutex_lock(&moxtet->lock);
for (i = 0; i < moxtet->count; ++i)
p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]);
mutex_unlock(&moxtet->lock);
*p++ = '\n';
return simple_read_from_buffer(buf, len, ppos, hex, p - hex);
}
static ssize_t output_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct moxtet *moxtet = file->private_data;
u8 bin[TURRIS_MOX_MAX_MODULES];
u8 hex[sizeof(bin) * 2 + 1];
ssize_t res;
loff_t dummy = 0;
int err, i;
if (len > 2 * moxtet->count + 1 || len < 2 * moxtet->count)
return -EINVAL;
res = simple_write_to_buffer(hex, sizeof(hex), &dummy, buf, len);
if (res < 0)
return res;
if (len % 2 == 1 && hex[len - 1] != '\n')
return -EINVAL;
err = hex2bin(bin, hex, moxtet->count);
if (err < 0)
return -EINVAL;
mutex_lock(&moxtet->lock);
for (i = 0; i < moxtet->count; ++i)
moxtet->tx[moxtet->count - i] = bin[i];
err = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
moxtet->count + 1);
mutex_unlock(&moxtet->lock);
return err < 0 ? err : len;
}
static const struct file_operations output_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = output_read,
.write = output_write,
.llseek = no_llseek,
};
static int moxtet_register_debugfs(struct moxtet *moxtet)
{
struct dentry *root, *entry;
root = debugfs_create_dir("moxtet", NULL);
if (IS_ERR(root))
return PTR_ERR(root);
entry = debugfs_create_file_unsafe("input", 0444, root, moxtet,
&input_fops);
if (IS_ERR(entry))
goto err_remove;
entry = debugfs_create_file_unsafe("output", 0644, root, moxtet,
&output_fops);
if (IS_ERR(entry))
goto err_remove;
moxtet->debugfs_root = root;
return 0;
err_remove:
debugfs_remove_recursive(root);
return PTR_ERR(entry);
}
static void moxtet_unregister_debugfs(struct moxtet *moxtet)
{
debugfs_remove_recursive(moxtet->debugfs_root);
}
#else
static inline int moxtet_register_debugfs(struct moxtet *moxtet)
{
return 0;
}
static inline void moxtet_unregister_debugfs(struct moxtet *moxtet)
{
}
#endif
static int moxtet_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct moxtet *moxtet = d->host_data;
if (hw >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(hw))) {
dev_err(moxtet->dev, "Invalid hw irq number\n");
return -EINVAL;
}
irq_set_chip_data(irq, d->host_data);
irq_set_chip_and_handler(irq, &moxtet->irq.chip, handle_level_irq);
return 0;
}
static int moxtet_irq_domain_xlate(struct irq_domain *d,
struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
struct moxtet *moxtet = d->host_data;
int irq;
if (WARN_ON(intsize < 1))
return -EINVAL;
irq = intspec[0];
if (irq >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(irq)))
return -EINVAL;
*out_hwirq = irq;
*out_type = IRQ_TYPE_NONE;
return 0;
}
static const struct irq_domain_ops moxtet_irq_domain = {
.map = moxtet_irq_domain_map,
.xlate = moxtet_irq_domain_xlate,
};
static void moxtet_irq_mask(struct irq_data *d)
{
struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
moxtet->irq.masked |= BIT(d->hwirq);
}
static void moxtet_irq_unmask(struct irq_data *d)
{
struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
moxtet->irq.masked &= ~BIT(d->hwirq);
}
static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
struct moxtet_irqpos *pos = &moxtet->irq.position[d->hwirq];
int id;
id = moxtet->modules[pos->idx];
seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
static const struct irq_chip moxtet_irq_chip = {
.name = "moxtet",
.irq_mask = moxtet_irq_mask,
.irq_unmask = moxtet_irq_unmask,
.irq_print_chip = moxtet_irq_print_chip,
};
static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map)
{
struct moxtet_irqpos *pos = moxtet->irq.position;
u8 buf[TURRIS_MOX_MAX_MODULES];
int i, ret;
ret = moxtet_spi_read(moxtet, buf);
if (ret < 0)
return ret;
*map = 0;
for_each_set_bit(i, &moxtet->irq.exists, MOXTET_NIRQS) {
if (!(buf[pos[i].idx + 1] & BIT(4 + pos[i].bit)))
set_bit(i, map);
}
return 0;
}
static irqreturn_t moxtet_irq_thread_fn(int irq, void *data)
{
struct moxtet *moxtet = data;
unsigned long set;
int nhandled = 0, i, sub_irq, ret;
ret = moxtet_irq_read(moxtet, &set);
if (ret < 0)
goto out;
set &= ~moxtet->irq.masked;
do {
for_each_set_bit(i, &set, MOXTET_NIRQS) {
sub_irq = irq_find_mapping(moxtet->irq.domain, i);
handle_nested_irq(sub_irq);
dev_dbg(moxtet->dev, "%i irq\n", i);
++nhandled;
}
ret = moxtet_irq_read(moxtet, &set);
if (ret < 0)
goto out;
set &= ~moxtet->irq.masked;
} while (set);
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
static void moxtet_irq_free(struct moxtet *moxtet)
{
int i, irq;
for (i = 0; i < MOXTET_NIRQS; ++i) {
if (moxtet->irq.exists & BIT(i)) {
irq = irq_find_mapping(moxtet->irq.domain, i);
irq_dispose_mapping(irq);
}
}
irq_domain_remove(moxtet->irq.domain);
}
static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
MOXTET_NIRQS, 0,
&moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
}
for (i = 0; i < MOXTET_NIRQS; ++i)
if (moxtet->irq.exists & BIT(i))
irq_create_mapping(moxtet->irq.domain, i);
moxtet->irq.chip = moxtet_irq_chip;
moxtet->irq.masked = ~0;
ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn,
IRQF_ONESHOT, "moxtet", moxtet);
if (ret < 0)
goto err_free;
return 0;
err_free:
moxtet_irq_free(moxtet);
return ret;
}
static int moxtet_probe(struct spi_device *spi)
{
struct moxtet *moxtet;
int ret;
ret = spi_setup(spi);
if (ret < 0)
return ret;
moxtet = devm_kzalloc(&spi->dev, sizeof(struct moxtet),
GFP_KERNEL);
if (!moxtet)
return -ENOMEM;
moxtet->dev = &spi->dev;
spi_set_drvdata(spi, moxtet);
mutex_init(&moxtet->lock);
moxtet->dev_irq = of_irq_get(moxtet->dev->of_node, 0);
if (moxtet->dev_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (moxtet->dev_irq <= 0) {
dev_err(moxtet->dev, "No IRQ resource found\n");
return -ENXIO;
}
ret = moxtet_find_topology(moxtet);
if (ret < 0)
return ret;
if (moxtet->irq.exists) {
ret = moxtet_irq_setup(moxtet);
if (ret < 0)
return ret;
}
of_register_moxtet_devices(moxtet);
moxtet_register_devices_from_topology(moxtet);
ret = moxtet_register_debugfs(moxtet);
if (ret < 0)
dev_warn(moxtet->dev, "Failed creating debugfs entries: %i\n",
ret);
return 0;
}
static int moxtet_remove(struct spi_device *spi)
{
struct moxtet *moxtet = spi_get_drvdata(spi);
int dummy;
free_irq(moxtet->dev_irq, moxtet);
moxtet_irq_free(moxtet);
moxtet_unregister_debugfs(moxtet);
dummy = device_for_each_child(moxtet->dev, NULL, __unregister);
mutex_destroy(&moxtet->lock);
return 0;
}
static const struct of_device_id moxtet_dt_ids[] = {
{ .compatible = "cznic,moxtet" },
{},
};
MODULE_DEVICE_TABLE(of, moxtet_dt_ids);
static struct spi_driver moxtet_spi_driver = {
.driver = {
.name = "moxtet",
.of_match_table = moxtet_dt_ids,
},
.probe = moxtet_probe,
.remove = moxtet_remove,
};
static int __init moxtet_init(void)
{
int ret;
ret = bus_register(&moxtet_bus_type);
if (ret < 0) {
pr_err("moxtet bus registration failed: %d\n", ret);
goto error;
}
ret = spi_register_driver(&moxtet_spi_driver);
if (ret < 0) {
pr_err("moxtet spi driver registration failed: %d\n", ret);
goto error_bus;
}
return 0;
error_bus:
bus_unregister(&moxtet_bus_type);
error:
return ret;
}
postcore_initcall_sync(moxtet_init);
static void __exit moxtet_exit(void)
{
spi_unregister_driver(&moxtet_spi_driver);
bus_unregister(&moxtet_bus_type);
}
module_exit(moxtet_exit);
MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
MODULE_LICENSE("GPL v2");
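
For orientation, a hedged sketch of a client driver sitting on this bus: it matches by module ID and uses the moxtet_device_read()/moxtet_device_write() helpers exported above. struct moxtet_driver, to_moxtet_device() and the TURRIS_MOX_MODULE_* constant live in include/linux/moxtet.h, which is not part of this hunk, so those exact names are assumptions here.

/* Illustrative Moxtet client (a sketch, not part of this series). */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/moxtet.h>

static int example_mox_probe(struct device *dev)
{
        int input;

        /* Upper nibble of this module's byte in the input shift register. */
        input = moxtet_device_read(dev);
        if (input < 0)
                return input;

        dev_info(dev, "input nibble: 0x%x\n", input);

        /* Drive this module's output shift register. */
        return moxtet_device_write(dev, 0x1);
}

/* Zero-terminated list of module IDs this driver handles (assumed name). */
static const enum turris_mox_module_id example_ids[] = {
        TURRIS_MOX_MODULE_USB3,
        0,
};

static struct moxtet_driver example_mox_driver = {
        .id_table = example_ids,
        .driver = {
                .name   = "example-moxtet-client",
                .probe  = example_mox_probe,
        },
};

static int __init example_init(void)
{
        return __moxtet_register_driver(THIS_MODULE, &example_mox_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        driver_unregister(&example_mox_driver.driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL v2");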

View File

@ -69,7 +69,7 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct scmi_clk *clk = to_scmi_clk(hw);
return clk->handle->clk_ops->rate_set(clk->handle, clk->id, 0, rate);
return clk->handle->clk_ops->rate_set(clk->handle, clk->id, rate);
}
static int scmi_clk_enable(struct clk_hw *hw)

View File

@ -2,5 +2,5 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o

View File

@ -204,7 +204,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(id);
put_unaligned_le32(id, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)

View File

@ -56,7 +56,7 @@ struct scmi_msg_resp_clock_describe_rates {
struct scmi_clock_set_rate {
__le32 flags;
#define CLOCK_SET_ASYNC BIT(0)
#define CLOCK_SET_DELAYED BIT(1)
#define CLOCK_SET_IGNORE_RESP BIT(1)
#define CLOCK_SET_ROUND_UP BIT(2)
#define CLOCK_SET_ROUND_AUTO BIT(3)
__le32 id;
@ -67,6 +67,7 @@ struct scmi_clock_set_rate {
struct clock_info {
int num_clocks;
int max_async_req;
atomic_t cur_async_req;
struct scmi_clock_info *clk;
};
@ -106,7 +107,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
put_unaligned_le32(clk_id, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@ -203,39 +204,47 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
put_unaligned_le32(clk_id, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret) {
__le32 *pval = t->rx.buf;
*value = le32_to_cpu(*pval);
*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
}
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
}
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
u32 config, u64 rate)
u64 rate)
{
int ret;
u32 flags = 0;
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
struct clock_info *ci = handle->clk_priv;
ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
if (ci->max_async_req &&
atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
flags |= CLOCK_SET_ASYNC;
cfg = t->tx.buf;
cfg->flags = cpu_to_le32(config);
cfg->flags = cpu_to_le32(flags);
cfg->id = cpu_to_le32(clk_id);
cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32);
ret = scmi_do_xfer(handle, t);
if (flags & CLOCK_SET_ASYNC)
ret = scmi_do_xfer_with_response(handle, t);
else
ret = scmi_do_xfer(handle, t);
if (ci->max_async_req)
atomic_dec(&ci->cur_async_req);
scmi_xfer_put(handle, t);
return ret;

View File

@ -15,6 +15,8 @@
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
@ -48,11 +50,11 @@ struct scmi_msg_resp_prot_version {
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
* @id: The identifier of the command being sent
* @protocol_id: The identifier of the protocol used to send @id command
* @seq: The token to identify the message. when a message/command returns,
* the platform returns the whole message header unmodified including
* the token
* @id: The identifier of the message being sent
* @protocol_id: The identifier of the protocol used to send @id message
* @seq: The token to identify the message. When a message returns, the
* platform returns the whole message header unmodified including the
* token
* @status: Status of the transfer once it's complete
* @poll_completion: Indicate if the transfer needs to be polled for
* completion or interrupt mode is used
@ -84,17 +86,21 @@ struct scmi_msg {
* @rx: Receive message, the buffer should be pre-allocated to store
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
* @done: completion event
* @done: command message transmit completion event
* @async: pointer to delayed response message received event completion
*/
struct scmi_xfer {
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
struct completion done;
struct completion *async_done;
};
void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer_with_response(const struct scmi_handle *h,
struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
int scmi_handle_put(const struct scmi_handle *handle);

View File

@ -30,8 +30,14 @@
#include "common.h"
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND 0
#define MSG_TYPE_DELAYED_RESP 2
#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
@ -86,7 +92,7 @@ struct scmi_desc {
};
/**
* struct scmi_chan_info - Structure representing a SCMI channel informfation
* struct scmi_chan_info - Structure representing a SCMI channel information
*
* @cl: Mailbox Client
* @chan: Transmit/Receive mailbox channel
@ -111,8 +117,9 @@ struct scmi_chan_info {
* @handle: Instance of SCMI handle to send to clients
* @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification.
* @minfo: Message info
* @tx_idr: IDR object to map protocol id to channel info pointer
* @tx_minfo: Universal Transmit Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @node: List head
@ -123,8 +130,9 @@ struct scmi_info {
const struct scmi_desc *desc;
struct scmi_revision_info version;
struct scmi_handle handle;
struct scmi_xfers_info minfo;
struct scmi_xfers_info tx_minfo;
struct idr tx_idr;
struct idr rx_idr;
u8 *protocols_imp;
struct list_head node;
int users;
@ -182,7 +190,7 @@ static inline int scmi_to_linux_errno(int errno)
static inline void scmi_dump_header_dbg(struct device *dev,
struct scmi_msg_hdr *hdr)
{
dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
hdr->id, hdr->seq, hdr->protocol_id);
}
@ -190,64 +198,20 @@ static void scmi_fetch_response(struct scmi_xfer *xfer,
struct scmi_shared_mem __iomem *mem)
{
xfer->hdr.status = ioread32(mem->msg_payload);
/* Skip the length of header and statues in payload area i.e 8 bytes*/
/* Skip the length of header and status in payload area i.e 8 bytes */
xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}
/**
* scmi_rx_callback() - mailbox client callback for receive messages
*
* @cl: client pointer
* @m: mailbox message
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
*
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
u16 xfer_id;
struct scmi_xfer *xfer;
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->minfo;
struct scmi_shared_mem __iomem *mem = cinfo->payload;
xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
/* Are we even expecting this? */
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
return;
}
xfer = &minfo->xfer_block[xfer_id];
scmi_dump_header_dbg(dev, &xfer->hdr);
/* Is the message of valid length? */
if (xfer->rx.len > info->desc->max_msg_size) {
dev_err(dev, "unable to handle %zu xfer(max %d)\n",
xfer->rx.len, info->desc->max_msg_size);
return;
}
scmi_fetch_response(xfer, mem);
complete(&xfer->done);
}
/**
* pack_scmi_header() - packs and returns 32-bit header
*
* @hdr: pointer to header containing all the information on message id,
* protocol id and sequence id.
*
* Return: 32-bit packed command header to be sent to the platform.
* Return: 32-bit packed message header to be sent to the platform.
*/
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
@ -256,6 +220,18 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
/**
* unpack_scmi_header() - unpacks and records message and protocol id
*
* @msg_hdr: 32-bit packed message header sent from the platform
* @hdr: pointer to header to fetch message and protocol id.
*/
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
hdr->id = MSG_XTRACT_ID(msg_hdr);
hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}
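As a quick illustration of the header layout defined by the masks above, here is a hedged sketch (the ids and token are arbitrary values chosen only for the example) that packs and unpacks one header with the same FIELD_PREP()/FIELD_GET() helpers:

/*
 * Illustrative only: round-trip of the 32-bit header, assuming
 * msg_id 0x7, protocol id 0x13 and token 3 (arbitrary values).
 */
static inline void scmi_header_example(void)
{
	u32 hdr = FIELD_PREP(MSG_ID_MASK, 0x7) |
		  FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x13) |
		  FIELD_PREP(MSG_TOKEN_ID_MASK, 3);	/* hdr == 0xc4c07 */

	WARN_ON(MSG_XTRACT_ID(hdr) != 0x7);
	WARN_ON(MSG_XTRACT_PROT_ID(hdr) != 0x13);
	WARN_ON(MSG_XTRACT_TOKEN(hdr) != 3);
}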
/**
* scmi_tx_prepare() - mailbox client callback to prepare for the transfer
*
@ -271,6 +247,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct scmi_shared_mem __iomem *mem = cinfo->payload;
/*
* Ideally the channel must be free by now. However, if the OS timed
* out the last request while the platform was still processing it,
* wait until the platform releases the shared memory; otherwise we
* may end up overwriting its response with the new message payload,
* or vice versa.
*/
spin_until_cond(ioread32(&mem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
/* Mark channel busy + clear error */
iowrite32(0x0, &mem->channel_status);
iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
@ -285,8 +269,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
*
* Helper function which is used by various command functions that are
* Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
* This function can sleep depending on pending requests already in the system
@ -295,13 +280,13 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
*
* Return: 0 if all went fine, else corresponding error.
*/
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
struct scmi_xfers_info *minfo)
{
u16 xfer_id;
struct scmi_xfer *xfer;
unsigned long flags, bit_pos;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_xfers_info *minfo = &info->minfo;
/* Keep the locked section as small as possible */
spin_lock_irqsave(&minfo->xfer_lock, flags);
@ -324,18 +309,17 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
}
/**
* scmi_xfer_put() - Release a message
* __scmi_xfer_put() - Release a message
*
* @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: message that was reserved by scmi_xfer_get
*
* This holds a spinlock to maintain integrity of internal data structures.
*/
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
unsigned long flags;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_xfers_info *minfo = &info->minfo;
/*
* Keep the locked section as small as possible
@ -347,6 +331,68 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
/**
* scmi_rx_callback() - mailbox client callback for receive messages
*
* @cl: client pointer
* @m: mailbox message
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
*
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
u8 msg_type;
u32 msg_hdr;
u16 xfer_id;
struct scmi_xfer *xfer;
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
struct scmi_shared_mem __iomem *mem = cinfo->payload;
msg_hdr = ioread32(&mem->msg_header);
msg_type = MSG_XTRACT_TYPE(msg_hdr);
xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
if (msg_type == MSG_TYPE_NOTIFICATION)
return; /* Notifications not yet supported */
/* Are we even expecting this? */
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
return;
}
xfer = &minfo->xfer_block[xfer_id];
scmi_dump_header_dbg(dev, &xfer->hdr);
scmi_fetch_response(xfer, mem);
if (msg_type == MSG_TYPE_DELAYED_RESP)
complete(xfer->async_done);
else
complete(&xfer->done);
}
/**
* scmi_xfer_put() - Release a transmit message
*
* @handle: Pointer to SCMI entity handle
* @xfer: message that was reserved by scmi_xfer_get
*/
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
__scmi_xfer_put(&info->tx_minfo, xfer);
}
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
@ -435,8 +481,36 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
/**
* scmi_xfer_get_init() - Allocate and initialise one message
* scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
* response is received
*
* @handle: Pointer to SCMI entity handle
* @xfer: Transfer to initiate and wait for response
*
* Return: 0 if all goes well, -ETIMEDOUT if no delayed response is
* received, or the corresponding error if the transmit itself fails.
*/
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
struct scmi_xfer *xfer)
{
int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
DECLARE_COMPLETION_ONSTACK(async_response);
xfer->async_done = &async_response;
ret = scmi_do_xfer(handle, xfer);
if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
ret = -ETIMEDOUT;
xfer->async_done = NULL;
return ret;
}
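A hedged sketch of how a protocol implementation might use the new helper; the command and protocol identifiers below are hypothetical and not part of this patch (the real users are the reset and sensor protocols further down in this series):

/* Hypothetical caller: EXAMPLE_ASYNC_CMD/EXAMPLE_PROTOCOL are illustrative. */
static int example_async_command(const struct scmi_handle *handle, u32 domain)
{
	struct scmi_xfer *t;
	int ret;

	ret = scmi_xfer_get_init(handle, EXAMPLE_ASYNC_CMD, EXAMPLE_PROTOCOL,
				 sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	/* Waits for both the immediate and the delayed response */
	ret = scmi_do_xfer_with_response(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}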
/**
* scmi_xfer_get_init() - Allocate and initialise one message for transmit
*
* @handle: Pointer to SCMI entity handle
* @msg_id: Message identifier
@ -457,6 +531,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
int ret;
struct scmi_xfer *xfer;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
struct device *dev = info->dev;
/* Ensure we have sane transfer sizes */
@ -464,7 +539,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
tx_size > info->desc->max_msg_size)
return -ERANGE;
xfer = scmi_xfer_get(handle);
xfer = scmi_xfer_get(handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
@ -597,27 +672,13 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
static const struct scmi_desc scmi_generic_desc = {
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
.max_msg_size = 128,
};
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, scmi_of_match);
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
struct scmi_xfers_info *info = &sinfo->minfo;
struct scmi_xfers_info *info = &sinfo->tx_minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@ -652,9 +713,189 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
static int scmi_mailbox_check(struct device_node *np)
static int scmi_mailbox_check(struct device_node *np, int idx)
{
return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
idx, NULL);
}
static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
int prot_id, bool tx)
{
int ret, idx;
struct resource res;
resource_size_t size;
struct device_node *shmem, *np = dev->of_node;
struct scmi_chan_info *cinfo;
struct mbox_client *cl;
struct idr *idr;
const char *desc = tx ? "Tx" : "Rx";
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
if (scmi_mailbox_check(np, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
goto idr_alloc;
}
cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
cinfo->dev = dev;
cl = &cinfo->cl;
cl->dev = dev;
cl->rx_callback = scmi_rx_callback;
cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
cl->tx_block = false;
cl->knows_txdone = tx;
shmem = of_parse_phandle(np, "shmem", idx);
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
return ret;
}
size = resource_size(&res);
cinfo->payload = devm_ioremap(info->dev, res.start, size);
if (!cinfo->payload) {
dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
return -EADDRNOTAVAIL;
}
cinfo->chan = mbox_request_channel(cl, idx);
if (IS_ERR(cinfo->chan)) {
ret = PTR_ERR(cinfo->chan);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to request SCMI %s mailbox\n",
desc);
return ret;
}
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
return ret;
}
cinfo->handle = &info->handle;
return 0;
}
static inline int
scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
if (!ret) /* Rx is optional, hence no error check */
scmi_mbox_chan_setup(info, dev, prot_id, false);
return ret;
}
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
int prot_id)
{
struct scmi_device *sdev;
sdev = scmi_device_create(np, info->dev, prot_id);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
return;
}
if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
return;
}
/* setup handle now as the transport is ready */
scmi_set_handle(sdev);
}
static int scmi_probe(struct platform_device *pdev)
{
int ret;
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
/* Only mailbox method supported, check for the presence of one */
if (scmi_mailbox_check(np, 0)) {
dev_err(dev, "no mailbox found in %pOF\n", np);
return -EINVAL;
}
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
info->desc = desc;
INIT_LIST_HEAD(&info->node);
ret = scmi_xfer_info_init(info);
if (ret)
return ret;
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
idr_init(&info->rx_idr);
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
return ret;
}
mutex_lock(&scmi_list_mutex);
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
for_each_available_child_of_node(np, child) {
u32 prot_id;
if (of_property_read_u32(child, "reg", &prot_id))
continue;
if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
dev_err(dev, "Out of range protocol %d\n", prot_id);
if (!scmi_is_protocol_implemented(handle, prot_id)) {
dev_err(dev, "SCMI protocol %d not implemented\n",
prot_id);
continue;
}
scmi_create_protocol_device(child, info, prot_id);
}
return 0;
}
static int scmi_mbox_free_channel(int id, void *p, void *data)
@ -692,167 +933,26 @@ static int scmi_remove(struct platform_device *pdev)
ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
idr_destroy(&info->tx_idr);
idr = &info->rx_idr;
ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
idr_destroy(&info->rx_idr);
return ret;
}
static inline int
scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
int ret;
struct resource res;
resource_size_t size;
struct device_node *shmem, *np = dev->of_node;
struct scmi_chan_info *cinfo;
struct mbox_client *cl;
static const struct scmi_desc scmi_generic_desc = {
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
.max_msg_size = 128,
};
if (scmi_mailbox_check(np)) {
cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
goto idr_alloc;
}
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
{ /* Sentinel */ },
};
cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
cinfo->dev = dev;
cl = &cinfo->cl;
cl->dev = dev;
cl->rx_callback = scmi_rx_callback;
cl->tx_prepare = scmi_tx_prepare;
cl->tx_block = false;
cl->knows_txdone = true;
shmem = of_parse_phandle(np, "shmem", 0);
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
return ret;
}
size = resource_size(&res);
cinfo->payload = devm_ioremap(info->dev, res.start, size);
if (!cinfo->payload) {
dev_err(dev, "failed to ioremap SCMI Tx payload\n");
return -EADDRNOTAVAIL;
}
/* Transmit channel is first entry i.e. index 0 */
cinfo->chan = mbox_request_channel(cl, 0);
if (IS_ERR(cinfo->chan)) {
ret = PTR_ERR(cinfo->chan);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to request SCMI Tx mailbox\n");
return ret;
}
idr_alloc:
ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
return ret;
}
cinfo->handle = &info->handle;
return 0;
}
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
int prot_id)
{
struct scmi_device *sdev;
sdev = scmi_device_create(np, info->dev, prot_id);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
return;
}
if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
return;
}
/* setup handle now as the transport is ready */
scmi_set_handle(sdev);
}
static int scmi_probe(struct platform_device *pdev)
{
int ret;
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
/* Only mailbox method supported, check for the presence of one */
if (scmi_mailbox_check(np)) {
dev_err(dev, "no mailbox found in %pOF\n", np);
return -EINVAL;
}
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
info->desc = desc;
INIT_LIST_HEAD(&info->node);
ret = scmi_xfer_info_init(info);
if (ret)
return ret;
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
return ret;
}
mutex_lock(&scmi_list_mutex);
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
for_each_available_child_of_node(np, child) {
u32 prot_id;
if (of_property_read_u32(child, "reg", &prot_id))
continue;
if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
dev_err(dev, "Out of range protocol %d\n", prot_id);
if (!scmi_is_protocol_implemented(handle, prot_id)) {
dev_err(dev, "SCMI protocol %d not implemented\n",
prot_id);
continue;
}
scmi_create_protocol_device(child, info, prot_id);
}
return 0;
}
MODULE_DEVICE_TABLE(of, scmi_of_match);
static struct platform_driver scmi_driver = {
.driver = {


@ -5,7 +5,10 @@
* Copyright (C) 2018 ARM Ltd.
*/
#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>
@ -21,6 +24,7 @@ enum scmi_performance_protocol_cmd {
PERF_LEVEL_GET = 0x8,
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
struct scmi_opp {
@ -44,6 +48,7 @@ struct scmi_msg_resp_perf_domain_attributes {
#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
@ -87,17 +92,56 @@ struct scmi_msg_resp_perf_describe_levels {
} opp[0];
};
struct scmi_perf_get_fc_info {
__le32 domain;
__le32 message_id;
};
struct scmi_msg_resp_perf_desc_fc {
__le32 attr;
#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
__le32 rate_limit;
__le32 chan_addr_low;
__le32 chan_addr_high;
__le32 chan_size;
__le32 db_addr_low;
__le32 db_addr_high;
__le32 db_set_lmask;
__le32 db_set_hmask;
__le32 db_preserve_lmask;
__le32 db_preserve_hmask;
};
struct scmi_fc_db_info {
int width;
u64 set;
u64 mask;
void __iomem *addr;
};
struct scmi_fc_info {
void __iomem *level_set_addr;
void __iomem *limit_set_addr;
void __iomem *level_get_addr;
void __iomem *limit_get_addr;
struct scmi_fc_db_info *level_set_db;
struct scmi_fc_db_info *limit_set_db;
};
struct perf_dom_info {
bool set_limits;
bool set_perf;
bool perf_limit_notify;
bool perf_level_notify;
bool perf_fastchannels;
u32 opp_count;
u32 sustained_freq_khz;
u32 sustained_perf_level;
u32 mult_factor;
char name[SCMI_MAX_STR_SIZE];
struct scmi_opp opp[MAX_OPPS];
struct scmi_fc_info *fc_info;
};
struct scmi_perf_info {
@ -151,7 +195,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@ -162,6 +206,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
dom_info->sustained_freq_khz =
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
@ -249,8 +294,42 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
u32 max_perf, u32 min_perf)
#define SCMI_PERF_FC_RING_DB(w) \
do { \
u##w val = 0; \
\
if (db->mask) \
val = ioread##w(db->addr) & db->mask; \
iowrite##w((u##w)db->set | val, db->addr); \
} while (0)
static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
{
if (!db || !db->addr)
return;
if (db->width == 1)
SCMI_PERF_FC_RING_DB(8);
else if (db->width == 2)
SCMI_PERF_FC_RING_DB(16);
else if (db->width == 4)
SCMI_PERF_FC_RING_DB(32);
else /* db->width == 8 */
#ifdef CONFIG_64BIT
SCMI_PERF_FC_RING_DB(64);
#else
{
u64 val = 0;
if (db->mask)
val = ioread64_hi_lo(db->addr) & db->mask;
iowrite64_hi_lo(db->set, db->addr);
}
#endif
}
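To make the preserve/set semantics concrete, here is a minimal model of the doorbell update using plain integers instead of MMIO (the numbers are illustrative only):

/* Model of one SCMI_PERF_FC_RING_DB() update, no MMIO involved. */
static u32 ring_db_model(u32 old, u32 preserve_mask, u32 set_mask)
{
	u32 val = 0;

	if (preserve_mask)
		val = old & preserve_mask;

	/*
	 * e.g. old 0xf0f0f0f0, preserve 0x0000ffff, set 0x00010000
	 * -> 0x00010000 | 0x0000f0f0 == 0x0001f0f0
	 */
	return set_mask | val;
}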
static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
@ -272,8 +351,24 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
return ret;
}
static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
u32 *max_perf, u32 *min_perf)
static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
u32 max_perf, u32 min_perf)
{
struct scmi_perf_info *pi = handle->perf_priv;
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->limit_set_addr) {
iowrite32(max_perf, dom->fc_info->limit_set_addr);
iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
return 0;
}
return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
}
static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
@ -284,7 +379,7 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret) {
@ -298,8 +393,23 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
u32 level, bool poll)
static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
u32 *max_perf, u32 *min_perf)
{
struct scmi_perf_info *pi = handle->perf_priv;
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->limit_get_addr) {
*max_perf = ioread32(dom->fc_info->limit_get_addr);
*min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
return 0;
}
return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
}
static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
@ -321,8 +431,23 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
return ret;
}
static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
u32 *level, bool poll)
static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
u32 level, bool poll)
{
struct scmi_perf_info *pi = handle->perf_priv;
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->level_set_addr) {
iowrite32(level, dom->fc_info->level_set_addr);
scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
return 0;
}
return scmi_perf_mb_level_set(handle, domain, level, poll);
}
static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
@ -333,16 +458,128 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
return ret;
t->hdr.poll_completion = poll;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
*level = le32_to_cpu(*(__le32 *)t->rx.buf);
*level = get_unaligned_le32(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
}
static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
u32 *level, bool poll)
{
struct scmi_perf_info *pi = handle->perf_priv;
struct perf_dom_info *dom = pi->dom_info + domain;
if (dom->fc_info && dom->fc_info->level_get_addr) {
*level = ioread32(dom->fc_info->level_get_addr);
return 0;
}
return scmi_perf_mb_level_get(handle, domain, level, poll);
}
static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
{
if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
return true;
if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
return true;
return false;
}
static void
scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
u32 message_id, void __iomem **p_addr,
struct scmi_fc_db_info **p_db)
{
int ret;
u32 flags;
u64 phys_addr;
u8 size;
void __iomem *addr;
struct scmi_xfer *t;
struct scmi_fc_db_info *db;
struct scmi_perf_get_fc_info *info;
struct scmi_msg_resp_perf_desc_fc *resp;
if (!p_addr)
return;
ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
SCMI_PROTOCOL_PERF,
sizeof(*info), sizeof(*resp), &t);
if (ret)
return;
info = t->tx.buf;
info->domain = cpu_to_le32(domain);
info->message_id = cpu_to_le32(message_id);
ret = scmi_do_xfer(handle, t);
if (ret)
goto err_xfer;
resp = t->rx.buf;
flags = le32_to_cpu(resp->attr);
size = le32_to_cpu(resp->chan_size);
if (!scmi_perf_fc_size_is_valid(message_id, size))
goto err_xfer;
phys_addr = le32_to_cpu(resp->chan_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
addr = devm_ioremap(handle->dev, phys_addr, size);
if (!addr)
goto err_xfer;
*p_addr = addr;
if (p_db && SUPPORTS_DOORBELL(flags)) {
db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
if (!db)
goto err_xfer;
size = 1 << DOORBELL_REG_WIDTH(flags);
phys_addr = le32_to_cpu(resp->db_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
addr = devm_ioremap(handle->dev, phys_addr, size);
if (!addr)
goto err_xfer;
db->addr = addr;
db->width = size;
db->set = le32_to_cpu(resp->db_set_lmask);
db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
db->mask = le32_to_cpu(resp->db_preserve_lmask);
db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
*p_db = db;
}
err_xfer:
scmi_xfer_put(handle, t);
}
static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
&fc->level_set_addr, &fc->level_set_db);
scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
&fc->level_get_addr, NULL);
scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
&fc->limit_set_addr, &fc->limit_set_db);
scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
&fc->limit_get_addr, NULL);
*p_fc = fc;
}
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
@ -494,6 +731,9 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
scmi_perf_domain_attributes_get(handle, domain, dom);
scmi_perf_describe_levels_get(handle, domain, dom);
if (dom->perf_fastchannels)
scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
}
handle->perf_ops = &perf_ops;


@ -96,7 +96,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@ -147,11 +147,11 @@ scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
*state = le32_to_cpu(*(__le32 *)t->rx.buf);
*state = get_unaligned_le32(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;


@ -0,0 +1,231 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Reset Protocol
*
* Copyright (C) 2019 ARM Ltd.
*/
#include "common.h"
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
RESET = 0x4,
RESET_NOTIFY = 0x5,
};
enum scmi_reset_protocol_notify {
RESET_ISSUED = 0x0,
};
#define NUM_RESET_DOMAIN_MASK 0xffff
#define RESET_NOTIFY_ENABLE BIT(0)
struct scmi_msg_resp_reset_domain_attributes {
__le32 attributes;
#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
__le32 latency;
u8 name[SCMI_MAX_STR_SIZE];
};
struct scmi_msg_reset_domain_reset {
__le32 domain_id;
__le32 flags;
#define AUTONOMOUS_RESET BIT(0)
#define EXPLICIT_RESET_ASSERT BIT(1)
#define ASYNCHRONOUS_RESET BIT(2)
__le32 reset_state;
#define ARCH_RESET_TYPE BIT(31)
#define COLD_RESET_STATE BIT(0)
#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
};
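For reference, the composite reset_state used by the helpers below works out as follows (derived directly from the defines above):

/*
 * ARCH_COLD_RESET == ARCH_RESET_TYPE | COLD_RESET_STATE
 *                 == BIT(31) | BIT(0)
 *                 == 0x80000001
 */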
struct reset_dom_info {
bool async_reset;
bool reset_notify;
u32 latency_us;
char name[SCMI_MAX_STR_SIZE];
};
struct scmi_reset_info {
int num_domains;
struct reset_dom_info *dom_info;
};
static int scmi_reset_attributes_get(const struct scmi_handle *handle,
struct scmi_reset_info *pi)
{
int ret;
struct scmi_xfer *t;
u32 attr;
ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
if (ret)
return ret;
ret = scmi_do_xfer(handle, t);
if (!ret) {
attr = get_unaligned_le32(t->rx.buf);
pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
}
scmi_xfer_put(handle, t);
return ret;
}
static int
scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
struct reset_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
SCMI_PROTOCOL_RESET, sizeof(domain),
sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret) {
u32 attributes = le32_to_cpu(attr->attributes);
dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
dom_info->latency_us = le32_to_cpu(attr->latency);
if (dom_info->latency_us == U32_MAX)
dom_info->latency_us = 0;
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
scmi_xfer_put(handle, t);
return ret;
}
static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
{
struct scmi_reset_info *pi = handle->reset_priv;
return pi->num_domains;
}
static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
{
struct scmi_reset_info *pi = handle->reset_priv;
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
{
struct scmi_reset_info *pi = handle->reset_priv;
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->latency_us;
}
static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
u32 flags, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
struct scmi_reset_info *pi = handle->reset_priv;
struct reset_dom_info *rdom = pi->dom_info + domain;
if (rdom->async_reset)
flags |= ASYNCHRONOUS_RESET;
ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
sizeof(*dom), 0, &t);
if (ret)
return ret;
dom = t->tx.buf;
dom->domain_id = cpu_to_le32(domain);
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
if (rdom->async_reset)
ret = scmi_do_xfer_with_response(handle, t);
else
ret = scmi_do_xfer(handle, t);
scmi_xfer_put(handle, t);
return ret;
}
static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
{
return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
ARCH_COLD_RESET);
}
static int
scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
{
return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
ARCH_COLD_RESET);
}
static int
scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
{
return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
}
static struct scmi_reset_ops reset_ops = {
.num_domains_get = scmi_reset_num_domains_get,
.name_get = scmi_reset_name_get,
.latency_get = scmi_reset_latency_get,
.reset = scmi_reset_domain_reset,
.assert = scmi_reset_domain_assert,
.deassert = scmi_reset_domain_deassert,
};
static int scmi_reset_protocol_init(struct scmi_handle *handle)
{
int domain;
u32 version;
struct scmi_reset_info *pinfo;
scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
dev_dbg(handle->dev, "Reset Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
scmi_reset_attributes_get(handle, pinfo);
pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct reset_dom_info *dom = pinfo->dom_info + domain;
scmi_reset_domain_attributes_get(handle, domain, dom);
}
handle->reset_ops = &reset_ops;
handle->reset_priv = pinfo;
return 0;
}
static int __init scmi_reset_init(void)
{
return scmi_protocol_register(SCMI_PROTOCOL_RESET,
&scmi_reset_protocol_init);
}
subsys_initcall(scmi_reset_init);

View File

@ -9,8 +9,8 @@
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
SENSOR_CONFIG_SET = 0x4,
SENSOR_TRIP_POINT_SET = 0x5,
SENSOR_TRIP_POINT_NOTIFY = 0x4,
SENSOR_TRIP_POINT_CONFIG = 0x5,
SENSOR_READING_GET = 0x6,
};
@ -42,9 +42,10 @@ struct scmi_msg_resp_sensor_description {
} desc[0];
};
struct scmi_msg_set_sensor_config {
struct scmi_msg_sensor_trip_point_notify {
__le32 id;
__le32 event_control;
#define SENSOR_TP_NOTIFY_ALL BIT(0)
};
struct scmi_msg_set_sensor_trip_point {
@ -119,7 +120,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
do {
/* Set the number of sensors to be skipped/already read */
*(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
put_unaligned_le32(desc_index, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (ret)
@ -135,9 +136,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
for (cnt = 0; cnt < num_returned; cnt++) {
u32 attrh;
u32 attrh, attrl;
struct scmi_sensor_info *s;
attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
s = &si->sensors[desc_index + cnt];
s->id = le32_to_cpu(buf->desc[cnt].id);
@ -146,6 +148,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
/* Sign extend to a full s8 */
if (s->scale & SENSOR_SCALE_SIGN)
s->scale |= SENSOR_SCALE_EXTEND;
s->async = SUPPORTS_ASYNC_READ(attrl);
s->num_trip_points = NUM_TRIP_POINTS(attrl);
strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
}
@ -160,15 +164,15 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
return ret;
}
static int
scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
u32 sensor_id, bool enable)
{
int ret;
u32 evt_cntl = BIT(0);
u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_config *cfg;
struct scmi_msg_sensor_trip_point_notify *cfg;
ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@ -183,15 +187,16 @@ scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
return ret;
}
static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
u32 sensor_id, u8 trip_id, u64 trip_value)
static int
scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
u8 trip_id, u64 trip_value)
{
int ret;
u32 evt_cntl = SENSOR_TP_BOTH;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
if (ret)
return ret;
@ -209,11 +214,13 @@ static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
}
static int scmi_sensor_reading_get(const struct scmi_handle *handle,
u32 sensor_id, bool async, u64 *value)
u32 sensor_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
struct sensors_info *si = handle->sensor_priv;
struct scmi_sensor_info *s = si->sensors + sensor_id;
ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
@ -223,14 +230,18 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
ret = scmi_do_xfer(handle, t);
if (!ret) {
__le32 *pval = t->rx.buf;
*value = le32_to_cpu(*pval);
*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = scmi_do_xfer_with_response(handle, t);
if (!ret)
*value = get_unaligned_le64((void *)
((__le32 *)t->rx.buf + 1));
} else {
sensor->flags = cpu_to_le32(0);
ret = scmi_do_xfer(handle, t);
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
}
scmi_xfer_put(handle, t);
@ -255,8 +266,8 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle)
static struct scmi_sensor_ops sensor_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
.configuration_set = scmi_sensor_configuration_set,
.trip_point_set = scmi_sensor_trip_point_set,
.trip_point_notify = scmi_sensor_trip_point_notify,
.trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
};


@ -1445,6 +1445,15 @@ config GPIO_XRA1403
help
GPIO driver for EXAR XRA1403 16-bit SPI-based GPIO expander.
config GPIO_MOXTET
tristate "Turris Mox Moxtet bus GPIO expander"
depends on MOXTET
help
Say yes here if you are building for the Turris Mox router.
This is the driver needed for configuring the GPIOs via the Moxtet
bus. For example, the Mox module with an SFP cage needs this driver
so that phylink can use the corresponding GPIOs.
endmenu
menu "USB GPIO expanders"


@ -93,6 +93,7 @@ obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o


@ -0,0 +1,179 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Turris Mox Moxtet GPIO expander
*
* Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
*/
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/moxtet.h>
#include <linux/module.h>
#define MOXTET_GPIO_NGPIOS 12
#define MOXTET_GPIO_INPUTS 4
struct moxtet_gpio_desc {
u16 in_mask;
u16 out_mask;
};
static const struct moxtet_gpio_desc descs[] = {
[TURRIS_MOX_MODULE_SFP] = {
.in_mask = GENMASK(2, 0),
.out_mask = GENMASK(5, 4),
},
};
struct moxtet_gpio_chip {
struct device *dev;
struct gpio_chip gpio_chip;
const struct moxtet_gpio_desc *desc;
};
static int moxtet_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
int ret;
if (chip->desc->in_mask & BIT(offset)) {
ret = moxtet_device_read(chip->dev);
} else if (chip->desc->out_mask & BIT(offset)) {
ret = moxtet_device_written(chip->dev);
if (ret >= 0)
ret <<= MOXTET_GPIO_INPUTS;
} else {
return -EINVAL;
}
if (ret < 0)
return ret;
return !!(ret & BIT(offset));
}
static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
int val)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
int state;
state = moxtet_device_written(chip->dev);
if (state < 0)
return;
offset -= MOXTET_GPIO_INPUTS;
if (val)
state |= BIT(offset);
else
state &= ~BIT(offset);
moxtet_device_write(chip->dev, state);
}
static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
/* All lines are hard wired to be either input or output, not both. */
if (chip->desc->in_mask & BIT(offset))
return 1;
else if (chip->desc->out_mask & BIT(offset))
return 0;
else
return -EINVAL;
}
static int moxtet_gpio_direction_input(struct gpio_chip *gc,
unsigned int offset)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
if (chip->desc->in_mask & BIT(offset))
return 0;
else if (chip->desc->out_mask & BIT(offset))
return -ENOTSUPP;
else
return -EINVAL;
}
static int moxtet_gpio_direction_output(struct gpio_chip *gc,
unsigned int offset, int val)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
if (chip->desc->out_mask & BIT(offset))
moxtet_gpio_set_value(gc, offset, val);
else if (chip->desc->in_mask & BIT(offset))
return -ENOTSUPP;
else
return -EINVAL;
return 0;
}
static int moxtet_gpio_probe(struct device *dev)
{
struct moxtet_gpio_chip *chip;
struct device_node *nc = dev->of_node;
int id;
id = to_moxtet_device(dev)->id;
if (id >= ARRAY_SIZE(descs)) {
dev_err(dev, "%pOF Moxtet device id 0x%x is not supported by gpio-moxtet driver\n",
nc, id);
return -ENOTSUPP;
}
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->dev = dev;
chip->gpio_chip.parent = dev;
chip->desc = &descs[id];
dev_set_drvdata(dev, chip);
chip->gpio_chip.label = dev_name(dev);
chip->gpio_chip.get_direction = moxtet_gpio_get_direction;
chip->gpio_chip.direction_input = moxtet_gpio_direction_input;
chip->gpio_chip.direction_output = moxtet_gpio_direction_output;
chip->gpio_chip.get = moxtet_gpio_get_value;
chip->gpio_chip.set = moxtet_gpio_set_value;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS;
chip->gpio_chip.can_sleep = true;
chip->gpio_chip.owner = THIS_MODULE;
return devm_gpiochip_add_data(dev, &chip->gpio_chip, chip);
}
static const struct of_device_id moxtet_gpio_dt_ids[] = {
{ .compatible = "cznic,moxtet-gpio", },
{},
};
MODULE_DEVICE_TABLE(of, moxtet_gpio_dt_ids);
static const enum turris_mox_module_id moxtet_gpio_module_table[] = {
TURRIS_MOX_MODULE_SFP,
0,
};
static struct moxtet_driver moxtet_gpio_driver = {
.driver = {
.name = "moxtet-gpio",
.of_match_table = moxtet_gpio_dt_ids,
.probe = moxtet_gpio_probe,
},
.id_table = moxtet_gpio_module_table,
};
module_moxtet_driver(moxtet_gpio_driver);
MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander");
MODULE_LICENSE("GPL v2");
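Not part of this patch, but as a hedged sketch of how a consumer would drive one of these lines: the "tx-disable" con_id below is made up for illustration, and because the chip is registered with can_sleep the *_cansleep accessors are required:

#include <linux/gpio/consumer.h>

/* Hypothetical consumer; "tx-disable" is an illustrative con_id only. */
static int example_sfp_tx_enable(struct device *dev)
{
	struct gpio_desc *tx_disable;

	tx_disable = devm_gpiod_get(dev, "tx-disable", GPIOD_OUT_HIGH);
	if (IS_ERR(tx_disable))
		return PTR_ERR(tx_disable);

	/* Moxtet accesses may sleep, so use the _cansleep variant */
	gpiod_set_value_cansleep(tx_disable, 0);
	return 0;
}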


@ -72,7 +72,7 @@ static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
const struct scmi_handle *h = scmi_sensors->handle;
sensor = *(scmi_sensors->info[type] + channel);
ret = h->sensor_ops->reading_get(h, sensor->id, false, &value);
ret = h->sensor_ops->reading_get(h, sensor->id, &value);
if (ret)
return ret;


@ -116,9 +116,20 @@ config RESET_QCOM_PDC
to control reset signals provided by PDC for Modem, Compute,
Display, GPU, Debug, AOP, Sensors, Audio, SP and APPS.
config RESET_SCMI
tristate "Reset driver controlled via ARM SCMI interface"
depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
default ARM_SCMI_PROTOCOL
help
This driver provides support for reset signals/domains that are
controlled by firmware implementing the SCMI interface. It uses the
SCMI Message Protocol to interact with the firmware controlling all
the reset signals.
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN
default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN || ARC
help
This enables a simple reset controller driver for reset lines that
can be asserted and deasserted by toggling bits in a contiguous,


@ -18,6 +18,7 @@ obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o


@ -169,9 +169,9 @@ static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
[IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
[IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) },
[IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) },
[IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
[IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
[IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
[IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
[IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
[IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
[IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR,
BIT(2) | BIT(1) },
[IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
@ -220,9 +220,9 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */
case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */
value = assert ? 0 : bit;


@ -1,58 +1,9 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Amlogic Meson Reset Controller driver
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* BSD LICENSE
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/err.h>
#include <linux/init.h>


@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ARM System Control and Management Interface (ARM SCMI) reset driver
*
* Copyright (C) 2019 ARM Ltd.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/reset-controller.h>
#include <linux/scmi_protocol.h>
/**
* struct scmi_reset_data - reset controller information structure
* @rcdev: reset controller entity
* @handle: ARM SCMI handle used for communication with system controller
*/
struct scmi_reset_data {
struct reset_controller_dev rcdev;
const struct scmi_handle *handle;
};
#define to_scmi_reset_data(p) container_of((p), struct scmi_reset_data, rcdev)
#define to_scmi_handle(p) (to_scmi_reset_data(p)->handle)
/**
* scmi_reset_assert() - assert device reset
* @rcdev: reset controller entity
* @id: ID of the reset to be asserted
*
* This function implements the reset driver op to assert a device's reset
* using the ARM SCMI protocol.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int
scmi_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
const struct scmi_handle *handle = to_scmi_handle(rcdev);
return handle->reset_ops->assert(handle, id);
}
/**
* scmi_reset_deassert() - deassert device reset
* @rcdev: reset controller entity
* @id: ID of the reset to be deasserted
*
* This function implements the reset driver op to deassert a device's reset
* using the ARM SCMI protocol.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int
scmi_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
const struct scmi_handle *handle = to_scmi_handle(rcdev);
return handle->reset_ops->deassert(handle, id);
}
/**
* scmi_reset_reset() - reset the device
* @rcdev: reset controller entity
* @id: ID of the reset signal to be reset(assert + deassert)
*
* This function implements the reset driver op to trigger a device's
* reset signal using the ARM SCMI protocol.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int
scmi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
const struct scmi_handle *handle = to_scmi_handle(rcdev);
return handle->reset_ops->reset(handle, id);
}
static const struct reset_control_ops scmi_reset_ops = {
.assert = scmi_reset_assert,
.deassert = scmi_reset_deassert,
.reset = scmi_reset_reset,
};
static int scmi_reset_probe(struct scmi_device *sdev)
{
struct scmi_reset_data *data;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
const struct scmi_handle *handle = sdev->handle;
if (!handle || !handle->reset_ops)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->rcdev.ops = &scmi_reset_ops;
data->rcdev.owner = THIS_MODULE;
data->rcdev.of_node = np;
data->rcdev.nr_resets = handle->reset_ops->num_domains_get(handle);
return devm_reset_controller_register(dev, &data->rcdev);
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_RESET },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_reset_driver = {
.name = "scmi-reset",
.probe = scmi_reset_probe,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_reset_driver);
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI reset controller driver");
MODULE_LICENSE("GPL v2");
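For context, a hedged sketch of a peripheral driver consuming one of these SCMI reset lines through the generic reset consumer API (nothing below is specific to this driver):

#include <linux/reset.h>

/* Hypothetical consumer of a firmware-provided reset line. */
static int example_reset_peripheral(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Ends up in scmi_reset_reset() -> handle->reset_ops->reset() */
	return reset_control_reset(rst);
}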


@ -127,6 +127,9 @@ static const struct of_device_id reset_simple_dt_ids[] = {
{ .compatible = "aspeed,ast2500-lpc-reset" },
{ .compatible = "bitmain,bm1880-reset",
.data = &reset_simple_active_low },
{ .compatible = "snps,dw-high-reset" },
{ .compatible = "snps,dw-low-reset",
.data = &reset_simple_active_low },
{ /* sentinel */ },
};


@ -73,7 +73,7 @@ static u64 get_mc_fw_base_address(void)
mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
if (!mcfbaregs) {
pr_err("could not map MC Firmaware Base registers\n");
pr_err("could not map MC Firmware Base registers\n");
return 0;
}


@ -10,6 +10,7 @@
* General Purpose functions for the global management of the
* QUICC Engine (QE).
*/
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@ -39,29 +40,32 @@ static DEFINE_SPINLOCK(qe_lock);
DEFINE_SPINLOCK(cmxgcr_lock);
EXPORT_SYMBOL(cmxgcr_lock);
/* QE snum state */
enum qe_snum_state {
QE_SNUM_STATE_USED,
QE_SNUM_STATE_FREE
};
/* QE snum */
struct qe_snum {
u8 num;
enum qe_snum_state state;
};
/* We allocate this here because it is used almost exclusively for
* the communication processor devices.
*/
struct qe_immap __iomem *qe_immr;
EXPORT_SYMBOL(qe_immr);
static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
static u8 snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
static DECLARE_BITMAP(snum_state, QE_NUM_OF_SNUM);
static unsigned int qe_num_of_snum;
static phys_addr_t qebase = -1;
static struct device_node *qe_get_device_node(void)
{
struct device_node *qe;
/*
* Newer device trees have an "fsl,qe" compatible property for the QE
* node, but we still need to support older device trees.
*/
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (qe)
return qe;
return of_find_node_by_type(NULL, "qe");
}
static phys_addr_t get_qe_base(void)
{
struct device_node *qe;
@ -71,12 +75,9 @@ static phys_addr_t get_qe_base(void)
if (qebase != -1)
return qebase;
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!qe) {
qe = of_find_node_by_type(NULL, "qe");
if (!qe)
return qebase;
}
qe = qe_get_device_node();
if (!qe)
return qebase;
ret = of_address_to_resource(qe, 0, &res);
if (!ret)
@ -170,12 +171,9 @@ unsigned int qe_get_brg_clk(void)
if (brg_clk)
return brg_clk;
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!qe) {
qe = of_find_node_by_type(NULL, "qe");
if (!qe)
return brg_clk;
}
qe = qe_get_device_node();
if (!qe)
return brg_clk;
prop = of_get_property(qe, "brg-frequency", &size);
if (prop && size == sizeof(*prop))
@ -281,7 +279,6 @@ EXPORT_SYMBOL(qe_clock_source);
*/
static void qe_snums_init(void)
{
int i;
static const u8 snum_init_76[] = {
0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
@ -302,19 +299,39 @@ static void qe_snums_init(void)
0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
};
static const u8 *snum_init;
struct device_node *qe;
const u8 *snum_init;
int i;
qe_num_of_snum = qe_get_num_of_snums();
if (qe_num_of_snum == 76)
snum_init = snum_init_76;
else
snum_init = snum_init_46;
for (i = 0; i < qe_num_of_snum; i++) {
snums[i].num = snum_init[i];
snums[i].state = QE_SNUM_STATE_FREE;
bitmap_zero(snum_state, QE_NUM_OF_SNUM);
qe_num_of_snum = 28; /* The default number of snum for threads is 28 */
qe = qe_get_device_node();
if (qe) {
i = of_property_read_variable_u8_array(qe, "fsl,qe-snums",
snums, 1, QE_NUM_OF_SNUM);
if (i > 0) {
of_node_put(qe);
qe_num_of_snum = i;
return;
}
/*
* Fall back to legacy binding of using the value of
* fsl,qe-num-snums to choose one of the static arrays
* above.
*/
of_property_read_u32(qe, "fsl,qe-num-snums", &qe_num_of_snum);
of_node_put(qe);
}
if (qe_num_of_snum == 76) {
snum_init = snum_init_76;
} else if (qe_num_of_snum == 28 || qe_num_of_snum == 46) {
snum_init = snum_init_46;
} else {
pr_err("QE: unsupported value of fsl,qe-num-snums: %u\n", qe_num_of_snum);
return;
}
memcpy(snums, snum_init, qe_num_of_snum);
}
int qe_get_snum(void)
@ -324,12 +341,10 @@ int qe_get_snum(void)
int i;
spin_lock_irqsave(&qe_lock, flags);
for (i = 0; i < qe_num_of_snum; i++) {
if (snums[i].state == QE_SNUM_STATE_FREE) {
snums[i].state = QE_SNUM_STATE_USED;
snum = snums[i].num;
break;
}
i = find_first_zero_bit(snum_state, qe_num_of_snum);
if (i < qe_num_of_snum) {
set_bit(i, snum_state);
snum = snums[i];
}
spin_unlock_irqrestore(&qe_lock, flags);
@ -339,14 +354,10 @@ EXPORT_SYMBOL(qe_get_snum);
void qe_put_snum(u8 snum)
{
int i;
const u8 *p = memchr(snums, snum, qe_num_of_snum);
for (i = 0; i < qe_num_of_snum; i++) {
if (snums[i].num == snum) {
snums[i].state = QE_SNUM_STATE_FREE;
break;
}
}
if (p)
clear_bit(p - snums, snum_state);
}
EXPORT_SYMBOL(qe_put_snum);
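A short usage sketch of the reworked allocator (the caller is hypothetical): qe_get_snum() now hands out one of the values loaded above and tracks it in the bitmap, and qe_put_snum() releases it:

/* Hypothetical caller of the SNUM allocator. */
static int example_claim_snum(void)
{
	int snum = qe_get_snum();

	if (snum < 0)		/* -EBUSY once all SNUMs are in use */
		return snum;

	/* ... use the serial number, then hand it back ... */
	qe_put_snum(snum);

	return 0;
}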
@ -572,16 +583,9 @@ struct qe_firmware_info *qe_get_firmware_info(void)
initialized = 1;
/*
* Newer device trees have an "fsl,qe" compatible property for the QE
* node, but we still need to support older device trees.
*/
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!qe) {
qe = of_find_node_by_type(NULL, "qe");
if (!qe)
return NULL;
}
qe = qe_get_device_node();
if (!qe)
return NULL;
/* Find the 'firmware' child node */
fw = of_get_child_by_name(qe, "firmware");
@ -627,16 +631,9 @@ unsigned int qe_get_num_of_risc(void)
unsigned int num_of_risc = 0;
const u32 *prop;
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!qe) {
/* Older devices trees did not have an "fsl,qe"
* compatible property, so we need to look for
* the QE node by name.
*/
qe = of_find_node_by_type(NULL, "qe");
if (!qe)
return num_of_risc;
}
qe = qe_get_device_node();
if (!qe)
return num_of_risc;
prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
if (prop && size == sizeof(*prop))
@@ -650,37 +647,7 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
unsigned int qe_get_num_of_snums(void)
{
struct device_node *qe;
int size;
unsigned int num_of_snums;
const u32 *prop;
num_of_snums = 28; /* The default number of snum for threads is 28 */
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!qe) {
/* Older devices trees did not have an "fsl,qe"
* compatible property, so we need to look for
* the QE node by name.
*/
qe = of_find_node_by_type(NULL, "qe");
if (!qe)
return num_of_snums;
}
prop = of_get_property(qe, "fsl,qe-num-snums", &size);
if (prop && size == sizeof(*prop)) {
num_of_snums = *prop;
if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
/* No QE ever has fewer than 28 SNUMs */
pr_err("QE: number of snum is invalid\n");
of_node_put(qe);
return -EINVAL;
}
}
of_node_put(qe);
return num_of_snums;
return qe_num_of_snum;
}
EXPORT_SYMBOL(qe_get_num_of_snums);
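Every lookup in this file now funnels through qe_get_device_node(), whose definition is not part of the hunks shown here. Judging from the open-coded sequences it replaces, the helper presumably wraps the "fsl,qe" compatible match with the legacy fallback to the node type, roughly as in this sketch (an assumption, not necessarily the merged implementation):

static struct device_node *qe_get_device_node(void)
{
	struct device_node *qe;

	/*
	 * Newer device trees have an "fsl,qe" compatible property for the
	 * QE node, but older device trees must still be supported.
	 */
	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
	if (qe)
		return qe;
	return of_find_node_by_type(NULL, "qe");
}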


@@ -170,7 +170,7 @@ struct rcar_sysc_pd {
struct generic_pm_domain genpd;
struct rcar_sysc_ch ch;
unsigned int flags;
char name[0];
char name[];
};
static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d)
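The zero-length array at the end of rcar_sysc_pd becomes a C99 flexible array member; the allocation idiom it relies on is unchanged: reserve sizeof(struct) plus whatever the trailing name needs, in a single allocation. A standalone illustration (user-space C, hypothetical struct and helper names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pd {
	unsigned int flags;
	char name[];	/* flexible array member, formerly name[0] */
};

/* Allocate the header and the trailing string in one block. */
static struct pd *pd_alloc(const char *name, unsigned int flags)
{
	struct pd *pd = malloc(sizeof(*pd) + strlen(name) + 1);

	if (!pd)
		return NULL;
	pd->flags = flags;
	strcpy(pd->name, name);
	return pd;
}

int main(void)
{
	struct pd *pd = pd_alloc("always-on", 0);

	if (pd) {
		printf("%s\n", pd->name);
		free(pd);
	}
	return 0;
}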


@@ -148,6 +148,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
might_sleep();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;


@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Constant for device tree bindings for Turris Mox module configuration bus
*
* Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
*/
#ifndef _DT_BINDINGS_BUS_MOXTET_H
#define _DT_BINDINGS_BUS_MOXTET_H
#define MOXTET_IRQ_PCI 0
#define MOXTET_IRQ_USB3 4
#define MOXTET_IRQ_PERIDOT(n) (8 + (n))
#define MOXTET_IRQ_TOPAZ 12
#endif /* _DT_BINDINGS_BUS_MOXTET_H */


@@ -1,56 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* BSD LICENSE
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H
#define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H


@@ -1,56 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* BSD LICENSE
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H
#define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H


@@ -31,33 +31,33 @@
#define IMX8MQ_RESET_OTG2_PHY_RESET 20
#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21
#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22
#define IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N 23
#define IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N 24
#define IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N 25
#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23
#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24
#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25
#define IMX8MQ_RESET_PCIEPHY 26
#define IMX8MQ_RESET_PCIEPHY_PERST 27
#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28
#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29
#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30
#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_DISP_RESET 31
#define IMX8MQ_RESET_GPU_RESET 32
#define IMX8MQ_RESET_VPU_RESET 33
#define IMX8MQ_RESET_PCIEPHY2 34
#define IMX8MQ_RESET_PCIEPHY2_PERST 35
#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36
#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37
#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38
#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39
#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40
#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41
#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42
#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43
#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_DDRC1_PRST 44
#define IMX8MQ_RESET_DDRC1_CORE_RESET 45
#define IMX8MQ_RESET_DDRC1_PHY_RESET 46
#define IMX8MQ_RESET_DDRC2_PRST 47
#define IMX8MQ_RESET_DDRC2_CORE_RESET 48
#define IMX8MQ_RESET_DDRC2_PHY_RESET 49
#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM does NOT support */
#define IMX8MQ_RESET_NUM 50


@@ -0,0 +1,109 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Turris Mox module configuration bus driver
*
* Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
*/
#ifndef __LINUX_MOXTET_H
#define __LINUX_MOXTET_H
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mutex.h>
#define TURRIS_MOX_MAX_MODULES 10
enum turris_mox_cpu_module_id {
TURRIS_MOX_CPU_ID_EMMC = 0x00,
TURRIS_MOX_CPU_ID_SD = 0x10,
};
enum turris_mox_module_id {
TURRIS_MOX_MODULE_FIRST = 0x01,
TURRIS_MOX_MODULE_SFP = 0x01,
TURRIS_MOX_MODULE_PCI = 0x02,
TURRIS_MOX_MODULE_TOPAZ = 0x03,
TURRIS_MOX_MODULE_PERIDOT = 0x04,
TURRIS_MOX_MODULE_USB3 = 0x05,
TURRIS_MOX_MODULE_PCI_BRIDGE = 0x06,
TURRIS_MOX_MODULE_LAST = 0x06,
};
#define MOXTET_NIRQS 16
extern struct bus_type moxtet_type;
struct moxtet {
struct device *dev;
struct mutex lock;
u8 modules[TURRIS_MOX_MAX_MODULES];
int count;
u8 tx[TURRIS_MOX_MAX_MODULES];
int dev_irq;
struct {
struct irq_domain *domain;
struct irq_chip chip;
unsigned long masked, exists;
struct moxtet_irqpos {
u8 idx;
u8 bit;
} position[MOXTET_NIRQS];
} irq;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
#endif
};
struct moxtet_driver {
const enum turris_mox_module_id *id_table;
struct device_driver driver;
};
static inline struct moxtet_driver *
to_moxtet_driver(struct device_driver *drv)
{
if (!drv)
return NULL;
return container_of(drv, struct moxtet_driver, driver);
}
extern int __moxtet_register_driver(struct module *owner,
struct moxtet_driver *mdrv);
static inline void moxtet_unregister_driver(struct moxtet_driver *mdrv)
{
if (mdrv)
driver_unregister(&mdrv->driver);
}
#define moxtet_register_driver(driver) \
__moxtet_register_driver(THIS_MODULE, driver)
#define module_moxtet_driver(__moxtet_driver) \
module_driver(__moxtet_driver, moxtet_register_driver, \
moxtet_unregister_driver)
struct moxtet_device {
struct device dev;
struct moxtet *moxtet;
enum turris_mox_module_id id;
unsigned int idx;
};
extern int moxtet_device_read(struct device *dev);
extern int moxtet_device_write(struct device *dev, u8 val);
extern int moxtet_device_written(struct device *dev);
static inline struct moxtet_device *
to_moxtet_device(struct device *dev)
{
if (!dev)
return NULL;
return container_of(dev, struct moxtet_device, dev);
}
#endif /* __LINUX_MOXTET_H */
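A client of this bus registers through the macros above in the usual driver-model way. The following is a hedged, hypothetical sketch (driver name, ID table and probe hookup are illustrative; it assumes the bus core treats id_table as zero-terminated and dispatches the standard struct device_driver probe):

#include <linux/module.h>
#include <linux/moxtet.h>

static int example_probe(struct device *dev)
{
	struct moxtet_device *mdev = to_moxtet_device(dev);

	dev_info(dev, "module 0x%x at position %u\n", mdev->id, mdev->idx);
	return 0;
}

/* Assumed zero-terminated; matched against moxtet_device->id. */
static const enum turris_mox_module_id example_ids[] = {
	TURRIS_MOX_MODULE_PERIDOT,
	0,
};

static struct moxtet_driver example_driver = {
	.id_table = example_ids,
	.driver = {
		.name	= "moxtet-example",
		.probe	= example_probe,	/* assumption: bus core calls driver->probe */
	},
};
module_moxtet_driver(example_driver);

MODULE_LICENSE("GPL");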


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* SCMI Message Protocol driver header
*
@@ -71,7 +71,7 @@ struct scmi_clk_ops {
int (*rate_get)(const struct scmi_handle *handle, u32 clk_id,
u64 *rate);
int (*rate_set)(const struct scmi_handle *handle, u32 clk_id,
u32 config, u64 rate);
u64 rate);
int (*enable)(const struct scmi_handle *handle, u32 clk_id);
int (*disable)(const struct scmi_handle *handle, u32 clk_id);
};
@@ -145,6 +145,8 @@ struct scmi_sensor_info {
u32 id;
u8 type;
s8 scale;
u8 num_trip_points;
bool async;
char name[SCMI_MAX_STR_SIZE];
};
@@ -167,9 +169,9 @@ enum scmi_sensor_class {
*
* @count_get: get the count of sensors provided by SCMI
* @info_get: get the information of the specified sensor
* @configuration_set: control notifications on cross-over events for
* @trip_point_notify: control notifications on cross-over events for
* the trip-points
* @trip_point_set: selects and configures a trip-point of interest
* @trip_point_config: selects and configures a trip-point of interest
* @reading_get: gets the current value of the sensor
*/
struct scmi_sensor_ops {
@@ -177,12 +179,32 @@ struct scmi_sensor_ops {
const struct scmi_sensor_info *(*info_get)
(const struct scmi_handle *handle, u32 sensor_id);
int (*configuration_set)(const struct scmi_handle *handle,
u32 sensor_id);
int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id,
u8 trip_id, u64 trip_value);
int (*trip_point_notify)(const struct scmi_handle *handle,
u32 sensor_id, bool enable);
int (*trip_point_config)(const struct scmi_handle *handle,
u32 sensor_id, u8 trip_id, u64 trip_value);
int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id,
bool async, u64 *value);
u64 *value);
};
/**
* struct scmi_reset_ops - represents the various operations provided
* by SCMI Reset Protocol
*
* @num_domains_get: get the count of reset domains provided by SCMI
* @name_get: gets the name of a reset domain
* @latency_get: gets the reset latency for the specified reset domain
* @reset: resets the specified reset domain
* @assert: explicitly assert reset signal of the specified reset domain
* @deassert: explicitly deassert reset signal of the specified reset domain
*/
struct scmi_reset_ops {
int (*num_domains_get)(const struct scmi_handle *handle);
char *(*name_get)(const struct scmi_handle *handle, u32 domain);
int (*latency_get)(const struct scmi_handle *handle, u32 domain);
int (*reset)(const struct scmi_handle *handle, u32 domain);
int (*assert)(const struct scmi_handle *handle, u32 domain);
int (*deassert)(const struct scmi_handle *handle, u32 domain);
};
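A few hunks below, scmi_handle gains a reset_ops pointer, which is how a consumer (for example a reset controller driver) would reach these operations. A hedged sketch built only on the ops declared above (helper names are illustrative, error handling trimmed):

#include <linux/errno.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

/* Hypothetical helper: let the platform firmware run a full reset cycle. */
static int example_domain_reset(const struct scmi_handle *handle, u32 domain)
{
	if (!handle || !handle->reset_ops)
		return -EINVAL;

	return handle->reset_ops->reset(handle, domain);
}

/* Hypothetical helper: drive the signal explicitly when the caller owns the timing. */
static int example_domain_cycle(const struct scmi_handle *handle, u32 domain)
{
	int ret = handle->reset_ops->assert(handle, domain);

	return ret ? ret : handle->reset_ops->deassert(handle, domain);
}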
/**
@@ -194,6 +216,7 @@ struct scmi_sensor_ops {
* @perf_ops: pointer to set of performance protocol operations
* @clk_ops: pointer to set of clock protocol operations
* @sensor_ops: pointer to set of sensor protocol operations
* @reset_ops: pointer to set of reset protocol operations
* @perf_priv: pointer to private data structure specific to performance
* protocol(for internal use only)
* @clk_priv: pointer to private data structure specific to clock
@@ -202,6 +225,8 @@ struct scmi_sensor_ops {
* protocol(for internal use only)
* @sensor_priv: pointer to private data structure specific to sensors
* protocol(for internal use only)
* @reset_priv: pointer to private data structure specific to reset
* protocol(for internal use only)
*/
struct scmi_handle {
struct device *dev;
@@ -210,11 +235,13 @@ struct scmi_handle {
struct scmi_clk_ops *clk_ops;
struct scmi_power_ops *power_ops;
struct scmi_sensor_ops *sensor_ops;
struct scmi_reset_ops *reset_ops;
/* for protocol internal use */
void *perf_priv;
void *clk_priv;
void *power_priv;
void *sensor_priv;
void *reset_priv;
};
enum scmi_std_protocol {
@@ -224,6 +251,7 @@ enum scmi_std_protocol {
SCMI_PROTOCOL_PERF = 0x13,
SCMI_PROTOCOL_CLOCK = 0x14,
SCMI_PROTOCOL_SENSOR = 0x15,
SCMI_PROTOCOL_RESET = 0x16,
};
struct scmi_device {