
Merge branch 'display/next' into next

* display/next: (340 commits)
  LF-94: drm: hdmi: imx: Add hdmi phy video mode valid function
  drm: hdmi: imx8: fix wrong hdmi type with non-SCDC HDMI sinks
  Revert "drm/imx/hdp: fix issue with non-SCDC HDMI sinks"
  drm/bridge: nwl-dsi Correct the DSI init sequence
  gpu: imx: framegen: Use crtc_clock instead of mode clock
  ...
5.4-rM2-2.2.x-imx-squashed
Dong Aisheng 2019-12-02 18:02:24 +08:00
commit 240dd9ec8c
219 changed files with 72224 additions and 1573 deletions


@ -37,6 +37,8 @@ Optional properties:
Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
to be used for the framebuffer; if not present, the framebuffer may
be located anywhere in memory.
- arm,malidp-arqos-high-level: integer of u32 value describing the ARQoS
levels of DP500's QoS signaling.
Example:
@ -54,6 +56,7 @@ Example:
clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
clock-names = "pxlclk", "mclk", "aclk", "pclk";
arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
arm,malidp-arqos-high-level = <0xd000d000>;
port {
dp0_output: endpoint {
remote-endpoint = <&tda998x_2_input>;


@ -1,10 +1,10 @@
Analog Device ADV7511(W)/13/33 HDMI Encoders
Analog Device ADV7511(W)/13/33/35 HDMI Encoders
-----------------------------------------
The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
S/PDIF, CEC and HDCP. ADV7533 supports the DSI interface for input pixels, while
the others support RGB interface.
The ADV7511, ADV7511W, ADV7513, ADV7533 and ADV7535 are HDMI audio and video
transmitters compatible with HDMI 1.4 and DVI 1.0. They support color space
conversion, S/PDIF, CEC and HDCP. ADV7533 and ADV7535 support the DSI interface
for input pixels, while the others support RGB interface.
Required properties:
@ -13,6 +13,7 @@ Required properties:
"adi,adv7511w"
"adi,adv7513"
"adi,adv7533"
"adi,adv7535"
- reg: I2C slave addresses
The ADV7511 internal registers are split into four pages exposed through
@ -52,7 +53,7 @@ The following input format properties are required except in "rgb 1x" and
- bgvdd-supply: A 1.8V supply that powers up the BGVDD pin. This is
needed only for ADV7511.
The following properties are required for ADV7533:
The following properties are required for ADV7533 and ADV7535:
- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
be one of 1, 2, 3 or 4.
@ -71,23 +72,31 @@ Optional properties:
- adi,embedded-sync: The input uses synchronization signals embedded in the
data stream (similar to BT.656). Defaults to separate H/V synchronization
signals.
- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
generator. The chip will rely on the sync signals in the DSI data lanes,
rather than generate its own timings for HDMI output.
- adi,disable-timing-generator: Only for ADV7533 and ADV7535. Disables the
internal timing generator. The chip will rely on the sync signals in the DSI
data lanes, rather than generate its own timings for HDMI output.
- clocks: from common clock binding: reference to the CEC clock.
- clock-names: from common clock binding: must be "cec".
- reg-names : Names of maps with programmable addresses.
It can contain any map needing a non-default address.
Possible maps names are : "main", "edid", "cec", "packet"
- adi,dsi-channel: Only for ADV7533 and ADV7535. DSI channel number to be used
when communicating with the DSI peripheral. It should be one of 0, 1, 2 or 3.
- adi,addr-cec: Only for ADV7533 and ADV7535. The I2C DSI-CEC register map
address to be programmed into the MAIN register map.
- adi,addr-edid: Only for ADV7533 and ADV7535. The I2C EDID register map
to be programmed into the MAIN register map.
- adi,addr-pkt: Only for ADV7533 and ADV7535. The I2C PACKET register map
to be programmed into the MAIN register map.
Required nodes:
The ADV7511 has two video ports. Their connections are modelled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
remote endpoint phandle should be a reference to a valid mipi_dsi_host device
node.
- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533 and
ADV7535, the remote endpoint phandle should be a reference to a valid
mipi_dsi_host device node.
- Video port 1 for the HDMI output
- Audio port 2 for the HDMI audio input


@ -0,0 +1,27 @@
ITE IT6263 LVDS to HDMI bridge bindings
Required properties:
- compatible: "ite,it6263"
- reg: i2c address of the bridge
- video input: this subnode can contain a video input port node
to connect the bridge to an LVDS output interface (see the
documentation referenced in [1]).
Optional properties:
- split-mode: boolean. If this property exists, split mode is enabled;
otherwise, single mode is used.
- reset-gpios: OF device-tree gpio specification for SYSRSTN pin.
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
lvds-to-hdmi-bridge@4c {
compatible = "ite,it6263";
reg = <0x4c>;
port {
it6263_0_in: endpoint {
remote-endpoint = <&lvds0_out>;
};
};
};


@ -0,0 +1,161 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/nwl-dsi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Northwest Logic MIPI-DSI controller on i.MX SoCs
maintainers:
- Guido Günther <agx@sigxcpu.org>
- Robert Chiras <robert.chiras@nxp.com>
description: |
NWL MIPI-DSI host controller found on i.MX8 platforms. This is a DSI bridge for
the SoC's NWL MIPI-DSI host controller.
properties:
compatible:
const: fsl,imx8mq-nwl-dsi
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
items:
- description: DSI core clock
- description: RX_ESC clock (used in escape mode)
- description: TX_ESC clock (used in escape mode)
- description: PHY_REF clock
- description: VIDEO_PLL clock
clock-names:
items:
- const: core
- const: rx_esc
- const: tx_esc
- const: phy_ref
- const: video_pll
mux-controls:
description:
mux controller node to use for operating the input mux
phys:
maxItems: 1
description:
A phandle to the phy module representing the DPHY
phy-names:
items:
- const: dphy
power-domains:
maxItems: 1
description:
A phandle to the power domain
resets:
description:
phandles to the reset controller
items:
- description: dsi byte reset line
- description: dsi dpi reset line
- description: dsi esc reset line
- description: dsi pclk reset line
reset-names:
items:
- const: byte
- const: dpi
- const: esc
- const: pclk
ports:
type: object
description:
A node containing DSI input & output port nodes with endpoint
definitions as documented in
Documentation/devicetree/bindings/graph.txt.
port@0:
type: object
description:
Input port node to receive pixel data from the
display controller
port@1:
type: object
description:
DSI output port node to the panel or the next bridge
in the chain
fsl,clock-drop-level:
description:
Specifies the level at which the crtc_clock should be dropped
patternProperties:
"^panel@[0-9]+$": true
required:
- clock-names
- clocks
- compatible
- interrupts
- mux-controls
- phy-names
- phys
- ports
- reg
- reset-names
- resets
examples:
- |
mipi_dsi: mipi_dsi@30a00000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx8mq-nwl-dsi";
reg = <0x30A00000 0x300>;
clocks = <&clk 163>, <&clk 244>, <&clk 245>, <&clk 164>;
clock-names = "core", "rx_esc", "tx_esc", "phy_ref";
interrupts = <0 34 4>;
mux-controls = <&mux 0>;
power-domains = <&pgc_mipi>;
resets = <&src 0>, <&src 1>, <&src 2>, <&src 3>;
reset-names = "byte", "dpi", "esc", "pclk";
phys = <&dphy>;
phy-names = "dphy";
panel@0 {
compatible = "rocktech,jh057n00900";
reg = <0>;
port@0 {
panel_in: endpoint {
remote-endpoint = <&mipi_dsi_out>;
};
};
};
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mipi_dsi_in: endpoint {
remote-endpoint = <&lcdif_mipi_dsi>;
};
};
port@1 {
reg = <1>;
mipi_dsi_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
};


@ -0,0 +1,60 @@
Samsung MIPI DSIM bridge bindings
The MIPI DSIM host controller drives the video signals from the
display controller to video peripherals using the DSI protocol.
This is an un-managed DSI bridge. In order to use this bridge,
an encoder or bridge must be implemented to manage the
platform-specific initialization.
Required properties:
- compatible: "fsl,imx8mm-mipi-dsim"
- reg: the register range of the MIPI DSIM controller
- interrupts: the interrupt number for this module
- clocks, clock-names: phandles to the MIPI-DSI clocks described in
Documentation/devicetree/bindings/clock/clock-bindings.txt
"cfg" - DSIM access clock
"pll-ref" - DSIM PHY PLL reference clock
- assigned-clocks: phandles to clocks that require initial configuration
- assigned-clock-rates: rates of the clocks that require initial configuration
- pref-clk: Assign DPHY PLL reference clock frequency. If not present,
the DSIM bridge driver will use the default frequency,
which is 27MHz.
- port: input and output port nodes with endpoint definitions as
defined in Documentation/devicetree/bindings/graph.txt;
the input port should be connected to an encoder or a
bridge that manages this MIPI DSIM host and the output
port should be connected to a panel or a bridge input
port
Optional properties:
- dsi-gpr: a phandle which provides the MIPI DSIM control and gpr registers
example:
mipi_dsi: mipi_dsi@32E10000 {
compatible = "fsl,imx8mm-mipi-dsim";
reg = <0x0 0x32e10000 0x0 0x400>;
clocks = <&clk IMX8MM_CLK_DSI_CORE_DIV>,
<&clk IMX8MM_CLK_DSI_PHY_REF_DIV>;
clock-names = "cfg", "pll-ref";
assigned-clocks = <&clk IMX8MM_CLK_DSI_CORE_SRC>,
<&clk IMX8MM_CLK_DSI_PHY_REF_SRC>;
assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_266M>,
<&clk IMX8MM_VIDEO_PLL1_OUT>;
assigned-clock-rates = <266000000>, <594000000>;
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
dsi-gpr = <&dispmix_gpr>;
status = "disabled";
port@0 {
dsim_from_lcdif: endpoint {
remote-endpoint = <&lcdif_to_dsim>;
};
};
port@1 {
dsim_to_adv7535: endpoint {
remote-endpoint = <&adv7535_from_dsim>;
};
};
};


@ -110,6 +110,218 @@ prg@21cc000 {
fsl,pres = <&pre1>, <&pre2>, <&pre3>;
};
Freescale i.MX DPU
====================
Required properties:
- compatible: Should be "fsl,<chip>-dpu"
- reg: should be register base and length as documented in the
datasheet
- interrupt-parent: phandle pointing to the parent interrupt controller.
- interrupts, interrupt-names: Should contain interrupts and names as
documented in the datasheet.
- clocks, clock-names: phandles to the DPU clocks described in
Documentation/devicetree/bindings/clock/clock-bindings.txt
The following clocks are expected on i.MX8qxp:
"pll0" - PLL clock for display interface 0
"pll1" - PLL clock for display interface 1
"disp0" - pixel clock for display interface 0
"disp1" - pixel clock for display interface 1
The needed clock numbers for each are documented in
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt.
- power-domains: phandles pointing to power domain.
- power-domain-names: power domain names relevant to power-domains phandles.
- fsl,dpr-channels: phandles to the DPR channels attached to this DPU,
sorted by memory map addresses.
- fsl,pixel-combiner: phandle to the pixel combiner unit attached to this DPU.
Optional properties:
- port@[0-1]: Port nodes with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
ports 0 and 1 should correspond to display interface 0 and
display interface 1, respectively.
example:
dpu: dpu@56180000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx8qxp-dpu";
reg = <0x56180000 0x40000>;
interrupt-parent = <&irqsteer_dpu>;
interrupts = <448>, <449>, <450>, <64>,
<65>, <66>, <67>, <68>,
<69>, <70>, <193>, <194>,
<195>, <196>, <197>, <72>,
<73>, <74>, <75>, <76>,
<77>, <78>, <79>, <80>,
<81>, <199>, <200>, <201>,
<202>, <203>, <204>, <205>,
<206>, <207>, <208>, <0>,
<1>, <2>, <3>, <4>,
<82>, <83>, <84>, <85>,
<209>, <210>, <211>, <212>;
interrupt-names = "store9_shdload",
"store9_framecomplete",
"store9_seqcomplete",
"extdst0_shdload",
"extdst0_framecomplete",
"extdst0_seqcomplete",
"extdst4_shdload",
"extdst4_framecomplete",
"extdst4_seqcomplete",
"extdst1_shdload",
"extdst1_framecomplete",
"extdst1_seqcomplete",
"extdst5_shdload",
"extdst5_framecomplete",
"extdst5_seqcomplete",
"disengcfg_shdload0",
"disengcfg_framecomplete0",
"disengcfg_seqcomplete0",
"framegen0_int0",
"framegen0_int1",
"framegen0_int2",
"framegen0_int3",
"sig0_shdload",
"sig0_valid",
"sig0_error",
"disengcfg_shdload1",
"disengcfg_framecomplete1",
"disengcfg_seqcomplete1",
"framegen1_int0",
"framegen1_int1",
"framegen1_int2",
"framegen1_int3",
"sig1_shdload",
"sig1_valid",
"sig1_error",
"cmdseq_error",
"comctrl_sw0",
"comctrl_sw1",
"comctrl_sw2",
"comctrl_sw3",
"framegen0_primsync_on",
"framegen0_primsync_off",
"framegen0_secsync_on",
"framegen0_secsync_off",
"framegen1_primsync_on",
"framegen1_primsync_off",
"framegen1_secsync_on",
"framegen1_secsync_off";
clocks = <&dc_lpcg IMX_DC0_PLL0_CLK>,
<&dc_lpcg IMX_DC0_PLL1_CLK>,
<&dc_lpcg IMX_DC0_DISP0_CLK>,
<&dc_lpcg IMX_DC0_DISP1_CLK>;
clock-names = "pll0", "pll1", "disp0", "disp1";
power-domains = <&pd IMX_SC_R_DC_0>,
<&pd IMX_SC_R_DC_0_PLL_0>,
<&pd IMX_SC_R_DC_0_PLL_1>;
power-domain-names = "dc", "pll0", "pll1";
fsl,dpr-channels = <&dc0_dpr1_channel1>, <&dc0_dpr1_channel2>,
<&dc0_dpr1_channel3>, <&dc0_dpr2_channel1>,
<&dc0_dpr2_channel2>, <&dc0_dpr2_channel3>;
fsl,pixel-combiner = <&dc0_pc>;
dpu_disp0: port@0 {
reg = <0>;
dpu_disp0_lvds0_ch0: endpoint@0 {
remote-endpoint = <&ldb1_ch0>;
};
dpu_disp0_lvds0_ch1: endpoint@1 {
remote-endpoint = <&ldb1_ch1>;
};
dpu_disp0_mipi_dsi: endpoint@2 {
};
};
dpu_disp1: port@1 {
reg = <1>;
dpu_disp1_lvds1_ch0: endpoint@0 {
remote-endpoint = <&ldb2_ch0>;
};
dpu_disp1_lvds1_ch1: endpoint@1 {
remote-endpoint = <&ldb2_ch1>;
};
dpu_disp1_mipi_dsi: endpoint@2 {
};
};
};
Freescale i.MX8 PC (Pixel Combiner)
=============================================
Required properties:
- compatible: should be "fsl,<chip>-pixel-combiner"
- reg: should be register base and length as documented in the
datasheet
- power-domains: phandle pointing to power domain
example:
pixel-combiner@56020000 {
compatible = "fsl,imx8qm-pixel-combiner";
reg = <0x56020000 0x10000>;
power-domains = <&pd IMX_SC_R_DC_0>;
};
Freescale i.MX8 PRG (Prefetch Resolve Gasket)
=============================================
Required properties:
- compatible: should be "fsl,<chip>-prg"
- reg: should be register base and length as documented in the
datasheet
- clocks: phandles to the PRG apb and rtram clocks, as described in
Documentation/devicetree/bindings/clock/clock-bindings.txt and
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt.
- clock-names: should be "apb" and "rtram"
- power-domains: phandle pointing to power domain
example:
prg@56040000 {
compatible = "fsl,imx8qm-prg";
reg = <0x56040000 0x10000>;
clocks = <&dc0_prg0_lpcg 0>, <&dc0_prg0_lpcg 1>;
clock-names = "apb", "rtram";
power-domains = <&pd IMX_SC_R_DC_0>;
};
Freescale i.MX8 DPRC (Display Prefetch Resolve Channel)
=======================================================
Required properties:
- compatible: should be "fsl,<chip>-dpr-channel"
- reg: should be register base and length as documented in the
datasheet
- fsl,sc-resource: SCU resource number as defined in
include/dt-bindings/firmware/imx/rsrc.h
- fsl,prgs: phandles to the PRG unit(s) attached to this DPRC, the first one
is the primary PRG and the second one (if available) is the auxiliary PRG
which is used to fetch the luma chunk of a YUV frame with 2 planes.
- clocks: phandles to the DPRC apb, b and rtram clocks, as described in
Documentation/devicetree/bindings/clock/clock-bindings.txt and
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt.
- clock-names: should be "apb", "b" and "rtram"
- power-domains: phandle pointing to power domain
example:
dpr-channel@560e0000 {
compatible = "fsl,imx8qm-dpr-channel";
reg = <0x560e0000 0x10000>;
fsl,sc-resource = <IMX_SC_R_DC_0_BLIT1>;
fsl,prgs = <&dc0_prg2>, <&dc0_prg1>;
clocks = <&dc0_dpr0_lpcg 0>,
<&dc0_dpr0_lpcg 1>,
<&dc0_rtram0_lpcg 0>;
clock-names = "apb", "b", "rtram";
power-domains = <&pd IMX_SC_R_DC_0>;
};
Parallel display support
========================


@ -9,15 +9,24 @@ nodes describing each of the two LVDS encoder channels of the bridge.
Required properties:
- #address-cells : should be <1>
- #size-cells : should be <0>
- compatible : should be "fsl,imx53-ldb" or "fsl,imx6q-ldb".
Both LDB versions are similar, but i.MX6 has an additional
multiplexer in the front to select any of the four IPU display
interfaces as input for each LVDS channel.
- compatible : should be "fsl,imx53-ldb" or "fsl,imx6q-ldb" or
"fsl,imx8qm-ldb" or "fsl,imx8qxp-ldb".
All LDB versions are similar.
i.MX6q/dl has an additional multiplexer in the front to select
any of the two or four IPU display interfaces as input for each
LVDS channel.
i.MX8qm LDB supports 10bit RGB input and needs an additional
phy.
i.MX8qxp LDB only supports one LVDS encoder channel (either
channel 0 or channel 1).
- gpr : should be <&gpr> on i.MX53 and i.MX6q.
The phandle points to the iomuxc-gpr region containing the LVDS
control register.
- fsl,auxldb : phandle to auxiliary LDB which is used in dual channel mode.
Only required by i.MX8qxp.
- clocks, clock-names : phandles to the LDB divider and selector clocks and to
the display interface selector clocks, as described in
the display interface selector clocks or pixel and
bypass clocks as described in
Documentation/devicetree/bindings/clock/clock-bindings.txt
The following clocks are expected on i.MX53:
"di0_pll" - LDB LVDS channel 0 mux
@ -29,14 +38,25 @@ Required properties:
On i.MX6q the following additional clocks are needed:
"di2_sel" - IPU2 DI0 mux
"di3_sel" - IPU2 DI1 mux
The following clocks are expected on i.MX8qm and i.MX8qxp:
"pixel" - pixel clock
"bypass" - bypass clock
The following clocks are expected on i.MX8qxp:
"aux_pixel" - auxiliary pixel clock in dual channel mode
"aux_bypass" - auxiliary bypass clock in dual channel mode
The needed clock numbers for each are documented in
Documentation/devicetree/bindings/clock/imx5-clock.txt, and in
Documentation/devicetree/bindings/clock/imx6q-clock.txt.
Documentation/devicetree/bindings/clock/imx6q-clock.txt, and in
Documentation/devicetree/bindings/clock/imx8qm-lpcg.txt, and in
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt.
- power-domains : phandle pointing to power domain, only required by i.MX8qm and
i.MX8qxp.
Optional properties:
- pinctrl-names : should be "default" on i.MX53, not used on i.MX6q
- pinctrl-names : should be "default" on i.MX53, not used on i.MX6q, i.MX8qm
and i.MX8qxp
- pinctrl-0 : a phandle pointing to LVDS pin settings on i.MX53,
not used on i.MX6q
not used on i.MX6q, i.MX8qm and i.MX8qxp
- fsl,dual-channel : boolean. if it exists, only LVDS channel 0 should
be configured - one input will be distributed on both outputs in dual
channel mode
@ -57,9 +77,14 @@ Required properties:
(lvds-channel@[0,1], respectively).
On i.MX6, there should be four input ports (port@[0-3]) that correspond
to the four LVDS multiplexer inputs.
A single output port (port@2 on i.MX5, port@4 on i.MX6) must be connected
to a panel input port. Optionally, the output port can be left out if
display-timings are used instead.
On i.MX8qm, the two channels of LDB connect to one display interface of DPU.
A single output port (port@2 on i.MX5, port@4 on i.MX6, port@1 on i.MX8qm
and i.MX8qxp) must be connected to a panel input port or a bridge input port.
Optionally, the output port can be left out if display-timings are used
instead.
- phys: the phandle for the LVDS PHY device. Valid only on i.MX8qm and
i.MX8qxp.
- phy-names: should be "ldb_phy". Valid only on i.MX8qm and i.MX8qxp.
Optional properties (required if display-timings are used):
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
@ -69,6 +94,7 @@ Optional properties (required if display-timings are used):
This describes how the color bits are laid out in the
serialized LVDS signal.
- fsl,data-width : should be <18> or <24>
Additionally, <30> for i.MX8qm.
example:


@ -0,0 +1,93 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2019 NXP
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/imx/nxp,imx8mq-dcss.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: iMX8MQ Display Controller Subsystem (DCSS)
maintainers:
- Laurentiu Palcu <laurentiu.palcu@nxp.com>
description:
The DCSS (display controller sub system) is used to source up to three
display buffers, compose them, and drive a display using HDMI 2.0a (with HDCP
2.2) or MIPI-DSI. The DCSS is intended to support up to 4kp60 displays. HDR10
image processing capabilities are included to provide a solution capable of
driving next generation high dynamic range displays.
properties:
compatible:
const: nxp,imx8mq-dcss
reg:
maxItems: 2
interrupts:
maxItems: 3
items:
- description: Context loader completion and error interrupt
- description: DTG interrupt used to signal context loader trigger time
- description: DTG interrupt for Vblank
interrupt-names:
maxItems: 3
items:
- const: ctx_ld
- const: ctxld_kick
- const: vblank
- const: dtrc_ch1
- const: dtrc_ch2
clocks:
maxItems: 5
items:
- description: Display APB clock for all peripheral PIO access interfaces
- description: Display AXI clock needed by DPR, Scaler, RTRAM_CTRL
- description: RTRAM clock
- description: Pixel clock, can be driven either by HDMI phy clock or MIPI
- description: DTRC clock, needed by video decompressor
- description: PLL source clock, usually VIDEO2_PLL, used when output is HDMI;
- description: PLL PHY reference clock, used when output is HDMI;
clock-names:
items:
- const: apb
- const: axi
- const: rtrm
- const: pix
- const: dtrc
- const: pll_src
- const: pll_phy_ref
port@0:
type: object
description: A port node pointing to a hdmi_in or mipi_in port node.
examples:
- |
dcss: display-controller@32e00000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "nxp,imx8mq-dcss";
reg = <0x32e00000 0x2d000>, <0x32e2f000 0x1000>;
interrupts = <6>, <8>, <9>, <16>, <17>;
interrupt-names = "ctx_ld", "ctxld_kick", "vblank", "dtrc_ch1", "dtrc_ch2";
interrupt-parent = <&irqsteer>;
clocks = <&clk 248>, <&clk 247>, <&clk 249>,
<&clk 254>,<&clk 122>, <&clk 266>, <&clk 267>;
clock-names = "apb", "axi", "rtrm", "pix", "dtrc",
"pll_src", "pll_phy_ref";
assigned-clocks = <&clk 107>, <&clk 109>, <&clk 266>;
assigned-clock-parents = <&clk 78>, <&clk 78>, <&clk 3>;
assigned-clock-rates = <800000000>,
<400000000>;
port@0 {
dcss_out: endpoint {
remote-endpoint = <&hdmi_in>;
};
};
};


@ -14,6 +14,11 @@ Required properties:
- "pix" for the LCDIF block clock
- (MX6SX-only) "axi", "disp_axi" for the bus interface clock
Optional properties:
- max-memory-bandwidth: maximum bandwidth in bytes per second that the
controller can handle; if not present, the memory
interface is fast enough to handle all possible video modes
Required sub-nodes:
- port: The connection to an encoder chip.


@ -0,0 +1,9 @@
Japan Display Inc. 10.1" WUXGA (1920x1200) TFT LCD panel
The panel has dual LVDS channels.
Required properties:
- compatible: should be "jdi,tx26d202vm0bwa"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -461,6 +461,8 @@ patternProperties:
description: Intersil
"^issi,.*":
description: Integrated Silicon Solutions Inc.
"^ite,.*":
description: ITE Tech. Inc.
"^itead,.*":
description: ITEAD Intelligent Systems Co.Ltd
"^iwave,.*":


@ -3,5 +3,5 @@
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-y += imx/
obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/


@ -98,7 +98,7 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
obj-y += imx/
obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/


@ -817,6 +817,12 @@ static int malidp_bind(struct device *dev)
malidp->core_id = version;
ret = of_property_read_u32(dev->of_node,
"arm,malidp-arqos-value",
&hwdev->arqos_value);
if (ret)
hwdev->arqos_value = 0x0;
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",


@ -379,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
/*
* Program the RQoS register to avoid a flicker issue at high
* resolutions on the LS1028A.
*/
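/*
 * A sketch of the effect, assuming malidp_hw_setbits() ORs its value into
 * the register: the u32 ARQoS level read from the device tree is programmed
 * into MALIDP500_RQOS_QUALITY unmodified.
 */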
if (hwdev->arqos_value) {
val = hwdev->arqos_value;
malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY);
}
}
int malidp_format_get_bpp(u32 fmt)


@ -251,6 +251,9 @@ struct malidp_hw_device {
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
/* priority level of RQOS register used to drive the ARQOS signal */
u32 arqos_value;
};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)


@ -210,6 +210,16 @@
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
/*
* The quality of service (QoS) register on the DP500. RQOS register values
* are driven by the ARQOS signal, using AXI transactions, dependent on the
* FIFO input level.
* The RQOS register can also set QoS levels for:
* - RED_ARQOS @ A 4-bit signal value for close to underflow conditions
* - GREEN_ARQOS @ A 4-bit signal value for normal conditions
*/
#define MALIDP500_RQOS_QUALITY 0x00500
/* register offsets and bits specific to DP550/DP650 */
#define MALIDP550_ADDR_SPACE_SIZE 0x10000
#define MALIDP550_DE_CONTROL 0x00010


@ -65,6 +65,15 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
to DP++. This is used with the i.MX6 imx-ldb
driver. You are likely to say N here.
config DRM_SEC_MIPI_DSIM
tristate "Samsung MIPI DSIM Bridge"
depends on OF
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
help
The Samsung MIPI DSIM Bridge driver.
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
@ -154,6 +163,18 @@ source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
source "drivers/gpu/drm/bridge/nwl-dsi/Kconfig"
source "drivers/gpu/drm/bridge/cadence/Kconfig"
source "drivers/gpu/drm/bridge/synopsys/Kconfig"
config DRM_ITE_IT6263
tristate "ITE IT6263 LVDS/HDMI bridge"
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
---help---
ITE IT6263 bridge chip driver.
endmenu


@ -16,4 +16,8 @@ obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi/
obj-y += cadence/
obj-y += synopsys/
obj-$(CONFIG_DRM_ITE_IT6263) += it6263.o
obj-$(CONFIG_DRM_SEC_MIPI_DSIM) += sec-dsim.o


@ -219,6 +219,9 @@
#define ADV7511_REG_CEC_SOFT_RESET 0x50
#define ADV7533_REG_CEC_OFFSET 0x70
#define FORMAT_RATIO(x, y) (((x) * 100) / (y))
#define RATIO_16_9 FORMAT_RATIO(16, 9)
#define RATIO_4_3 FORMAT_RATIO(4, 3)
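/*
 * Illustrative arithmetic for the macros above: with integer math,
 * RATIO_16_9 = 1600 / 9 = 177 and RATIO_4_3 = 400 / 3 = 133, and common modes
 * truncate to the same values (FORMAT_RATIO(1920, 1080) = 177,
 * FORMAT_RATIO(640, 480) = 133), so the equality checks in
 * adv7533_dsi_power_on() match despite the truncation.
 */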
enum adv7511_input_clock {
ADV7511_INPUT_CLOCK_1X,
@ -320,6 +323,7 @@ struct adv7511_video_config {
enum adv7511_type {
ADV7511,
ADV7533,
ADV7535,
};
#define ADV7511_MAX_ADDRS 3
@ -330,6 +334,10 @@ struct adv7511 {
struct i2c_client *i2c_packet;
struct i2c_client *i2c_cec;
u32 addr_cec;
u32 addr_edid;
u32 addr_pkt;
struct regmap *regmap;
struct regmap *regmap_cec;
enum drm_connector_status status;
@ -365,6 +373,7 @@ struct adv7511 {
struct device_node *host_node;
struct mipi_dsi_device *dsi;
u8 num_dsi_lanes;
u8 channel_id;
bool use_timing_gen;
enum adv7511_type type;
@ -396,7 +405,6 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);


@ -302,7 +302,8 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
unsigned int offset = adv7511->type == ADV7533 ?
unsigned int offset = (adv7511->type == ADV7533 ||
adv7511->type == ADV7535) ?
ADV7533_REG_CEC_OFFSET : 0;
int ret = adv7511_cec_parse_dt(dev, adv7511);


@ -9,7 +9,9 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/slab.h>
#include <media/cec.h>
@ -74,6 +76,26 @@ static const uint8_t adv7511_register_defaults[] = {
0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/*
* TODO: Currently, filter out unsupported modes by their clocks.
* Need to find a better way to do this.
* These are the pixel clocks (in kHz) that the converter can handle successfully.
*/
static const int valid_clocks[] = {
148500,
135000,
132000,
119000,
108000,
78750,
74250,
65000,
49500,
40000,
31500,
};
static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
@ -329,6 +351,7 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
/* 01-02 Power */
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
if (adv7511->i2c_main->irq) {
@ -346,6 +369,7 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
}
/*
* 01-01 HPD Manual Override
* Per spec it is allowed to pulse the HPD signal to indicate that the
* EDID information has changed. Some monitors do this when they wakeup
* from standby or are enabled. When the HPD goes low the adv7511 is
@ -367,7 +391,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
if (adv7511->type == ADV7533)
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_on(adv7511);
adv7511->powered = true;
}
@ -387,7 +411,7 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
static void adv7511_power_off(struct adv7511 *adv7511)
{
__adv7511_power_off(adv7511);
if (adv7511->type == ADV7533)
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_off(adv7511);
adv7511->powered = false;
}
@ -417,17 +441,16 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
static void adv7511_hpd_work(struct work_struct *work)
{
struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
enum drm_connector_status status;
enum drm_connector_status status = connector_status_disconnected;
unsigned int val;
int ret;
ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
if (ret < 0)
status = connector_status_disconnected;
else if (val & ADV7511_STATUS_HPD)
if (ret >= 0 && (val & ADV7511_STATUS_HPD))
status = connector_status_connected;
else
status = connector_status_disconnected;
DRM_DEV_DEBUG_DRIVER(adv7511->connector.kdev, "HDMI HPD event: %s\n",
drm_get_connector_status_name(status));
/*
* The bridge resets its registers on unplug. So when we get a plug
@ -594,6 +617,8 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
{
struct edid *edid;
unsigned int count;
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
int ret;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
@ -623,6 +648,14 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
kfree(edid);
connector->display_info.bus_flags = DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_PIXDATA_NEGEDGE;
ret = drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
if (ret)
return ret;
return count;
}
@ -669,9 +702,21 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
struct drm_display_mode *mode)
{
size_t i, num_modes = ARRAY_SIZE(valid_clocks);
bool clock_ok = false;
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
for (i = 0; i < num_modes; i++)
if (mode->clock == valid_clocks[i]) {
clock_ok = true;
break;
}
if (!clock_ok)
return MODE_NOCLOCK;
return MODE_OK;
}
@ -756,14 +801,16 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
regmap_update_bits(adv7511->regmap, 0xfb,
0x6, low_refresh_rate << 1);
if (adv7511->type == ADV7535)
regmap_update_bits(adv7511->regmap, 0x4a,
0xc, low_refresh_rate << 2);
else
regmap_update_bits(adv7511->regmap, 0xfb,
0x6, low_refresh_rate << 1);
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
if (adv7511->type == ADV7533)
adv7533_mode_set(adv7511, adj_mode);
drm_mode_copy(&adv7511->curr_mode, adj_mode);
/*
@ -874,7 +921,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
&adv7511_connector_helper_funcs);
drm_connector_attach_encoder(&adv->connector, bridge->encoder);
if (adv->type == ADV7533)
if (adv->type == ADV7533 || adv->type == ADV7535)
ret = adv7533_attach_dsi(adv);
if (adv->i2c_main->irq)
@ -982,7 +1029,7 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
int ret;
adv->i2c_cec = i2c_new_ancillary_device(adv->i2c_main, "cec",
ADV7511_CEC_I2C_ADDR_DEFAULT);
adv->addr_cec);
if (IS_ERR(adv->i2c_cec))
return PTR_ERR(adv->i2c_cec);
i2c_set_clientdata(adv->i2c_cec, adv);
@ -994,7 +1041,7 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
goto err;
}
if (adv->type == ADV7533) {
if (adv->type == ADV7533 || adv->type == ADV7535) {
ret = adv7533_patch_cec_registers(adv);
if (ret)
goto err;
@ -1094,6 +1141,14 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
struct adv7511_link_config link_config;
struct adv7511 *adv7511;
struct device *dev = &i2c->dev;
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
struct device_node *remote_node = NULL, *endpoint = NULL;
struct of_changeset ocs;
#endif
unsigned int main_i2c_addr = i2c->addr << 1;
unsigned int edid_i2c_addr = main_i2c_addr + 4;
unsigned int cec_i2c_addr = main_i2c_addr - 2;
unsigned int pkt_i2c_addr = main_i2c_addr - 0xa;
unsigned int val;
int ret;
@ -1128,6 +1183,21 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
}
if (adv7511->addr_cec != 0)
cec_i2c_addr = adv7511->addr_cec << 1;
else
adv7511->addr_cec = cec_i2c_addr >> 1;
if (adv7511->addr_edid != 0)
edid_i2c_addr = adv7511->addr_edid << 1;
else
adv7511->addr_edid = edid_i2c_addr >> 1;
if (adv7511->addr_pkt != 0)
pkt_i2c_addr = adv7511->addr_pkt << 1;
else
adv7511->addr_pkt = pkt_i2c_addr >> 1;
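/*
 * A worked example of the 7-bit/8-bit convention above, assuming a main
 * 7-bit address of 0x39: main_i2c_addr = 0x72 (8-bit form), so the defaults
 * become edid = 0x76, cec = 0x70 and pkt = 0x68. The 8-bit values are what
 * is written into the MAIN register map below, while the ancillary i2c
 * clients are created with the 7-bit forms (0x3b, 0x38 and 0x34).
 */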
/*
* The power down GPIO is optional. If present, toggle it from active to
* inactive to wake up the encoder.
@ -1165,33 +1235,33 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511_packet_disable(adv7511, 0xffff);
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
edid_i2c_addr);
adv7511->i2c_edid = i2c_new_ancillary_device(i2c, "edid",
ADV7511_EDID_I2C_ADDR_DEFAULT);
adv7511->addr_edid);
if (IS_ERR(adv7511->i2c_edid)) {
ret = PTR_ERR(adv7511->i2c_edid);
goto uninit_regulators;
}
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
adv7511->i2c_edid->addr << 1);
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
pkt_i2c_addr);
adv7511->i2c_packet = i2c_new_ancillary_device(i2c, "packet",
ADV7511_PACKET_I2C_ADDR_DEFAULT);
adv7511->addr_pkt);
if (IS_ERR(adv7511->i2c_packet)) {
ret = PTR_ERR(adv7511->i2c_packet);
goto err_i2c_unregister_edid;
}
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
adv7511->i2c_packet->addr << 1);
regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
cec_i2c_addr);
ret = adv7511_init_cec_regmap(adv7511);
if (ret)
goto err_i2c_unregister_packet;
regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
adv7511->i2c_cec->addr << 1);
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
if (i2c->irq) {
@ -1234,6 +1304,37 @@ err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
adv7511_uninit_regulators(adv7511);
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (endpoint)
remote_node = of_graph_get_remote_port_parent(endpoint);
if (!remote_node)
return ret;
/* Find remote's endpoint connected to us and detach it */
endpoint = NULL;
while ((endpoint = of_graph_get_next_endpoint(remote_node,
endpoint))) {
struct device_node *us;
us = of_graph_get_remote_port_parent(endpoint);
if (us == dev->of_node)
break;
}
of_node_put(remote_node);
if (!endpoint)
return ret;
of_changeset_init(&ocs);
of_changeset_detach_node(&ocs, endpoint);
ret = of_changeset_apply(&ocs);
if (!ret)
dev_warn(dev,
"Probe failed. Remote port '%s' disabled\n",
remote_node->full_name);
#endif
return ret;
}
@ -1242,7 +1343,7 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
if (adv7511->type == ADV7533)
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
@ -1268,6 +1369,7 @@ static const struct i2c_device_id adv7511_i2c_ids[] = {
{ "adv7513", ADV7511 },
#ifdef CONFIG_DRM_I2C_ADV7533
{ "adv7533", ADV7533 },
{ "adv7535", ADV7535 },
#endif
{ }
};
@ -1279,6 +1381,7 @@ static const struct of_device_id adv7511_of_ids[] = {
{ .compatible = "adi,adv7513", .data = (void *)ADV7511 },
#ifdef CONFIG_DRM_I2C_ADV7533
{ .compatible = "adi,adv7533", .data = (void *)ADV7533 },
{ .compatible = "adi,adv7535", .data = (void *)ADV7535 },
#endif
{ }
};


@ -26,10 +26,8 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
hsw = mode->hsync_end - mode->hsync_start;
hfp = mode->hsync_start - mode->hdisplay;
@ -38,9 +36,10 @@ static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
vfp = mode->vsync_start - mode->vdisplay;
vbp = mode->vtotal - mode->vsync_end;
/* set pixel clock divider mode */
regmap_write(adv->regmap_cec, 0x16,
clock_div_by_lanes[dsi->lanes - 2] << 3);
/* 03-01 Enable Internal Timing Generator */
regmap_write(adv->regmap_cec, 0x27, 0xcb);
/* 03-08 Timing Configuration */
/* horizontal porch params */
regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4);
@ -61,35 +60,66 @@ static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff);
regmap_write(adv->regmap_cec, 0x36, vbp >> 4);
regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 0xff);
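/*
 * Note on the register layout, inferred from the shifts above: each timing
 * parameter is a 12-bit value split across two registers, the first holding
 * bits [11:4] and the second holding bits [3:0] in its upper nibble.
 */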
/* 03-03 Reset Internal Timing Generator */
regmap_write(adv->regmap_cec, 0x27, 0xcb);
regmap_write(adv->regmap_cec, 0x27, 0x8b);
regmap_write(adv->regmap_cec, 0x27, 0xcb);
}
void adv7533_dsi_power_on(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
/* Gate DSI LP Oscillator */
regmap_update_bits(adv->regmap_cec, 0x03, 0x02, 0x00);
/* 01-03 Initialisation (Fixed) Registers */
regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
ARRAY_SIZE(adv7533_cec_fixed_registers));
/* 02-04 DSI Lanes */
regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
/* 02-05 DSI Pixel Clock Divider */
regmap_write(adv->regmap_cec, 0x16,
clock_div_by_lanes[dsi->lanes - 2] << 3);
if (adv->use_timing_gen)
adv7511_dsi_config_timing_gen(adv);
/* set number of dsi lanes */
regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
if (adv->use_timing_gen) {
/* reset internal timing generator */
regmap_write(adv->regmap_cec, 0x27, 0xcb);
regmap_write(adv->regmap_cec, 0x27, 0x8b);
regmap_write(adv->regmap_cec, 0x27, 0xcb);
} else {
/* disable internal timing generator */
else
regmap_write(adv->regmap_cec, 0x27, 0x0b);
}
/* enable hdmi */
/* 04-01 HDMI Output */
regmap_write(adv->regmap, 0xaf, 0x16);
/* 09-03 AVI Infoframe - RGB - 16-9 Aspect Ratio */
regmap_write(adv->regmap, ADV7511_REG_AVI_INFOFRAME(0), 0x10);
if (FORMAT_RATIO(mode->hdisplay, mode->vdisplay) == RATIO_16_9)
regmap_write(adv->regmap, ADV7511_REG_AVI_INFOFRAME(1), 0x28);
else if (FORMAT_RATIO(mode->hdisplay, mode->vdisplay) == RATIO_4_3)
regmap_write(adv->regmap, ADV7511_REG_AVI_INFOFRAME(1), 0x18);
/* 04-04 GC Packet Enable */
regmap_write(adv->regmap, ADV7511_REG_PACKET_ENABLE0, 0x80);
/* 04-06 GC Colour Depth - 24 Bit */
regmap_write(adv->regmap, 0x4c, 0x04);
/* 04-09 Down Dither Output Colour Depth - 8 Bit (default) */
regmap_write(adv->regmap, 0x49, 0x00);
/* 07-01 CEC Power Mode - Always Active */
regmap_write(adv->regmap_cec, 0xbe, 0x3d);
/* 04-03 HDMI Output Enable */
regmap_write(adv->regmap_cec, 0x03, 0x89);
/* disable test mode */
regmap_write(adv->regmap_cec, 0x55, 0x00);
regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
ARRAY_SIZE(adv7533_cec_fixed_registers));
}
void adv7533_dsi_power_off(struct adv7511 *adv)
@ -100,28 +130,6 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
regmap_write(adv->regmap_cec, 0x27, 0x0b);
}
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
{
struct mipi_dsi_device *dsi = adv->dsi;
int lanes, ret;
if (adv->num_dsi_lanes != 4)
return;
if (mode->clock > 80000)
lanes = 4;
else
lanes = 3;
if (lanes != dsi->lanes) {
mipi_dsi_detach(dsi);
dsi->lanes = lanes;
ret = mipi_dsi_attach(dsi);
if (ret)
dev_err(&dsi->dev, "failed to change host lanes\n");
}
}
int adv7533_patch_registers(struct adv7511 *adv)
{
return regmap_register_patch(adv->regmap,
@ -143,7 +151,7 @@ int adv7533_attach_dsi(struct adv7511 *adv)
struct mipi_dsi_device *dsi;
int ret = 0;
const struct mipi_dsi_device_info info = { .type = "adv7533",
.channel = 0,
.channel = adv->channel_id,
.node = NULL,
};
@ -189,14 +197,24 @@ void adv7533_detach_dsi(struct adv7511 *adv)
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
{
u32 num_lanes;
struct device *dev = &adv->i2c_main->dev;
u32 num_lanes = 0, channel_id = 0;
of_property_read_u32(np, "adi,dsi-channel", &channel_id);
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
if (num_lanes < 1 || num_lanes > 4)
if (num_lanes < 1 || num_lanes > 4) {
dev_err(dev, "Invalid dsi-lanes: %d\n", num_lanes);
return -EINVAL;
}
if (channel_id > 3) {
dev_err(dev, "Invalid dsi-channel: %d\n", channel_id);
return -EINVAL;
}
adv->num_dsi_lanes = num_lanes;
adv->channel_id = channel_id;
adv->host_node = of_graph_get_remote_node(np, 0, 0);
if (!adv->host_node)
@ -207,6 +225,10 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
adv->use_timing_gen = !of_property_read_bool(np,
"adi,disable-timing-generator");
of_property_read_u32(np, "adi,addr-cec", &adv->addr_cec);
of_property_read_u32(np, "adi,addr-edid", &adv->addr_edid);
of_property_read_u32(np, "adi,addr-pkt", &adv->addr_pkt);
/* TODO: Check if these need to be parsed by DT or not */
adv->rgb = true;
adv->embedded_sync = false;


@ -0,0 +1,25 @@
config DRM_CDNS_MHDP
tristate "Cadence MHDP COMMON API driver"
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
depends on OF
help
Support Cadence MHDP API library.
config DRM_CDNS_HDMI
tristate "Cadence HDMI DRM driver"
depends on DRM_CDNS_MHDP
config DRM_CDNS_DP
tristate "Cadence DP DRM driver"
depends on DRM_CDNS_MHDP
config DRM_CDNS_AUDIO
tristate "Cadence MHDP Audio driver"
depends on DRM_CDNS_MHDP
config DRM_CDNS_HDMI_CEC
tristate "Cadence MHDP HDMI CEC driver"
depends on DRM_CDNS_HDMI
select CEC_CORE
select CEC_NOTIFIER


@ -0,0 +1,5 @@
obj-$(CONFIG_DRM_CDNS_MHDP) += cdns-mhdp-common.o cdns-mhdp-hdmi.o
obj-$(CONFIG_DRM_CDNS_HDMI) += cdns-hdmi-core.o
obj-$(CONFIG_DRM_CDNS_DP) += cdns-dp-core.o
obj-$(CONFIG_DRM_CDNS_AUDIO) += cdns-mhdp-audio.o
obj-$(CONFIG_DRM_CDNS_HDMI_CEC) += cdns-mhdp-cec.o


@ -0,0 +1,587 @@
/*
* Cadence Display Port Interface (DP) driver
*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <drm/bridge/cdns-mhdp-common.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drmP.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
/*
* This function only implements native DPCD reads and writes
*/
static ssize_t dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
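/*
 * Assuming the standard drm_dp_helper.h request codes (DP_AUX_NATIVE_WRITE =
 * 0x8, DP_AUX_NATIVE_READ = 0x9, I2C-over-AUX requests = 0x0..0x2), the mask
 * above reduces to bit 3, so "native" is set only for native AUX requests.
 */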
int ret;
/* Ignore address only message */
if ((msg->size == 0) || (msg->buffer == NULL)) {
msg->reply = native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
return msg->size;
}
if (!native) {
dev_err(mhdp->dev, "%s: only native messages supported\n", __func__);
return -EINVAL;
}
/* msg sanity check */
if (msg->size > DP_AUX_MAX_PAYLOAD_BYTES) {
dev_err(mhdp->dev, "%s: invalid msg: size(%zu), request(%x)\n",
__func__, msg->size, (unsigned int)msg->request);
return -EINVAL;
}
if (msg->request == DP_AUX_NATIVE_WRITE) {
const u8 *buf = msg->buffer;
int i;
for (i = 0; i < msg->size; ++i) {
ret = cdns_mhdp_dpcd_write(mhdp,
msg->address + i, buf[i]);
if (!ret)
continue;
DRM_DEV_ERROR(mhdp->dev, "Failed to write DPCD\n");
return ret;
}
}
if (msg->request == DP_AUX_NATIVE_READ) {
ret = cdns_mhdp_dpcd_read(mhdp, msg->address, msg->buffer, msg->size);
if (ret < 0)
return -EIO;
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
return msg->size;
}
return 0;
}
static int dp_aux_init(struct cdns_mhdp_device *mhdp,
struct device *dev)
{
int ret;
mhdp->dp.aux.name = "imx_dp_aux";
mhdp->dp.aux.dev = dev;
mhdp->dp.aux.transfer = dp_aux_transfer;
ret = drm_dp_aux_register(&mhdp->dp.aux);
return ret;
}
static int dp_aux_destroy(struct cdns_mhdp_device *mhdp)
{
drm_dp_aux_unregister(&mhdp->dp.aux);
return 0;
}
static void dp_pixel_clk_reset(struct cdns_mhdp_device *mhdp)
{
u32 val;
/* reset pixel clk */
val = cdns_mhdp_reg_read(mhdp, SOURCE_HDTX_CAR);
cdns_mhdp_reg_write(mhdp, SOURCE_HDTX_CAR, val & 0xFD);
cdns_mhdp_reg_write(mhdp, SOURCE_HDTX_CAR, val);
}
static void cdns_dp_mode_set(struct cdns_mhdp_device *mhdp)
{
u32 lane_mapping = mhdp->lane_mapping;
struct drm_dp_link *link = &mhdp->dp.link;
char linkid[6];
int ret;
cdns_mhdp_plat_call(mhdp, pclk_rate);
/* delay to let the DP firmware stabilize after the pixel clock relock */
msleep(50);
dp_pixel_clk_reset(mhdp);
ret = drm_dp_downstream_id(&mhdp->dp.aux, linkid);
if (ret < 0) {
DRM_INFO("Failed to Get DP link ID: %d\n", ret);
return;
}
DRM_INFO("DP link id: %s, 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
linkid, linkid[0], linkid[1], linkid[2], linkid[3], linkid[4],
linkid[5]);
/* Check dp link */
ret = drm_dp_link_probe(&mhdp->dp.aux, link);
if (ret < 0) {
DRM_INFO("Failed to probe DP link: %d\n", ret);
return;
}
DRM_INFO("DP revision: 0x%x\n", link->revision);
DRM_INFO("DP rate: %d Mbps\n", link->rate);
DRM_INFO("DP number of lanes: %d\n", link->num_lanes);
DRM_INFO("DP capabilities: 0x%lx\n", link->capabilities);
/* check the max link rate */
if (link->rate > CDNS_DP_MAX_LINK_RATE)
link->rate = CDNS_DP_MAX_LINK_RATE;
ret = drm_dp_link_power_up(&mhdp->dp.aux, link);
if (ret < 0) {
DRM_INFO("Failed to power DP link: %d\n", ret);
return;
}
/* Initialize link rate/num_lanes as panel max link rate/max_num_lanes */
cdns_mhdp_plat_call(mhdp, phy_set);
/* Video off */
ret = cdns_mhdp_set_video_status(mhdp, CONTROL_VIDEO_IDLE);
if (ret) {
DRM_DEV_ERROR(mhdp->dev, "Failed to set video idle %d\n", ret);
return;
}
/* Lane swapping */
cdns_mhdp_reg_write(mhdp, LANES_CONFIG, 0x00400000 | lane_mapping);
/* Set DP host capability */
ret = cdns_mhdp_set_host_cap(mhdp, false);
if (ret) {
DRM_DEV_ERROR(mhdp->dev, "Failed to set host cap %d\n", ret);
return;
}
ret = cdns_mhdp_config_video(mhdp);
if (ret) {
DRM_DEV_ERROR(mhdp->dev, "Failed to config video %d\n", ret);
return;
}
/* Link training */
ret = cdns_mhdp_train_link(mhdp);
if (ret) {
DRM_DEV_ERROR(mhdp->dev, "Failed link train %d\n", ret);
return;
}
ret = cdns_mhdp_set_video_status(mhdp, CONTROL_VIDEO_VALID);
if (ret) {
DRM_DEV_ERROR(mhdp->dev, "Failed to set video valid %d\n", ret);
return;
}
return;
}
/* -----------------------------------------------------------------------------
* dp TX Setup
*/
static enum drm_connector_status
cdns_dp_connector_detect(struct drm_connector *connector, bool force)
{
struct cdns_mhdp_device *mhdp = container_of(connector,
struct cdns_mhdp_device, connector.base);
u8 hpd = 0xf;
hpd = cdns_mhdp_read_hpd(mhdp);
if (hpd == 1)
/* Cable Connected */
return connector_status_connected;
else if (hpd == 0)
/* Cable Disconnected */
return connector_status_disconnected;
else {
/* Cable status unknown */
DRM_INFO("Unknown cable status, hpd=%u\n", hpd);
return connector_status_unknown;
}
}
static int cdns_dp_connector_get_modes(struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp = container_of(connector,
struct cdns_mhdp_device, connector.base);
int num_modes = 0;
struct edid *edid;
edid = drm_do_get_edid(&mhdp->connector.base,
cdns_mhdp_get_edid_block, mhdp);
if (edid) {
dev_info(mhdp->dev, "%x,%x,%x,%x,%x,%x,%x,%x\n",
edid->header[0], edid->header[1],
edid->header[2], edid->header[3],
edid->header[4], edid->header[5],
edid->header[6], edid->header[7]);
drm_connector_update_edid_property(connector, edid);
num_modes = drm_add_edid_modes(connector, edid);
kfree(edid);
}
if (num_modes == 0)
DRM_ERROR("Invalid edid\n");
return num_modes;
}
static const struct drm_connector_funcs cdns_dp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = cdns_dp_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs cdns_dp_connector_helper_funcs = {
.get_modes = cdns_dp_connector_get_modes,
};
static int cdns_dp_bridge_attach(struct drm_bridge *bridge)
{
struct cdns_mhdp_device *mhdp = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
struct drm_connector *connector = &mhdp->connector.base;
connector->interlace_allowed = 1;
if (mhdp->is_hpd)
connector->polled = DRM_CONNECTOR_POLL_HPD;
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
drm_connector_helper_add(connector, &cdns_dp_connector_helper_funcs);
drm_connector_init(bridge->dev, connector, &cdns_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
static enum drm_mode_status
cdns_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
enum drm_mode_status mode_status = MODE_OK;
/* We don't support double-clocked or interlaced modes */
if (mode->flags & DRM_MODE_FLAG_DBLCLK ||
mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_BAD;
/* Max supported pixel clock rate is 594MHz */
if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
/* 4096x2160 is not supported now */
if (mode->hdisplay > 3840)
return MODE_BAD_HVALUE;
if (mode->vdisplay > 2160)
return MODE_BAD_VVALUE;
return mode_status;
}
static void cdns_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
struct cdns_mhdp_device *mhdp = bridge->driver_private;
struct drm_display_info *display_info = &mhdp->connector.base.display_info;
struct video_info *video = &mhdp->video_info;
switch (display_info->bpc) {
case 10:
video->color_depth = 10;
break;
case 6:
video->color_depth = 6;
break;
default:
video->color_depth = 8;
break;
}
video->color_fmt = PXL_RGB;
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
DRM_INFO("Mode: %dx%dp%d\n", mode->hdisplay, mode->vdisplay, mode->clock);
memcpy(&mhdp->mode, mode, sizeof(struct drm_display_mode));
mutex_lock(&mhdp->lock);
cdns_dp_mode_set(mhdp);
mutex_unlock(&mhdp->lock);
}
static void cdn_hdp_bridge_enable(struct drm_bridge *bridge)
{
}
static void cdn_hdp_bridge_disable(struct drm_bridge *bridge)
{
struct cdns_mhdp_device *mhdp = bridge->driver_private;
cdns_mhdp_set_video_status(mhdp, CONTROL_VIDEO_IDLE);
drm_dp_link_power_down(&mhdp->dp.aux, &mhdp->dp.link);
}
static const struct drm_bridge_funcs cdns_dp_bridge_funcs = {
.attach = cdns_dp_bridge_attach,
.enable = cdn_hdp_bridge_enable,
.disable = cdn_hdp_bridge_disable,
.mode_set = cdns_dp_bridge_mode_set,
.mode_valid = cdns_dp_bridge_mode_valid,
};
static void hotplug_work_func(struct work_struct *work)
{
struct cdns_mhdp_device *mhdp = container_of(work,
struct cdns_mhdp_device, hotplug_work.work);
struct drm_connector *connector = &mhdp->connector.base;
drm_helper_hpd_irq_event(connector->dev);
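/*
 * The IRQ thread (cdns_dp_irq_thread) disables whichever interrupt fired;
 * only the line for the opposite transition is re-armed here, so plug-in
 * and plug-out interrupts alternate.
 */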
if (connector->status == connector_status_connected) {
/* reset video mode after cable plugin */
mutex_lock(&mhdp->lock);
cdns_dp_mode_set(mhdp);
mutex_unlock(&mhdp->lock);
DRM_INFO("HDMI/DP Cable Plug In\n");
enable_irq(mhdp->irq[IRQ_OUT]);
} else if (connector->status == connector_status_disconnected) {
/* Cable Disconnected */
DRM_INFO("HDMI/DP Cable Plug Out\n");
enable_irq(mhdp->irq[IRQ_IN]);
}
}
static irqreturn_t cdns_dp_irq_thread(int irq, void *data)
{
struct cdns_mhdp_device *mhdp = data;
disable_irq_nosync(irq);
mod_delayed_work(system_wq, &mhdp->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
}
static void cdns_dp_parse_dt(struct cdns_mhdp_device *mhdp)
{
struct device_node *of_node = mhdp->dev->of_node;
int ret;
ret = of_property_read_u32(of_node, "lane-mapping",
&mhdp->lane_mapping);
if (ret) {
mhdp->lane_mapping = 0xc6;
dev_warn(mhdp->dev, "Failed to get lane_mapping - using default 0xc6\n");
}
dev_info(mhdp->dev, "lane-mapping 0x%02x\n", mhdp->lane_mapping);
}
static int __cdns_dp_probe(struct platform_device *pdev,
struct cdns_mhdp_device *mhdp)
{
struct device *dev = &pdev->dev;
struct resource *iores = NULL;
int ret;
mutex_init(&mhdp->lock);
mutex_init(&mhdp->iolock);
INIT_DELAYED_WORK(&mhdp->hotplug_work, hotplug_work_func);
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (iores) {
mhdp->regs_base = devm_ioremap(dev, iores->start,
resource_size(iores));
if (!mhdp->regs_base)
return -ENOMEM;
}
iores = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (iores) {
mhdp->regs_sec = devm_ioremap(dev, iores->start,
resource_size(iores));
if (!mhdp->regs_sec)
return -ENOMEM;
}
mhdp->is_hpd = true;
mhdp->is_ls1028a = false;
mhdp->irq[IRQ_IN] = platform_get_irq_byname(pdev, "plug_in");
if (mhdp->irq[IRQ_IN] < 0) {
mhdp->is_hpd = false;
dev_info(dev, "No plug_in irq number\n");
}
mhdp->irq[IRQ_OUT] = platform_get_irq_byname(pdev, "plug_out");
if (mhdp->irq[IRQ_OUT] < 0) {
mhdp->is_hpd = false;
dev_info(dev, "No plug_out irq number\n");
}
cdns_dp_parse_dt(mhdp);
if (of_device_is_compatible(dev->of_node, "cdn,ls1028a-dp"))
mhdp->is_ls1028a = true;
cdns_mhdp_plat_call(mhdp, power_on);
cdns_mhdp_plat_call(mhdp, firmware_init);
/* DP FW alive check */
ret = cdns_mhdp_check_alive(mhdp);
if (!ret) {
DRM_ERROR("No DP firmware running\n");
return -ENXIO;
}
/* DP PHY init before AUX init */
cdns_mhdp_plat_call(mhdp, phy_set);
/* Enable Hotplug Detect IRQ thread */
if (mhdp->is_hpd) {
irq_set_status_flags(mhdp->irq[IRQ_IN], IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, mhdp->irq[IRQ_IN],
NULL, cdns_dp_irq_thread,
IRQF_ONESHOT, dev_name(dev),
mhdp);
if (ret) {
dev_err(dev, "can't claim irq %d\n",
mhdp->irq[IRQ_IN]);
return -EINVAL;
}
irq_set_status_flags(mhdp->irq[IRQ_OUT], IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, mhdp->irq[IRQ_OUT],
NULL, cdns_dp_irq_thread,
IRQF_ONESHOT, dev_name(dev),
mhdp);
if (ret) {
dev_err(dev, "can't claim irq %d\n",
mhdp->irq[IRQ_OUT]);
return -EINVAL;
}
if (cdns_mhdp_read_hpd(mhdp))
enable_irq(mhdp->irq[IRQ_OUT]);
else
enable_irq(mhdp->irq[IRQ_IN]);
}
mhdp->bridge.base.driver_private = mhdp;
mhdp->bridge.base.funcs = &cdns_dp_bridge_funcs;
#ifdef CONFIG_OF
mhdp->bridge.base.of_node = dev->of_node;
#endif
dev_set_drvdata(dev, mhdp);
/* register audio driver */
cdns_mhdp_register_audio_driver(dev);
dp_aux_init(mhdp, dev);
return 0;
}
static void __cdns_dp_remove(struct cdns_mhdp_device *mhdp)
{
dp_aux_destroy(mhdp);
cdns_mhdp_unregister_audio_driver(mhdp->dev);
}
/* -----------------------------------------------------------------------------
* Probe/remove API, used from platforms based on the DRM bridge API.
*/
int cdns_dp_probe(struct platform_device *pdev,
struct cdns_mhdp_device *mhdp)
{
int ret;
ret = __cdns_dp_probe(pdev, mhdp);
if (ret)
return ret;
drm_bridge_add(&mhdp->bridge.base);
return 0;
}
EXPORT_SYMBOL_GPL(cdns_dp_probe);
void cdns_dp_remove(struct platform_device *pdev)
{
struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
drm_bridge_remove(&mhdp->bridge.base);
__cdns_dp_remove(mhdp);
}
EXPORT_SYMBOL_GPL(cdns_dp_remove);
/* -----------------------------------------------------------------------------
* Bind/unbind API, used from platforms based on the component framework.
*/
int cdns_dp_bind(struct platform_device *pdev, struct drm_encoder *encoder,
struct cdns_mhdp_device *mhdp)
{
int ret;
ret = __cdns_dp_probe(pdev, mhdp);
if (ret < 0)
return ret;
ret = drm_bridge_attach(encoder, &mhdp->bridge.base, NULL);
if (ret) {
cdns_dp_remove(pdev);
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(cdns_dp_bind);
void cdns_dp_unbind(struct device *dev)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
__cdns_dp_remove(mhdp);
}
EXPORT_SYMBOL_GPL(cdns_dp_unbind);
MODULE_AUTHOR("Sandor Yu <sandor.yu@nxp.com>");
MODULE_DESCRIPTION("Cadence Display Port transmitter driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdn-dp");

View File

@ -0,0 +1,683 @@
/*
* Cadence High-Definition Multimedia Interface (HDMI) driver
*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <drm/bridge/cdns-mhdp-common.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_scdc_helper.h>
#include <drm/drmP.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
static void hdmi_sink_config(struct cdns_mhdp_device *mhdp)
{
struct drm_scdc *scdc = &mhdp->connector.base.display_info.hdmi.scdc;
u8 buff = 0;
/* Default to HDMI 1.4 operation */
mhdp->hdmi.hdmi_type = MODE_HDMI_1_4;
/* check whether the sink supports SCDC */
if (!scdc->supported) {
DRM_INFO("Sink does not support SCDC\n");
return;
}
if (mhdp->hdmi.char_rate > 340000) {
/*
* TMDS character rates above 340MHz require HDMI 2.0 operation:
* enable scrambling and the 1/40 TMDS bit clock ratio
*/
buff = SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE;
mhdp->hdmi.hdmi_type = MODE_HDMI_2_0;
} else if (scdc->scrambling.low_rates) {
/*
* Enable scrambling and HDMI 2.0 when the sink indicates scrambling
* support at low rates via the HF-VSDB LTE_340Mcsc_scramble bit
*/
buff = SCDC_SCRAMBLING_ENABLE;
mhdp->hdmi.hdmi_type = MODE_HDMI_2_0;
}
/* Write the SCDC TMDS_Config register (offset 0x20) */
cdns_hdmi_scdc_write(mhdp, 0x20, buff);
}
static void hdmi_lanes_config(struct cdns_mhdp_device *mhdp)
{
/* Lane swapping */
cdns_mhdp_reg_write(mhdp, LANES_CONFIG, 0x00400000 | mhdp->lane_mapping);
}
static int hdmi_avi_info_set(struct cdns_mhdp_device *mhdp,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
int format = mhdp->video_info.color_fmt;
struct drm_connector_state *conn_state = mhdp->connector.base.state;
struct drm_display_mode *adj_mode;
enum hdmi_quantization_range qr;
u8 buf[32];
int ret;
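/*
 * Pack the AVI infoframe at buf[1] and leave buf[0] zeroed; the same
 * one-byte pad is used by the vendor and HDR infoframe paths below
 * before handing the buffer to cdns_mhdp_infoframe_set().
 */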
/* Initialise info frame from DRM mode */
drm_hdmi_avi_infoframe_from_display_mode(&frame, &mhdp->connector.base,
mode);
switch (format) {
case YCBCR_4_4_4:
frame.colorspace = HDMI_COLORSPACE_YUV444;
break;
case YCBCR_4_2_2:
frame.colorspace = HDMI_COLORSPACE_YUV422;
break;
case YCBCR_4_2_0:
frame.colorspace = HDMI_COLORSPACE_YUV420;
break;
default:
frame.colorspace = HDMI_COLORSPACE_RGB;
break;
}
drm_hdmi_avi_infoframe_colorspace(&frame, conn_state);
adj_mode = &mhdp->bridge.base.encoder->crtc->state->adjusted_mode;
qr = drm_default_rgb_quant_range(adj_mode);
drm_hdmi_avi_infoframe_quant_range(&frame, &mhdp->connector.base,
adj_mode, qr);
ret = hdmi_avi_infoframe_check(&frame);
if (WARN_ON(ret))
return ret;
ret = hdmi_avi_infoframe_pack(&frame, buf + 1, sizeof(buf) - 1);
if (ret < 0) {
DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
return -1;
}
buf[0] = 0;
cdns_mhdp_infoframe_set(mhdp, 0, sizeof(buf), buf, HDMI_INFOFRAME_TYPE_AVI);
return 0;
}
static void hdmi_vendor_info_set(struct cdns_mhdp_device *mhdp,
struct drm_display_mode *mode)
{
struct hdmi_vendor_infoframe frame;
u8 buf[32];
int ret;
/* Initialise vendor frame from DRM mode */
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame, &mhdp->connector.base, mode);
if (ret < 0) {
DRM_INFO("No vendor infoframe\n");
return;
}
ret = hdmi_vendor_infoframe_pack(&frame, buf + 1, sizeof(buf) - 1);
if (ret < 0) {
DRM_WARN("Unable to pack vendor infoframe: %d\n", ret);
return;
}
buf[0] = 0;
cdns_mhdp_infoframe_set(mhdp, 3, sizeof(buf), buf, HDMI_INFOFRAME_TYPE_VENDOR);
}
static void hdmi_drm_info_set(struct cdns_mhdp_device *mhdp)
{
struct drm_connector_state *conn_state;
struct hdmi_drm_infoframe frame;
u8 buf[32];
int ret;
conn_state = mhdp->connector.base.state;
if (!conn_state->hdr_output_metadata)
return;
ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
if (ret < 0) {
DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
return;
}
ret = hdmi_drm_infoframe_pack(&frame, buf + 1, sizeof(buf) - 1);
if (ret < 0) {
DRM_DEBUG_KMS("couldn't pack HDR infoframe\n");
return;
}
buf[0] = 0;
cdns_mhdp_infoframe_set(mhdp, 3, sizeof(buf),
buf, HDMI_INFOFRAME_TYPE_DRM);
}
void cdns_hdmi_mode_set(struct cdns_mhdp_device *mhdp)
{
struct drm_display_mode *mode = &mhdp->mode;
int ret;
hdmi_lanes_config(mhdp);
cdns_mhdp_plat_call(mhdp, pclk_rate);
/* allow the HDMI FW to stabilize after the pixel clock relock */
msleep(20);
cdns_mhdp_plat_call(mhdp, phy_set);
hdmi_sink_config(mhdp);
ret = cdns_hdmi_ctrl_init(mhdp, mhdp->hdmi.hdmi_type, mhdp->hdmi.char_rate);
if (ret < 0) {
DRM_ERROR("%s, ret = %d\n", __func__, ret);
return;
}
/* Configure the General Control Packet: only needed for deep color (> 8 bpc) */
if (mhdp->video_info.color_depth == 8)
cdns_hdmi_disable_gcp(mhdp);
else
cdns_hdmi_enable_gcp(mhdp);
ret = hdmi_avi_info_set(mhdp, mode);
if (ret < 0) {
DRM_ERROR("%s ret = %d\n", __func__, ret);
return;
}
/* the vendor infoframe is only emitted for HDMI 1.4 4K modes */
hdmi_vendor_info_set(mhdp, mode);
hdmi_drm_info_set(mhdp);
ret = cdns_hdmi_mode_config(mhdp, mode, &mhdp->video_info);
if (ret < 0) {
DRM_ERROR("CDN_API_HDMITX_SetVic_blocking ret = %d\n", ret);
return;
}
}
static enum drm_connector_status
cdns_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct cdns_mhdp_device *mhdp =
container_of(connector, struct cdns_mhdp_device, connector.base);
u8 hpd = 0xf;
hpd = cdns_mhdp_read_hpd(mhdp);
if (hpd == 1)
/* Cable Connected */
return connector_status_connected;
else if (hpd == 0)
/* Cable Disconnected */
return connector_status_disconnected;
else {
/* Cable status unknown */
DRM_INFO("Unknow cable status, hdp=%u\n", hpd);
return connector_status_unknown;
}
}
static int cdns_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct cdns_mhdp_device *mhdp =
container_of(connector, struct cdns_mhdp_device, connector.base);
int num_modes = 0;
struct edid *edid;
edid = drm_do_get_edid(&mhdp->connector.base,
cdns_hdmi_get_edid_block, mhdp);
if (edid) {
dev_info(mhdp->dev, "%x,%x,%x,%x,%x,%x,%x,%x\n",
edid->header[0], edid->header[1],
edid->header[2], edid->header[3],
edid->header[4], edid->header[5],
edid->header[6], edid->header[7]);
drm_connector_update_edid_property(connector, edid);
num_modes = drm_add_edid_modes(connector, edid);
kfree(edid);
}
if (num_modes == 0)
DRM_ERROR("Invalid edid\n");
return num_modes;
}
static bool blob_equal(const struct drm_property_blob *a,
const struct drm_property_blob *b)
{
if (a && b)
return a->length == b->length &&
!memcmp(a->data, b->data, a->length);
return !a == !b;
}
static int cdns_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *new_con_state =
drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_con_state =
drm_atomic_get_old_connector_state(state, connector);
struct drm_crtc *crtc = new_con_state->crtc;
struct drm_crtc_state *new_crtc_state;
if (!blob_equal(new_con_state->hdr_output_metadata,
old_con_state->hdr_output_metadata) ||
new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
return PTR_ERR(new_crtc_state);
new_crtc_state->mode_changed =
!new_con_state->hdr_output_metadata ||
!old_con_state->hdr_output_metadata ||
new_con_state->colorspace != old_con_state->colorspace;
}
return 0;
}
static const struct drm_connector_funcs cdns_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = cdns_hdmi_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs cdns_hdmi_connector_helper_funcs = {
.get_modes = cdns_hdmi_connector_get_modes,
.atomic_check = cdns_hdmi_connector_atomic_check,
};
static int cdns_hdmi_bridge_attach(struct drm_bridge *bridge)
{
struct cdns_mhdp_device *mhdp = bridge->driver_private;
struct drm_mode_config *config = &bridge->dev->mode_config;
struct drm_encoder *encoder = bridge->encoder;
struct drm_connector *connector = &mhdp->connector.base;
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_helper_add(connector, &cdns_hdmi_connector_helper_funcs);
drm_connector_init(bridge->dev, connector, &cdns_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
if (!strncmp("imx8mq-hdmi", mhdp->plat_data->plat_name, 11)) {
drm_object_attach_property(&connector->base,
config->hdr_output_metadata_property,
0);
if (!drm_mode_create_colorspace_property(connector))
drm_object_attach_property(&connector->base,
connector->colorspace_property,
0);
}
drm_connector_attach_encoder(connector, encoder);
return 0;
}
static enum drm_mode_status
cdns_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct cdns_mhdp_device *mhdp = bridge->driver_private;
enum drm_mode_status mode_status = MODE_OK;
int ret;
/* We don't support double-clocked or interlaced modes */
if (mode->flags & DRM_MODE_FLAG_DBLCLK ||
mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_BAD;
/* Maximum supported pixel clock rate is 594MHz */
if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
/* 4096x2160 is not supported */
if (mode->hdisplay > 3840 || mode->vdisplay > 2160)
return MODE_BAD_HVALUE;
mhdp->valid_mode = mode;
ret = cdns_mhdp_plat_call(mhdp, phy_video_valid);
if (!ret)
return MODE_CLOCK_RANGE;
return mode_status;
}
static void cdns_hdmi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
struct cdns_mhdp_device *mhdp = bridge->driver_private;
struct video_info *video = &mhdp->video_info;
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
DRM_INFO("Mode: %dx%dp%d\n", mode->hdisplay, mode->vdisplay, mode->clock);
memcpy(&mhdp->mode, mode, sizeof(struct drm_display_mode));
mutex_lock(&mhdp->lock);
cdns_hdmi_mode_set(mhdp);
mutex_unlock(&mhdp->lock);
}
bool cdns_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct cdns_mhdp_device *mhdp = bridge->driver_private;
struct drm_display_info *di = &mhdp->connector.base.display_info;
struct video_info *video = &mhdp->video_info;
int vic = drm_match_cea_mode(mode);
video->color_depth = 8;
video->color_fmt = PXL_RGB;
/* all platforms other than imx8mq */
if (strncmp("imx8mq-hdmi", mhdp->plat_data->plat_name, 11)) {
if (di->bpc == 10 || di->bpc == 6)
video->color_depth = di->bpc;
return true;
}
/* imx8mq */
if (vic == 97 || vic == 96) {
if (di->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
video->color_depth = 12;
else if (di->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
video->color_depth = 10;
if (drm_mode_is_420_only(di, mode) ||
(drm_mode_is_420_also(di, mode) &&
video->color_depth > 8)) {
video->color_fmt = YCBCR_4_2_0;
adjusted_mode->private_flags = 1;
return true;
}
video->color_depth = 8;
return true;
}
/* Never exceed the sink's maximum TMDS clock: deep color scales the TMDS clock by 1.5x (12 bpc) or 1.25x (10 bpc) */
if ((di->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) &&
(mode->clock * 3 / 2 <= di->max_tmds_clock))
video->color_depth = 12;
else if ((di->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
(mode->clock * 5 / 4 <= di->max_tmds_clock))
video->color_depth = 10;
/* 10-bit color depth for the following modes is not supported */
if ((vic == 95 || vic == 94 || vic == 93) && video->color_depth == 10)
video->color_depth = 8;
return true;
}
static const struct drm_bridge_funcs cdns_hdmi_bridge_funcs = {
.attach = cdns_hdmi_bridge_attach,
.mode_set = cdns_hdmi_bridge_mode_set,
.mode_valid = cdns_hdmi_bridge_mode_valid,
.mode_fixup = cdns_hdmi_bridge_mode_fixup,
};
static void hotplug_work_func(struct work_struct *work)
{
struct cdns_mhdp_device *mhdp = container_of(work,
struct cdns_mhdp_device, hotplug_work.work);
struct drm_connector *connector = &mhdp->connector.base;
drm_helper_hpd_irq_event(connector->dev);
if (connector->status == connector_status_connected) {
DRM_INFO("HDMI Cable Plug In\n");
/* reset video mode after cable plugin */
mutex_lock(&mhdp->lock);
cdns_hdmi_mode_set(mhdp);
mutex_unlock(&mhdp->lock);
enable_irq(mhdp->irq[IRQ_OUT]);
} else if (connector->status == connector_status_disconnected) {
/* Cable Disconnected */
DRM_INFO("HDMI Cable Plug Out\n");
enable_irq(mhdp->irq[IRQ_IN]);
}
}
static irqreturn_t cdns_hdmi_irq_thread(int irq, void *data)
{
struct cdns_mhdp_device *mhdp = data;
disable_irq_nosync(irq);
mod_delayed_work(system_wq, &mhdp->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
}
static void cdns_hdmi_parse_dt(struct cdns_mhdp_device *mhdp)
{
struct device_node *of_node = mhdp->dev->of_node;
int ret;
ret = of_property_read_u32(of_node, "lane-mapping", &mhdp->lane_mapping);
if (ret) {
mhdp->lane_mapping = 0xc6;
dev_warn(mhdp->dev, "Failed to get lane_mapping - using default 0xc6\n");
}
dev_info(mhdp->dev, "lane-mapping 0x%02x\n", mhdp->lane_mapping);
}
static int __cdns_hdmi_probe(struct platform_device *pdev,
struct cdns_mhdp_device *mhdp)
{
struct device *dev = &pdev->dev;
struct platform_device_info pdevinfo;
struct resource *iores = NULL;
int ret;
mutex_init(&mhdp->lock);
mutex_init(&mhdp->iolock);
INIT_DELAYED_WORK(&mhdp->hotplug_work, hotplug_work_func);
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mhdp->regs_base = devm_ioremap(dev, iores->start, resource_size(iores));
if (!mhdp->regs_base) {
dev_err(dev, "No regs_base memory\n");
return -ENOMEM;
}
/* sec register base */
iores = platform_get_resource(pdev, IORESOURCE_MEM, 1);
mhdp->regs_sec = devm_ioremap(dev, iores->start, resource_size(iores));
if (!mhdp->regs_sec) {
dev_err(dev, "No regs_sec memory\n");
return -ENOMEM;
}
mhdp->irq[IRQ_IN] = platform_get_irq_byname(pdev, "plug_in");
if (mhdp->irq[IRQ_IN] < 0) {
dev_info(dev, "No plug_in irq number\n");
return -EPROBE_DEFER;
}
mhdp->irq[IRQ_OUT] = platform_get_irq_byname(pdev, "plug_out");
if (mhdp->irq[IRQ_OUT] < 0) {
dev_info(dev, "No plug_out irq number\n");
return -EPROBE_DEFER;
}
cdns_mhdp_plat_call(mhdp, power_on);
/* Initialize FW */
cdns_mhdp_plat_call(mhdp, firmware_init);
/* HDMI FW alive check */
ret = cdns_mhdp_check_alive(mhdp);
if (!ret) {
dev_err(dev, "No HDMI FW running\n");
return -ENXIO;
}
/* Enable Hotplug Detect thread */
irq_set_status_flags(mhdp->irq[IRQ_IN], IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, mhdp->irq[IRQ_IN],
NULL, cdns_hdmi_irq_thread,
IRQF_ONESHOT, dev_name(dev),
mhdp);
if (ret < 0) {
dev_err(dev, "can't claim irq %d\n",
mhdp->irq[IRQ_IN]);
return -EINVAL;
}
irq_set_status_flags(mhdp->irq[IRQ_OUT], IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, mhdp->irq[IRQ_OUT],
NULL, cdns_hdmi_irq_thread,
IRQF_ONESHOT, dev_name(dev),
mhdp);
if (ret < 0) {
dev_err(dev, "can't claim irq %d\n",
mhdp->irq[IRQ_OUT]);
return -EINVAL;
}
cdns_hdmi_parse_dt(mhdp);
if (cdns_mhdp_read_hpd(mhdp))
enable_irq(mhdp->irq[IRQ_OUT]);
else
enable_irq(mhdp->irq[IRQ_IN]);
mhdp->bridge.base.driver_private = mhdp;
mhdp->bridge.base.funcs = &cdns_hdmi_bridge_funcs;
#ifdef CONFIG_OF
mhdp->bridge.base.of_node = dev->of_node;
#endif
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
dev_set_drvdata(dev, mhdp);
/* register audio driver */
cdns_mhdp_register_audio_driver(dev);
/* register cec driver */
#ifdef CONFIG_DRM_CDNS_HDMI_CEC
cdns_mhdp_register_cec_driver(dev);
#endif
return 0;
}
static void __cdns_hdmi_remove(struct cdns_mhdp_device *mhdp)
{
/* unregister cec driver */
#ifdef CONFIG_DRM_CDNS_HDMI_CEC
cdns_mhdp_unregister_cec_driver(mhdp->dev);
#endif
cdns_mhdp_unregister_audio_driver(mhdp->dev);
}
/* -----------------------------------------------------------------------------
* Probe/remove API, used from platforms based on the DRM bridge API.
*/
int cdns_hdmi_probe(struct platform_device *pdev,
struct cdns_mhdp_device *mhdp)
{
int ret;
ret = __cdns_hdmi_probe(pdev, mhdp);
if (ret < 0)
return ret;
drm_bridge_add(&mhdp->bridge.base);
return 0;
}
EXPORT_SYMBOL_GPL(cdns_hdmi_probe);
void cdns_hdmi_remove(struct platform_device *pdev)
{
struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
drm_bridge_remove(&mhdp->bridge.base);
__cdns_hdmi_remove(mhdp);
}
EXPORT_SYMBOL_GPL(cdns_hdmi_remove);
/* -----------------------------------------------------------------------------
* Bind/unbind API, used from platforms based on the component framework.
*/
int cdns_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
struct cdns_mhdp_device *mhdp)
{
int ret;
ret = __cdns_hdmi_probe(pdev, mhdp);
if (ret)
return ret;
ret = drm_bridge_attach(encoder, &mhdp->bridge.base, NULL);
if (ret) {
cdns_hdmi_remove(pdev);
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(cdns_hdmi_bind);
void cdns_hdmi_unbind(struct device *dev)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
__cdns_hdmi_remove(mhdp);
}
EXPORT_SYMBOL_GPL(cdns_hdmi_unbind);
MODULE_AUTHOR("Sandor Yu <sandor.yu@nxp.com>");
MODULE_DESCRIPTION("Cadence HDMI transmitter driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdn-hdmi");

View File

@ -0,0 +1,394 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author: Chris Zhong <zyw@rock-chips.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/reset.h>
#include <drm/bridge/cdns-mhdp-common.h>
#include <sound/hdmi-codec.h>
#include <drm/drm_of.h>
#include <drm/drmP.h>
#define CDNS_DP_SPDIF_CLK 200000000
static u32 TMDS_rate_table[7] = {
25200, 27000, 54000, 74250, 148500, 297000, 594000,
};
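/*
 * Audio Clock Regeneration (ACR) N values, one per TMDS rate above,
 * following the values recommended by the HDMI specification for each
 * audio sample rate family (32k, 44.1k, 48k).
 */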
static u32 N_table_32k[7] = {
/* 25200/27000/54000/74250/148500/297000/594000 */
4096, 4096, 4096, 4096, 4096, 3072, 3072,
};
static u32 N_table_44k[7] = {
6272, 6272, 6272, 6272, 6272, 4704, 9408,
};
static u32 N_table_48k[7] = {
6144, 6144, 6144, 6144, 6144, 5120, 6144,
};
static int select_N_index(u32 pclk)
{
int num = ARRAY_SIZE(TMDS_rate_table);
int i = 0;
for (i = 0; i < num ; i++)
if (pclk == TMDS_rate_table[i])
break;
if (i == num) {
DRM_WARN("pclkc %d is not supported!\n", pclk);
return num-1;
}
return i;
}
static void hdmi_audio_avi_set(struct cdns_mhdp_device *mhdp,
u32 channels)
{
struct hdmi_audio_infoframe frame;
u8 buf[32];
int ret;
hdmi_audio_infoframe_init(&frame);
frame.channels = channels;
frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
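/*
 * Static CEA-861 speaker allocation (CA field) picked from the channel
 * count: 2, 4 or 8 channels; any other count keeps the default CA of 0.
 */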
if (channels == 2)
frame.channel_allocation = 0;
else if (channels == 4)
frame.channel_allocation = 0x3;
else if (channels == 8)
frame.channel_allocation = 0x13;
ret = hdmi_audio_infoframe_pack(&frame, buf + 1, sizeof(buf) - 1);
if (ret < 0) {
DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
return;
}
buf[0] = 0;
cdns_mhdp_infoframe_set(mhdp, 1, sizeof(buf), buf, HDMI_INFOFRAME_TYPE_AUDIO);
}
int cdns_mhdp_audio_stop(struct cdns_mhdp_device *mhdp,
struct audio_info *audio)
{
int ret;
if (audio->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
ret = cdns_mhdp_reg_write(mhdp, AUDIO_PACK_CONTROL, 0);
if (ret) {
DRM_DEV_ERROR(mhdp->dev, "audio stop failed: %d\n", ret);
return ret;
}
}
cdns_mhdp_bus_write(0, mhdp, SPDIF_CTRL_ADDR);
/* clear the audio config and reset */
cdns_mhdp_bus_write(0, mhdp, AUDIO_SRC_CNTL);
cdns_mhdp_bus_write(0, mhdp, AUDIO_SRC_CNFG);
cdns_mhdp_bus_write(AUDIO_SW_RST, mhdp, AUDIO_SRC_CNTL);
cdns_mhdp_bus_write(0, mhdp, AUDIO_SRC_CNTL);
/* reset smpl2pckt component */
cdns_mhdp_bus_write(0, mhdp, SMPL2PKT_CNTL);
cdns_mhdp_bus_write(AUDIO_SW_RST, mhdp, SMPL2PKT_CNTL);
cdns_mhdp_bus_write(0, mhdp, SMPL2PKT_CNTL);
/* reset FIFO */
cdns_mhdp_bus_write(AUDIO_SW_RST, mhdp, FIFO_CNTL);
cdns_mhdp_bus_write(0, mhdp, FIFO_CNTL);
if (audio->format == AFMT_SPDIF_INT)
clk_disable_unprepare(mhdp->spdif_clk);
return 0;
}
EXPORT_SYMBOL(cdns_mhdp_audio_stop);
int cdns_mhdp_audio_mute(struct cdns_mhdp_device *mhdp, bool enable)
{
struct audio_info *audio = &mhdp->audio_info;
int ret = 0;
if (audio->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
ret = cdns_mhdp_reg_write_bit(mhdp, DP_VB_ID, 4, 1, enable);
if (ret)
DRM_DEV_ERROR(mhdp->dev, "audio mute failed: %d\n", ret);
}
return ret;
}
EXPORT_SYMBOL(cdns_mhdp_audio_mute);
static void cdns_mhdp_audio_config_i2s(struct cdns_mhdp_device *mhdp,
struct audio_info *audio)
{
int sub_pckt_num = 1, i2s_port_en_val = 0xf, i;
int idx = select_N_index(mhdp->mode.clock);
u32 val, ncts;
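/*
 * Each enabled I2S port carries two channels: one port for stereo, two
 * for 4 channels, all four otherwise. The sample-to-packet sub-packet
 * count depends on the channel count and, for stereo, the DP lane count.
 */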
if (audio->channels == 2) {
if (mhdp->dp.link.num_lanes == 1)
sub_pckt_num = 2;
else
sub_pckt_num = 4;
i2s_port_en_val = 1;
} else if (audio->channels == 4) {
i2s_port_en_val = 3;
}
cdns_mhdp_bus_write(0x0, mhdp, SPDIF_CTRL_ADDR);
cdns_mhdp_bus_write(SYNC_WR_TO_CH_ZERO, mhdp, FIFO_CNTL);
val = MAX_NUM_CH(audio->channels);
val |= NUM_OF_I2S_PORTS(audio->channels);
val |= AUDIO_TYPE_LPCM;
val |= CFG_SUB_PCKT_NUM(sub_pckt_num);
cdns_mhdp_bus_write(val, mhdp, SMPL2PKT_CNFG);
if (audio->sample_width == 16)
val = 0;
else if (audio->sample_width == 24)
val = 1 << 9;
else
val = 2 << 9;
val |= AUDIO_CH_NUM(audio->channels);
val |= I2S_DEC_PORT_EN(i2s_port_en_val);
val |= TRANS_SMPL_WIDTH_32;
cdns_mhdp_bus_write(val, mhdp, AUDIO_SRC_CNFG);
for (i = 0; i < (audio->channels + 1) / 2; i++) {
if (audio->sample_width == 16)
val = (0x02 << 8) | (0x02 << 20);
else if (audio->sample_width == 24)
val = (0x0b << 8) | (0x0b << 20);
val |= ((2 * i) << 4) | ((2 * i + 1) << 16);
cdns_mhdp_bus_write(val, mhdp, STTS_BIT_CH(i));
}
switch (audio->sample_rate) {
case 32000:
val = SAMPLING_FREQ(3) |
ORIGINAL_SAMP_FREQ(0xc);
ncts = N_table_32k[idx];
break;
case 44100:
val = SAMPLING_FREQ(0) |
ORIGINAL_SAMP_FREQ(0xf);
ncts = N_table_44k[idx];
break;
case 48000:
val = SAMPLING_FREQ(2) |
ORIGINAL_SAMP_FREQ(0xd);
ncts = N_table_48k[idx];
break;
case 88200:
val = SAMPLING_FREQ(8) |
ORIGINAL_SAMP_FREQ(0x7);
ncts = N_table_44k[idx] * 2;
break;
case 96000:
val = SAMPLING_FREQ(0xa) |
ORIGINAL_SAMP_FREQ(5);
ncts = N_table_48k[idx] * 2;
break;
case 176400:
val = SAMPLING_FREQ(0xc) |
ORIGINAL_SAMP_FREQ(3);
ncts = N_table_44k[idx] * 4;
break;
case 192000:
default:
val = SAMPLING_FREQ(0xe) |
ORIGINAL_SAMP_FREQ(1);
ncts = N_table_48k[idx] * 4;
break;
}
val |= 4;
cdns_mhdp_bus_write(val, mhdp, COM_CH_STTS_BITS);
if (audio->connector_type == DRM_MODE_CONNECTOR_HDMIA)
cdns_mhdp_reg_write(mhdp, CM_I2S_CTRL, ncts | 0x4000000);
cdns_mhdp_bus_write(SMPL2PKT_EN, mhdp, SMPL2PKT_CNTL);
cdns_mhdp_bus_write(I2S_DEC_START, mhdp, AUDIO_SRC_CNTL);
}
static void cdns_mhdp_audio_config_spdif(struct cdns_mhdp_device *mhdp)
{
u32 val;
cdns_mhdp_bus_write(SYNC_WR_TO_CH_ZERO, mhdp, FIFO_CNTL);
val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
cdns_mhdp_bus_write(val, mhdp, SMPL2PKT_CNFG);
cdns_mhdp_bus_write(SMPL2PKT_EN, mhdp, SMPL2PKT_CNTL);
val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
cdns_mhdp_bus_write(val, mhdp, SPDIF_CTRL_ADDR);
clk_prepare_enable(mhdp->spdif_clk);
clk_set_rate(mhdp->spdif_clk, CDNS_DP_SPDIF_CLK);
}
int cdns_mhdp_audio_config(struct cdns_mhdp_device *mhdp,
struct audio_info *audio)
{
int ret;
/* reset the spdif clk before config */
if (audio->format == AFMT_SPDIF_INT) {
reset_control_assert(mhdp->spdif_rst);
reset_control_deassert(mhdp->spdif_rst);
}
if (audio->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
ret = cdns_mhdp_reg_write(mhdp, CM_LANE_CTRL, LANE_REF_CYC);
if (ret)
goto err_audio_config;
ret = cdns_mhdp_reg_write(mhdp, CM_CTRL, 0);
if (ret)
goto err_audio_config;
} else {
/* HDMI Mode */
ret = cdns_mhdp_reg_write(mhdp, CM_CTRL, 8);
if (ret)
goto err_audio_config;
}
if (audio->format == AFMT_I2S)
cdns_mhdp_audio_config_i2s(mhdp, audio);
else if (audio->format == AFMT_SPDIF_INT)
cdns_mhdp_audio_config_spdif(mhdp);
if (audio->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
ret = cdns_mhdp_reg_write(mhdp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN);
if (audio->connector_type == DRM_MODE_CONNECTOR_HDMIA)
hdmi_audio_avi_set(mhdp, audio->channels);
err_audio_config:
if (ret)
DRM_DEV_ERROR(mhdp->dev, "audio config failed: %d\n", ret);
return ret;
}
EXPORT_SYMBOL(cdns_mhdp_audio_config);
static int audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
struct audio_info audio = {
.sample_width = params->sample_width,
.sample_rate = params->sample_rate,
.channels = params->channels,
.connector_type = mhdp->connector.base.connector_type,
};
int ret;
switch (daifmt->fmt) {
case HDMI_I2S:
audio.format = AFMT_I2S;
break;
case HDMI_SPDIF:
audio.format = AFMT_SPDIF_EXT;
break;
default:
DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
ret = -EINVAL;
goto out;
}
ret = cdns_mhdp_audio_config(mhdp, &audio);
if (!ret)
mhdp->audio_info = audio;
out:
return ret;
}
static void audio_shutdown(struct device *dev, void *data)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
int ret;
ret = cdns_mhdp_audio_stop(mhdp, &mhdp->audio_info);
if (!ret)
mhdp->audio_info.format = AFMT_UNUSED;
}
static int audio_digital_mute(struct device *dev, void *data,
bool enable)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
int ret;
ret = cdns_mhdp_audio_mute(mhdp, enable);
return ret;
}
static int audio_get_eld(struct device *dev, void *data,
u8 *buf, size_t len)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
memcpy(buf, mhdp->connector.base.eld,
min(sizeof(mhdp->connector.base.eld), len));
return 0;
}
static const struct hdmi_codec_ops audio_codec_ops = {
.hw_params = audio_hw_params,
.audio_shutdown = audio_shutdown,
.digital_mute = audio_digital_mute,
.get_eld = audio_get_eld,
};
int cdns_mhdp_register_audio_driver(struct device *dev)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
struct hdmi_codec_pdata codec_data = {
.i2s = 1,
.spdif = 1,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
};
mhdp->audio_pdev = platform_device_register_data(
dev, HDMI_CODEC_DRV_NAME, 1,
&codec_data, sizeof(codec_data));
return PTR_ERR_OR_ZERO(mhdp->audio_pdev);
}
void cdns_mhdp_unregister_audio_driver(struct device *dev)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
platform_device_unregister(mhdp->audio_pdev);
}

View File

@ -0,0 +1,341 @@
/*
* Copyright 2019 NXP
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <drm/bridge/cdns-mhdp-common.h>
#define CEC_NAME "cdns-mhdp-cec"
#define REG_ADDR_OFF 4
/* register definitions */
#define TX_MSG_HEADER 0x33800
#define TX_MSG_LENGTH 0x33840
#define TX_MSG_CMD 0x33844
#define RX_MSG_CMD 0x33850
#define RX_CLEAR_BUF 0x33854
#define LOGICAL_ADDRESS_LA0 0x33858
#define CLK_DIV_MSB 0x3386c
#define CLK_DIV_LSB 0x33870
#define RX_MSG_DATA1 0x33900
#define RX_MSG_LENGTH 0x33940
#define RX_MSG_STATUS 0x33944
#define NUM_OF_MSG_RX_BUF 0x33948
#define TX_MSG_STATUS 0x3394c
#define DB_L_TIMER 0x33980
/**
* CEC Transceiver operation.
*/
enum {
CEC_TX_STOP,
CEC_TX_TRANSMIT,
CEC_TX_ABORT,
CEC_TX_ABORT_AND_TRANSMIT
};
/**
* CEC Transceiver status.
*/
enum {
CEC_STS_IDLE,
CEC_STS_BUSY,
CEC_STS_SUCCESS,
CEC_STS_ERROR
};
/**
* CEC Receiver operation.
*/
enum {
CEC_RX_STOP,
CEC_RX_READ,
CEC_RX_DISABLE,
CEC_RX_ABORT_AND_CLR_FIFO
};
/**
* Maximum number of Messages in the RX Buffers.
*/
#define CEC_MAX_RX_MSGS 2
static u32 mhdp_cec_read(struct cdns_mhdp_cec *cec, u32 offset)
{
struct cdns_mhdp_device *mhdp =
container_of(cec, struct cdns_mhdp_device, hdmi.cec);
return cdns_mhdp_bus_read(mhdp, offset);
}
static void mhdp_cec_write(struct cdns_mhdp_cec *cec, u32 offset, u32 val)
{
struct cdns_mhdp_device *mhdp =
container_of(cec, struct cdns_mhdp_device, hdmi.cec);
cdns_mhdp_bus_write(val, mhdp, offset);
}
static void mhdp_cec_clear_rx_buffer(struct cdns_mhdp_cec *cec)
{
mhdp_cec_write(cec, RX_CLEAR_BUF, 1);
mhdp_cec_write(cec, RX_CLEAR_BUF, 0);
}
static void mhdp_cec_set_divider(struct cdns_mhdp_cec *cec)
{
struct cdns_mhdp_device *mhdp =
container_of(cec, struct cdns_mhdp_device, hdmi.cec);
u32 clk_div;
/* Set clock divider */
clk_div = cdns_mhdp_get_fw_clk(mhdp) * 10;
mhdp_cec_write(cec, CLK_DIV_MSB,
(clk_div >> 8) & 0xFF);
mhdp_cec_write(cec, CLK_DIV_LSB, clk_div & 0xFF);
}
static u32 mhdp_cec_read_message(struct cdns_mhdp_cec *cec)
{
struct cec_msg *msg = &cec->msg;
int len;
int i;
mhdp_cec_write(cec, RX_MSG_CMD, CEC_RX_READ);
len = mhdp_cec_read(cec, RX_MSG_LENGTH);
msg->len = len + 1;
dev_dbg(cec->dev, "RX MSG len =%d\n", len);
/* Read RX MSG bytes */
for (i = 0; i < msg->len; ++i) {
msg->msg[i] = (u8) mhdp_cec_read(cec, RX_MSG_DATA1 + (i * REG_ADDR_OFF));
dev_dbg(cec->dev, "RX MSG[%d]=0x%x\n", i, msg->msg[i]);
}
mhdp_cec_write(cec, RX_MSG_CMD, CEC_RX_STOP);
return true;
}
static int mhdp_cec_write_message(struct cdns_mhdp_cec *cec, struct cec_msg *msg)
{
u8 i;
mhdp_cec_write(cec, TX_MSG_CMD, CEC_TX_STOP);
if (msg->len > CEC_MAX_MSG_SIZE) {
dev_err(cec->dev, "Invalid MSG size!\n");
return -EINVAL;
}
for (i = 0; i < msg->len; ++i)
dev_dbg(cec->dev, "TX MSG[%d]=0x%x\n", i, msg->msg[i]);
/* Write Message to register */
for (i = 0; i < msg->len; ++i) {
mhdp_cec_write(cec, TX_MSG_HEADER + (i * REG_ADDR_OFF),
msg->msg[i]);
}
/* Write Message Length (payload + opcode) */
mhdp_cec_write(cec, TX_MSG_LENGTH, msg->len - 1);
mhdp_cec_write(cec, TX_MSG_CMD, CEC_TX_TRANSMIT);
return 0;
}
static int mhdp_cec_set_logical_addr(struct cdns_mhdp_cec *cec, u32 la)
{
u8 la_reg;
u8 i;
if (la == CEC_LOG_ADDR_INVALID) {
/* invalidate all logical addresses */
for (i = 0; i < CEC_MAX_LOG_ADDRS; ++i)
mhdp_cec_write(cec, LOGICAL_ADDRESS_LA0 + (i * REG_ADDR_OFF), 0);
return 0;
}
/* The cdns mhdp CEC hardware supports at most 5 logical addresses */
for (i = 0; i < CEC_MAX_LOG_ADDRS; ++i) {
la_reg = mhdp_cec_read(cec, LOGICAL_ADDRESS_LA0 + (i * REG_ADDR_OFF));
/* Check LA already used */
if (la_reg & 0x10)
continue;
if ((la_reg & 0xF) == la) {
dev_warn(cec->dev, "Warning. LA already in use.\n");
return 0;
}
la = (la & 0xF) | (1 << 4);
mhdp_cec_write(cec, LOGICAL_ADDRESS_LA0 + (i * REG_ADDR_OFF), la);
return 0;
}
dev_warn(cec->dev, "All LA in use\n");
return -ENXIO;
}
static int mhdp_cec_poll_worker(void *_cec)
{
struct cdns_mhdp_cec *cec = (struct cdns_mhdp_cec *)_cec;
int num_rx_msgs, i;
int sts;
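/*
 * The CEC transceiver is serviced by polling: the TX and RX status
 * registers are checked from this freezable kthread every few jiffies.
 */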
set_freezable();
for (;;) {
if (kthread_freezable_should_stop(NULL))
break;
/* Check TX State */
sts = mhdp_cec_read(cec, TX_MSG_STATUS);
switch (sts) {
case CEC_STS_SUCCESS:
cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0,
0);
mhdp_cec_write(cec, TX_MSG_CMD, CEC_TX_STOP);
break;
case CEC_STS_ERROR:
mhdp_cec_write(cec, TX_MSG_CMD, CEC_TX_STOP);
cec_transmit_done(cec->adap,
CEC_TX_STATUS_MAX_RETRIES |
CEC_TX_STATUS_NACK, 0, 1, 0, 0);
break;
case CEC_STS_BUSY:
default:
break;
}
/* Check RX State */
sts = mhdp_cec_read(cec, RX_MSG_STATUS);
num_rx_msgs = mhdp_cec_read(cec, NUM_OF_MSG_RX_BUF);
switch (sts) {
case CEC_STS_SUCCESS:
if (num_rx_msgs == 0xf)
num_rx_msgs = CEC_MAX_RX_MSGS;
if (num_rx_msgs > CEC_MAX_RX_MSGS) {
dev_err(cec->dev, "Error rx msg num %d\n",
num_rx_msgs);
mhdp_cec_clear_rx_buffer(cec);
break;
}
/* The RX FIFO holds up to 2 messages */
for (i = 0; i < num_rx_msgs; i++) {
mhdp_cec_read_message(cec);
cec->msg.rx_status = CEC_RX_STATUS_OK;
cec_received_msg(cec->adap, &cec->msg);
}
break;
default:
break;
}
if (!kthread_should_stop())
schedule_timeout_idle(20);
}
return 0;
}
static int mhdp_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct cdns_mhdp_cec *cec = cec_get_drvdata(adap);
if (enable) {
mhdp_cec_write(cec, DB_L_TIMER, 0x10);
mhdp_cec_set_divider(cec);
} else
mhdp_cec_set_divider(cec);
return 0;
}
static int mhdp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
struct cdns_mhdp_cec *cec = cec_get_drvdata(adap);
return mhdp_cec_set_logical_addr(cec, addr);
}
static int mhdp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct cdns_mhdp_cec *cec = cec_get_drvdata(adap);
mhdp_cec_write_message(cec, msg);
return 0;
}
static const struct cec_adap_ops cdns_mhdp_cec_adap_ops = {
.adap_enable = mhdp_cec_adap_enable,
.adap_log_addr = mhdp_cec_adap_log_addr,
.adap_transmit = mhdp_cec_adap_transmit,
};
int cdns_mhdp_register_cec_driver(struct device *dev)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
struct cdns_mhdp_cec *cec = &mhdp->hdmi.cec;
int ret;
cec->adap = cec_allocate_adapter(&cdns_mhdp_cec_adap_ops, cec,
CEC_NAME,
CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS |
CEC_CAP_TRANSMIT | CEC_CAP_PASSTHROUGH
| CEC_CAP_RC, CEC_MAX_LOG_ADDRS);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
ret = cec_register_adapter(cec->adap, dev);
if (ret) {
cec_delete_adapter(cec->adap);
return ret;
}
cec->dev = dev;
cec->cec_worker = kthread_create(mhdp_cec_poll_worker, cec, "cdns-mhdp-cec");
if (IS_ERR(cec->cec_worker)) {
dev_err(cec->dev, "failed to create mhdp cec thread\n");
cec_unregister_adapter(cec->adap);
return PTR_ERR(cec->cec_worker);
}
wake_up_process(cec->cec_worker);
dev_dbg(dev, "CEC successfuly probed\n");
return 0;
}
int cdns_mhdp_unregister_cec_driver(struct device *dev)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(dev);
struct cdns_mhdp_cec *cec = &mhdp->hdmi.cec;
if (cec->cec_worker) {
kthread_stop(cec->cec_worker);
cec->cec_worker = NULL;
}
cec_unregister_adapter(cec->adap);
return 0;
}
MODULE_AUTHOR("Sandor.Yu@NXP.com");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("NXP CDNS MHDP HDMI CEC driver");

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,331 @@
/*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <drm/drmP.h>
#include <linux/io.h>
#include <drm/bridge/cdns-mhdp-common.h>
#include <linux/regmap.h>
void cdns_mhdp_infoframe_set(struct cdns_mhdp_device *mhdp,
u8 entry_id, u8 packet_len, u8 *packet, u8 packet_type)
{
u32 *packet32, len32;
u32 val, i;
/* invalidate entry */
val = F_ACTIVE_IDLE_TYPE(1) | F_PKT_ALLOC_ADDRESS(entry_id);
cdns_mhdp_bus_write(val, mhdp, SOURCE_PIF_PKT_ALLOC_REG);
cdns_mhdp_bus_write(F_PKT_ALLOC_WR_EN(1), mhdp, SOURCE_PIF_PKT_ALLOC_WR_EN);
/* flush fifo 1 */
cdns_mhdp_bus_write(F_FIFO1_FLUSH(1), mhdp, SOURCE_PIF_FIFO1_FLUSH);
/* write packet into memory */
packet32 = (u32 *)packet;
len32 = packet_len / 4;
for (i = 0; i < len32; i++)
cdns_mhdp_bus_write(F_DATA_WR(packet32[i]), mhdp, SOURCE_PIF_DATA_WR);
/* write entry id */
cdns_mhdp_bus_write(F_WR_ADDR(entry_id), mhdp, SOURCE_PIF_WR_ADDR);
/* write request */
cdns_mhdp_bus_write(F_HOST_WR(1), mhdp, SOURCE_PIF_WR_REQ);
/* update entry */
val = F_ACTIVE_IDLE_TYPE(1) | F_TYPE_VALID(1) |
F_PACKET_TYPE(packet_type) | F_PKT_ALLOC_ADDRESS(entry_id);
cdns_mhdp_bus_write(val, mhdp, SOURCE_PIF_PKT_ALLOC_REG);
cdns_mhdp_bus_write(F_PKT_ALLOC_WR_EN(1), mhdp, SOURCE_PIF_PKT_ALLOC_WR_EN);
}
int cdns_hdmi_get_edid_block(void *data, u8 *edid,
u32 block, size_t length)
{
struct cdns_mhdp_device *mhdp = data;
u8 msg[2], reg[5], i;
int ret;
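/*
 * The EDID block is read through the firmware mailbox; retry the whole
 * transaction a few times in case the mailbox is busy. The 5-byte reply
 * header is returned in reg[], with reg[3]/reg[4] carrying the number
 * of bytes actually read, which must match the requested length.
 */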
for (i = 0; i < 4; i++) {
msg[0] = block / 2;
msg[1] = block % 2;
ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_HDMI_TX, HDMI_TX_EDID,
sizeof(msg), msg);
if (ret)
continue;
ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_HDMI_TX,
HDMI_TX_EDID, sizeof(reg) + length);
if (ret)
continue;
ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
if (ret)
continue;
ret = cdns_mhdp_mailbox_read_receive(mhdp, edid, length);
if (ret)
continue;
if ((reg[3] << 8 | reg[4]) == length)
break;
}
if (ret)
DRM_ERROR("get block[%d] edid failed: %d\n", block, ret);
return ret;
}
int cdns_hdmi_scdc_read(struct cdns_mhdp_device *mhdp, u8 addr, u8 *data)
{
u8 msg[4], reg[6];
int ret;
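/* 0x54 is the 7-bit I2C slave address of the SCDC register set (0xA8 >> 1) */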
msg[0] = 0x54;
msg[1] = addr;
msg[2] = 0;
msg[3] = 1;
ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_HDMI_TX, HDMI_TX_READ,
sizeof(msg), msg);
if (ret)
goto err_scdc_read;
ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_HDMI_TX,
HDMI_TX_READ, sizeof(reg));
if (ret)
goto err_scdc_read;
ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
if (ret)
goto err_scdc_read;
*data = reg[5];
err_scdc_read:
if (ret)
DRM_ERROR("scdc read failed: %d\n", ret);
return ret;
}
int cdns_hdmi_scdc_write(struct cdns_mhdp_device *mhdp, u8 addr, u8 value)
{
u8 msg[5], reg[5];
int ret;
msg[0] = 0x54;
msg[1] = addr;
msg[2] = 0;
msg[3] = 1;
msg[4] = value;
ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_HDMI_TX, HDMI_TX_WRITE,
sizeof(msg), msg);
if (ret)
goto err_scdc_write;
ret = cdns_mhdp_mailbox_validate_receive(mhdp, MB_MODULE_ID_HDMI_TX,
HDMI_TX_WRITE, sizeof(reg));
if (ret)
goto err_scdc_write;
ret = cdns_mhdp_mailbox_read_receive(mhdp, reg, sizeof(reg));
if (ret)
goto err_scdc_write;
if (reg[0] != 0)
ret = -EINVAL;
err_scdc_write:
if (ret)
DRM_ERROR("scdc write failed: %d\n", ret);
return ret;
}
int cdns_hdmi_ctrl_init(struct cdns_mhdp_device *mhdp,
int protocol,
u32 char_rate)
{
u32 reg0;
u32 reg1;
u32 val;
int ret;
/* Set PHY to HDMI data */
ret = cdns_mhdp_reg_write(mhdp, PHY_DATA_SEL, F_SOURCE_PHY_MHDP_SEL(1));
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, HDTX_HPD,
F_HPD_VALID_WIDTH(4) | F_HPD_GLITCH_WIDTH(0));
if (ret < 0)
return ret;
/* enable the clock and reset (CAR) registers */
ret = cdns_mhdp_reg_write(mhdp, SOURCE_PHY_CAR, 0xF);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, SOURCE_HDTX_CAR, 0xFF);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, SOURCE_PKT_CAR, 0xF);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, SOURCE_AIF_CAR, 0xF);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, SOURCE_CIPHER_CAR, 0xF);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, SOURCE_CRYPTO_CAR, 0xF);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, SOURCE_CEC_CAR, 3);
if (ret < 0)
return ret;
reg0 = reg1 = 0x7c1f;
if (protocol == MODE_HDMI_2_0 && char_rate >= 340000) {
reg0 = 0;
reg1 = 0xFFFFF;
}
ret = cdns_mhdp_reg_write(mhdp, HDTX_CLOCK_REG_0, reg0);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, HDTX_CLOCK_REG_1, reg1);
if (ret < 0)
return ret;
/* set HDMI mode, preamble mode and data enable */
val = F_HDMI_MODE(protocol) | F_HDMI2_PREAMBLE_EN(1) | F_DATA_EN(1) |
F_HDMI2_CTRL_IL_MODE(1) | F_BCH_EN(1) | F_PIC_3D(0XF);
ret = cdns_mhdp_reg_write(mhdp, HDTX_CONTROLLER, val);
return ret;
}
int cdns_hdmi_mode_config(struct cdns_mhdp_device *mhdp,
struct drm_display_mode *mode,
struct video_info *video_info)
{
int ret;
u32 val;
u32 vsync_lines = mode->vsync_end - mode->vsync_start;
u32 eof_lines = mode->vsync_start - mode->vdisplay;
u32 sof_lines = mode->vtotal - mode->vsync_end;
u32 hblank = mode->htotal - mode->hdisplay;
u32 hactive = mode->hdisplay;
u32 vblank = mode->vtotal - mode->vdisplay;
u32 vactive = mode->vdisplay;
u32 hfront = mode->hsync_start - mode->hdisplay;
u32 hback = mode->htotal - mode->hsync_end;
u32 vfront = eof_lines;
u32 hsync = hblank - hfront - hback;
u32 vsync = vsync_lines;
u32 vback = sof_lines;
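/*
 * HSYNC2VSYNC_POL_CTRL encoding computed below: bit 0 is set for an
 * active-high HSYNC, bit 1 for an active-high VSYNC.
 */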
u32 v_h_polarity = ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1) +
((mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : 2);
ret = cdns_mhdp_reg_write(mhdp, SCHEDULER_H_SIZE, (hactive << 16) + hblank);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, SCHEDULER_V_SIZE, (vactive << 16) + vblank);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, HDTX_SIGNAL_FRONT_WIDTH, (vfront << 16) + hfront);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, HDTX_SIGNAL_SYNC_WIDTH, (vsync << 16) + hsync);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, HDTX_SIGNAL_BACK_WIDTH, (vback << 16) + hback);
if (ret < 0)
return ret;
ret = cdns_mhdp_reg_write(mhdp, HSYNC2VSYNC_POL_CTRL, v_h_polarity);
if (ret < 0)
return ret;
/* Reset Data Enable */
val = cdns_mhdp_reg_read(mhdp, HDTX_CONTROLLER);
val &= ~F_DATA_EN(1);
ret = cdns_mhdp_reg_write(mhdp, HDTX_CONTROLLER, val);
if (ret < 0)
return ret;
/* Set bpc */
val &= ~F_VIF_DATA_WIDTH(3);
switch (video_info->color_depth) {
case 10:
val |= F_VIF_DATA_WIDTH(1);
break;
case 12:
val |= F_VIF_DATA_WIDTH(2);
break;
case 16:
val |= F_VIF_DATA_WIDTH(3);
break;
case 8:
default:
val |= F_VIF_DATA_WIDTH(0);
break;
}
/* select color encoding */
val &= ~F_HDMI_ENCODING(3);
switch (video_info->color_fmt) {
case YCBCR_4_4_4:
val |= F_HDMI_ENCODING(2);
break;
case YCBCR_4_2_2:
val |= F_HDMI_ENCODING(1);
break;
case YCBCR_4_2_0:
val |= F_HDMI_ENCODING(3);
break;
case PXL_RGB:
default:
val |= F_HDMI_ENCODING(0);
break;
}
ret = cdns_mhdp_reg_write(mhdp, HDTX_CONTROLLER, val);
if (ret < 0)
return ret;
/* set data enable */
val |= F_DATA_EN(1);
ret = cdns_mhdp_reg_write(mhdp, HDTX_CONTROLLER, val);
return ret;
}
int cdns_hdmi_disable_gcp(struct cdns_mhdp_device *mhdp)
{
u32 val;
val = cdns_mhdp_reg_read(mhdp, HDTX_CONTROLLER);
val &= ~F_GCP_EN(1);
return cdns_mhdp_reg_write(mhdp, HDTX_CONTROLLER, val);
}
int cdns_hdmi_enable_gcp(struct cdns_mhdp_device *mhdp)
{
u32 val;
val = cdns_mhdp_reg_read(mhdp, HDTX_CONTROLLER);
val |= F_GCP_EN(1);
return cdns_mhdp_reg_write(mhdp, HDTX_CONTROLLER, val);
}

View File

@ -0,0 +1,209 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Cadence MHDP DP MST bridge driver.
*
* Copyright: 2018 Cadence Design Systems, Inc.
*
* Author: Quentin Schulz <quentin.schulz@free-electrons.com>
*/
#ifndef CDNS_MHDP_H
#define CDNS_MHDP_H
#include <drm/drm_dp_mst_helper.h>
#define CDNS_APB_CFG 0x00000
#define CDNS_APB_CTRL (CDNS_APB_CFG + 0x00)
#define CDNS_MAILBOX_FULL (CDNS_APB_CFG + 0x08)
#define CDNS_MAILBOX_EMPTY (CDNS_APB_CFG + 0x0c)
#define CDNS_MAILBOX_TX_DATA (CDNS_APB_CFG + 0x10)
#define CDNS_MAILBOX_RX_DATA (CDNS_APB_CFG + 0x14)
#define CDNS_KEEP_ALIVE (CDNS_APB_CFG + 0x18)
#define CDNS_KEEP_ALIVE_MASK GENMASK(7, 0)
#define CDNS_MB_INT_MASK (CDNS_APB_CFG + 0x34)
#define CDNS_SW_CLK_L (CDNS_APB_CFG + 0x3c)
#define CDNS_SW_CLK_H (CDNS_APB_CFG + 0x40)
#define CDNS_SW_EVENT0 (CDNS_APB_CFG + 0x44)
#define CDNS_DPTX_HPD BIT(0)
#define CDNS_SW_EVENT1 (CDNS_APB_CFG + 0x48)
#define CDNS_SW_EVENT2 (CDNS_APB_CFG + 0x4c)
#define CDNS_SW_EVENT3 (CDNS_APB_CFG + 0x50)
#define CDNS_APB_INT_MASK (CDNS_APB_CFG + 0x6C)
#define CDNS_APB_INT_MASK_MAILBOX_INT BIT(0)
#define CDNS_APB_INT_MASK_SW_EVENT_INT BIT(1)
#define CDNS_DPTX_CAR (CDNS_APB_CFG + 0x904)
#define CDNS_VIF_CLK_EN BIT(0)
#define CDNS_VIF_CLK_RSTN BIT(1)
#define CDNS_SOURCE_VIDEO_IF(s) (0x00b00 + (s * 0x20))
#define CDNS_BND_HSYNC2VSYNC(s) (CDNS_SOURCE_VIDEO_IF(s) + \
0x00)
#define CDNS_IP_DTCT_WIN GENMASK(11, 0)
#define CDNS_IP_DET_INTERLACE_FORMAT BIT(12)
#define CDNS_IP_BYPASS_V_INTERFACE BIT(13)
#define CDNS_HSYNC2VSYNC_POL_CTRL(s) (CDNS_SOURCE_VIDEO_IF(s) + \
0x10)
#define CDNS_H2V_HSYNC_POL_ACTIVE_LOW BIT(1)
#define CDNS_H2V_VSYNC_POL_ACTIVE_LOW BIT(2)
#define CDNS_DPTX_PHY_CONFIG 0x02000
#define CDNS_PHY_TRAINING_EN BIT(0)
#define CDNS_PHY_TRAINING_TYPE(x) (((x) & GENMASK(3, 0)) << 1)
#define CDNS_PHY_SCRAMBLER_BYPASS BIT(5)
#define CDNS_PHY_ENCODER_BYPASS BIT(6)
#define CDNS_PHY_SKEW_BYPASS BIT(7)
#define CDNS_PHY_TRAINING_AUTO BIT(8)
#define CDNS_PHY_LANE0_SKEW(x) (((x) & GENMASK(2, 0)) << 9)
#define CDNS_PHY_LANE1_SKEW(x) (((x) & GENMASK(2, 0)) << 12)
#define CDNS_PHY_LANE2_SKEW(x) (((x) & GENMASK(2, 0)) << 15)
#define CDNS_PHY_LANE3_SKEW(x) (((x) & GENMASK(2, 0)) << 18)
#define CDNS_PHY_COMMON_CONFIG (CDNS_PHY_LANE1_SKEW(1) | \
CDNS_PHY_LANE2_SKEW(2) | \
CDNS_PHY_LANE3_SKEW(3))
#define CDNS_PHY_10BIT_EN BIT(21)
#define CDNS_DPTX_FRAMER 0x02200
#define CDNS_DP_FRAMER_GLOBAL_CONFIG (CDNS_DPTX_FRAMER + 0x00)
#define CDNS_DP_NUM_LANES(x) (x - 1)
#define CDNS_DP_MST_EN BIT(2)
#define CDNS_DP_FRAMER_EN BIT(3)
#define CDNS_DP_RATE_GOVERNOR_EN BIT(4)
#define CDNS_DP_NO_VIDEO_MODE BIT(5)
#define CDNS_DP_DISABLE_PHY_RST BIT(6)
#define CDNS_DP_WR_FAILING_EDGE_VSYNC BIT(7)
#define CDNS_DP_SW_RESET (CDNS_DPTX_FRAMER + 0x04)
#define CDNS_DP_FRAMER_TU (CDNS_DPTX_FRAMER + 0x08)
#define CDNS_DP_FRAMER_TU_SIZE(x) (((x) & GENMASK(6, 0)) << 8)
#define CDNS_DP_FRAMER_TU_VS(x) ((x) & GENMASK(5, 0))
#define CDNS_DP_FRAMER_TU_CNT_RST_EN BIT(15)
#define CDNS_DPTX_STREAM(s) (0x03000 + s * 0x80)
#define CDNS_DP_MSA_HORIZONTAL_0(s) (CDNS_DPTX_STREAM(s) + 0x00)
#define CDNS_DP_MSAH0_H_TOTAL(x) (x)
#define CDNS_DP_MSAH0_HSYNC_START(x) ((x) << 16)
#define CDNS_DP_MSA_HORIZONTAL_1(s) (CDNS_DPTX_STREAM(s) + 0x04)
#define CDNS_DP_MSAH1_HSYNC_WIDTH(x) (x)
#define CDNS_DP_MSAH1_HSYNC_POL_LOW BIT(15)
#define CDNS_DP_MSAH1_HDISP_WIDTH(x) ((x) << 16)
#define CDNS_DP_MSA_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x08)
#define CDNS_DP_MSAV0_V_TOTAL(x) (x)
#define CDNS_DP_MSAV0_VSYNC_START(x) ((x) << 16)
#define CDNS_DP_MSA_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x0c)
#define CDNS_DP_MSAV1_VSYNC_WIDTH(x) (x)
#define CDNS_DP_MSAV1_VSYNC_POL_LOW BIT(15)
#define CDNS_DP_MSAV1_VDISP_WIDTH(x) ((x) << 16)
#define CDNS_DP_MSA_MISC(s) (CDNS_DPTX_STREAM(s) + 0x10)
#define CDNS_DP_STREAM_CONFIGs(s) (CDNS_DPTX_STREAM(s) + 0x14)
#define CDNS_DP_STREAM_CONFIG_2(s) (CDNS_DPTX_STREAM(s) + 0x2c)
#define CDNS_DP_SC2_TU_VS_DIFF(x) ((x) << 8)
#define CDNS_DP_HORIZONTAL(s) (CDNS_DPTX_STREAM(s) + 0x30)
#define CDNS_DP_H_HSYNC_WIDTH(x) (x)
#define CDNS_DP_H_H_TOTAL(x) ((x) << 16)
#define CDNS_DP_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x34)
#define CDNS_DP_V0_VHEIGHT(x) (x)
#define CDNS_DP_V0_VSTART(x) ((x) << 16)
#define CDNS_DP_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x38)
#define CDNS_DP_V1_VTOTAL(x) (x)
#define CDNS_DP_V1_VTOTAL_EVEN BIT(16)
#define CDNS_DP_FRAMER_PXL_REPR(s) (CDNS_DPTX_STREAM(s) + 0x4c)
#define CDNS_DP_FRAMER_6_BPC BIT(0)
#define CDNS_DP_FRAMER_8_BPC BIT(1)
#define CDNS_DP_FRAMER_10_BPC BIT(2)
#define CDNS_DP_FRAMER_12_BPC BIT(3)
#define CDNS_DP_FRAMER_16_BPC BIT(4)
#define CDNS_DP_FRAMER_PXL_FORMAT 0x8
#define CDNS_DP_FRAMER_RGB BIT(0)
#define CDNS_DP_FRAMER_YCBCR444 BIT(1)
#define CDNS_DP_FRAMER_YCBCR422 BIT(2)
#define CDNS_DP_FRAMER_YCBCR420 BIT(3)
#define CDNS_DP_FRAMER_Y_ONLY BIT(4)
#define CDNS_DP_FRAMER_SP(s) (CDNS_DPTX_STREAM(s) + 0x10)
#define CDNS_DP_FRAMER_VSYNC_POL_LOW BIT(0)
#define CDNS_DP_FRAMER_HSYNC_POL_LOW BIT(1)
#define CDNS_DP_FRAMER_INTERLACE BIT(2)
#define CDNS_DP_LINE_THRESH(s) (CDNS_DPTX_STREAM(s) + 0x64)
#define CDNS_DP_ACTIVE_LINE_THRESH(x) (x)
#define CDNS_DP_VB_ID(s) (CDNS_DPTX_STREAM(s) + 0x68)
#define CDNS_DP_VB_ID_INTERLACED BIT(2)
#define CDNS_DP_VB_ID_COMPRESSED BIT(6)
#define CDNS_DP_FRONT_BACK_PORCH(s) (CDNS_DPTX_STREAM(s) + 0x78)
#define CDNS_DP_BACK_PORCH(x) (x)
#define CDNS_DP_FRONT_PORCH(x) ((x) << 16)
#define CDNS_DP_BYTE_COUNT(s) (CDNS_DPTX_STREAM(s) + 0x7c)
#define CDNS_DP_BYTE_COUNT_BYTES_IN_CHUNK_SHIFT 16
#define CDNS_DP_MST_STREAM_CONFIG(s) (CDNS_DPTX_STREAM(s) + 0x14)
#define CDNS_DP_MST_STRM_CFG_STREAM_EN BIT(0)
#define CDNS_DP_MST_STRM_CFG_NO_VIDEO BIT(1)
#define CDNS_DP_MST_SLOT_ALLOCATE(s) (CDNS_DPTX_STREAM(s) + 0x44)
#define CDNS_DP_S_ALLOC_START_SLOT(x) (x)
#define CDNS_DP_S_ALLOC_END_SLOT(x) ((x) << 8)
#define CDNS_DP_RATE_GOVERNING(s) (CDNS_DPTX_STREAM(s) + 0x48)
#define CDNS_DP_RG_TARG_AV_SLOTS_Y(x) (x)
#define CDNS_DP_RG_TARG_AV_SLOTS_X(x) (x << 4)
#define CDNS_DP_RG_ENABLE BIT(10)
#define CDNS_DP_MTPH_CONTROL 0x2264
#define CDNS_DP_MTPH_ECF_EN BIT(0)
#define CDNS_DP_MTPH_ACT_EN BIT(1)
#define CDNS_DP_MTPH_LVP_EN BIT(2)
#define CDNS_DP_MTPH_STATUS 0x226C
#define CDNS_DP_MTPH_ACT_STATUS BIT(0)
#define CDNS_DPTX_GLOBAL 0x02300
#define CDNS_DP_LANE_EN (CDNS_DPTX_GLOBAL + 0x00)
#define CDNS_DP_LANE_EN_LANES(x) GENMASK(x - 1, 0)
#define CDNS_DP_ENHNCD (CDNS_DPTX_GLOBAL + 0x04)
#define to_mhdp_connector(x) container_of(x, struct cdns_mhdp_connector, base)
#define to_mhdp_bridge(x) container_of(x, struct cdns_mhdp_bridge, base)
#define mgr_to_mhdp(x) container_of(x, struct cdns_mhdp_device, mst_mgr)
#define CDNS_MHDP_MAX_STREAMS 4
enum pixel_format {
PIXEL_FORMAT_RGB = 1,
PIXEL_FORMAT_YCBCR_444 = 2,
PIXEL_FORMAT_YCBCR_422 = 4,
PIXEL_FORMAT_YCBCR_420 = 8,
PIXEL_FORMAT_Y_ONLY = 16,
};
int cdns_mhdp_mst_init(struct cdns_mhdp_device *mhdp);
void cdns_mhdp_mst_deinit(struct cdns_mhdp_device *mhdp);
bool cdns_mhdp_mst_probe(struct cdns_mhdp_device *mhdp);
enum pixel_format cdns_mhdp_get_pxlfmt(u32 color_formats);
u32 cdns_mhdp_get_bpp(u32 bpc, u32 color_formats);
void cdns_mhdp_configure_video(struct drm_bridge *bridge);
void cdns_mhdp_mst_enable(struct drm_bridge *bridge);
void cdns_mhdp_mst_disable(struct drm_bridge *bridge);
void cdns_mhdp_enable(struct drm_bridge *bridge);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,16 @@
config DRM_NWL_MIPI_DSI
tristate "Support for Northwest Logic MIPI DSI Host controller"
depends on DRM
depends on COMMON_CLK
depends on OF && HAS_IOMEM
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
select GENERIC_PHY_MIPI_DPHY
select MFD_SYSCON
select MULTIPLEXER
select REGMAP_MMIO
help
This enables support for the Northwest Logic MIPI DSI Host
controller, as found for example on NXP's i.MX8 processors.

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
nwl-mipi-dsi-y := nwl-drv.o nwl-dsi.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-mipi-dsi.o
header-test-y += nwl-drv.h nwl-dsi.h

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,97 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* NWL MIPI DSI host driver
*
* Copyright (C) 2017 NXP
* Copyright (C) 2019 Purism SPC
*/
#ifndef __NWL_DRV_H__
#define __NWL_DRV_H__
#include <linux/mux/consumer.h>
#include <linux/phy/phy.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
struct nwl_dsi_platform_data;
/* i.MX8 NWL quirks */
/* i.MX8MQ errata E11418 */
#define E11418_HS_MODE_QUIRK BIT(0)
/* Skip DSI bits in SRC on disable to avoid blank display on enable */
#define SRC_RESET_QUIRK BIT(1)
/* DPI color coding */
#define NWL_DSI_DPI_16_BIT_565_PACKED 0
#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1
#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2
#define NWL_DSI_DPI_18_BIT_PACKED 3
#define NWL_DSI_DPI_18_BIT_ALIGNED 4
#define NWL_DSI_DPI_24_BIT 5
#define NWL_DSI_MAX_PLATFORM_CLOCKS 2
struct nwl_dsi_plat_clk_config {
const char *id;
struct clk *clk;
bool present;
};
struct mode_config {
int clock;
int crtc_clock;
unsigned int lanes;
unsigned long bitclock;
unsigned long phy_rates[3];
unsigned long pll_rates[3];
int phy_rate_idx;
struct list_head list;
};
struct nwl_dsi {
struct drm_encoder encoder;
struct drm_bridge bridge;
struct mipi_dsi_host dsi_host;
struct drm_bridge *panel_bridge;
struct device *dev;
struct phy *phy;
union phy_configure_opts phy_cfg;
unsigned int quirks;
unsigned int instance;
struct regmap *regmap;
struct regmap *csr;
int irq;
struct reset_control *rst_byte;
struct reset_control *rst_esc;
struct reset_control *rst_dpi;
struct reset_control *rst_pclk;
struct mux_control *mux;
/* DSI clocks */
struct clk *phy_ref_clk;
struct clk *rx_esc_clk;
struct clk *tx_esc_clk;
struct clk *pll_clk;
struct clk *lcdif_clk;
/* Platform dependent clocks */
struct nwl_dsi_plat_clk_config clk_config[NWL_DSI_MAX_PLATFORM_CLOCKS];
struct list_head valid_modes;
/* dsi lanes */
u32 lanes;
u32 clk_drop_lvl;
enum mipi_dsi_pixel_format format;
struct drm_display_mode mode;
unsigned long dsi_mode_flags;
struct nwl_dsi_transfer *xfer;
const struct nwl_dsi_platform_data *pdata;
bool use_dcss;
};
#endif /* __NWL_DRV_H__ */

View File

@ -0,0 +1,684 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* NWL MIPI DSI host driver
*
* Copyright (C) 2017 NXP
* Copyright (C) 2019 Purism SPC
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/regmap.h>
#include <linux/time64.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include "nwl-drv.h"
#include "nwl-dsi.h"
#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
/*
* PKT_CONTROL format:
* [15: 0] - word count
* [17:16] - virtual channel
* [23:18] - data type
* [24] - LP or HS select (0 - LP, 1 - HS)
* [25] - perform BTA after packet is sent
* [26] - perform BTA only, no packet tx
*/
#define NWL_DSI_WC(x) FIELD_PREP(GENMASK(15, 0), (x))
#define NWL_DSI_TX_VC(x) FIELD_PREP(GENMASK(17, 16), (x))
#define NWL_DSI_TX_DT(x) FIELD_PREP(GENMASK(23, 18), (x))
#define NWL_DSI_HS_SEL(x) FIELD_PREP(GENMASK(24, 24), (x))
#define NWL_DSI_BTA_TX(x) FIELD_PREP(GENMASK(25, 25), (x))
#define NWL_DSI_BTA_NO_TX(x) FIELD_PREP(GENMASK(26, 26), (x))
/*
* RX_PKT_HEADER format:
* [15: 0] - word count
* [21:16] - data type
* [23:22] - virtual channel
*/
#define NWL_DSI_RX_DT(x) FIELD_GET(GENMASK(21, 16), (x))
#define NWL_DSI_RX_VC(x) FIELD_GET(GENMASK(23, 22), (x))
/* DSI Video mode */
#define NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES 0
#define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS BIT(0)
#define NWL_DSI_VM_BURST_MODE BIT(1)
/* DPI color coding */
#define NWL_DSI_DPI_16_BIT_565_PACKED 0
#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1
#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2
#define NWL_DSI_DPI_18_BIT_PACKED 3
#define NWL_DSI_DPI_18_BIT_ALIGNED 4
#define NWL_DSI_DPI_24_BIT 5
/* DPI pixel format */
#define NWL_DSI_PIXEL_FORMAT_16 0
#define NWL_DSI_PIXEL_FORMAT_18 BIT(0)
#define NWL_DSI_PIXEL_FORMAT_18L BIT(1)
#define NWL_DSI_PIXEL_FORMAT_24 (BIT(0) | BIT(1))
enum transfer_direction {
DSI_PACKET_SEND,
DSI_PACKET_RECEIVE,
};
struct nwl_dsi_transfer {
const struct mipi_dsi_msg *msg;
struct mipi_dsi_packet packet;
struct completion completed;
int status; /* status of transmission */
enum transfer_direction direction;
bool need_bta;
u8 cmd;
u16 rx_word_count;
size_t tx_len; /* in bytes */
size_t rx_len; /* in bytes */
};
static int nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
{
int ret;
ret = regmap_write(dsi->regmap, reg, val);
if (ret < 0)
DRM_DEV_ERROR(dsi->dev,
"Failed to write NWL DSI reg 0x%x: %d\n", reg,
ret);
return ret;
}
static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
{
unsigned int val = 0;
int ret;
ret = regmap_read(dsi->regmap, reg, &val);
if (ret < 0)
DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
reg, ret);
return val;
}
static u32 nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
{
switch (format) {
case MIPI_DSI_FMT_RGB565:
return NWL_DSI_PIXEL_FORMAT_16;
case MIPI_DSI_FMT_RGB666:
return NWL_DSI_PIXEL_FORMAT_18L;
case MIPI_DSI_FMT_RGB666_PACKED:
return NWL_DSI_PIXEL_FORMAT_18;
case MIPI_DSI_FMT_RGB888:
return NWL_DSI_PIXEL_FORMAT_24;
default:
return -EINVAL;
}
}
#define PSEC_PER_SEC 1000000000000LL
/*
* ps2bc - Picoseconds to byte clock cycles
*/
static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
{
int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
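/*
 * The HS byte clock runs at pixel_clock * bpp / (lanes * 8); mode.clock is in
 * kHz, hence the extra factor of 1000, and the picosecond input is scaled
 * back down by PSEC_PER_SEC.
 */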
return DIV_ROUND_UP(ps * dsi->mode.clock * 1000 * bpp,
dsi->lanes * 8 * PSEC_PER_SEC);
}
/*
* ui2bc - UI time periods to byte clock cycles
*/
static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
{
int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
return DIV_ROUND_UP(ui * dsi->lanes, dsi->mode.clock * 1000 * bpp);
}
/*
* us2bc - micro seconds to lp clock cycles
*/
static u32 us2lp(u32 lp_clk_rate, unsigned long us)
{
return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
}
static int nwl_dsi_config_host(struct nwl_dsi *dsi)
{
u32 cycles;
struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;
if (dsi->lanes < 1 || dsi->lanes > 4)
return -EINVAL;
DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);
if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
} else {
nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
}
/* values in byte clock cycles */
cycles = ui2bc(dsi, cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
cycles += ui2bc(dsi, cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
cycles = ps2bc(dsi, cfg->hs_exit);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
/* In LP clock cycles */
cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);
return 0;
}
static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
{
int color_format;
u32 mode;
bool burst_mode;
int hfront_porch, hback_porch, vfront_porch, vback_porch;
int hsync_len, vsync_len;
hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;
vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;
DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);
color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
if (color_format < 0) {
DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
dsi->format);
return color_format;
}
DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);
nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
/*
* Adjusting input polarity based on the video mode results in
* a black screen so always pick active low:
*/
nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
!(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);
if (burst_mode) {
nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
} else {
mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
dsi->mode.hdisplay);
}
nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);
nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);
nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);
return 0;
}
static void nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
{
u32 irq_enable;
nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff);
nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
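/*
 * A set bit in IRQ_MASK presumably masks (disables) the corresponding
 * interrupt: mask everything first, then clear the bits for the events
 * handled in nwl_dsi_irq_handler().
 */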
irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
NWL_DSI_RX_PKT_HDR_RCVD_MASK |
NWL_DSI_TX_FIFO_OVFLW_MASK |
NWL_DSI_HS_TX_TIMEOUT_MASK);
nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
}
static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
struct mipi_dsi_device *device)
{
struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
struct device *dev = dsi->dev;
DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
device->format, device->mode_flags);
if (device->lanes < 1 || device->lanes > 4)
return -EINVAL;
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->dsi_mode_flags = device->mode_flags;
return 0;
}
static int nwl_dsi_host_detach(struct mipi_dsi_host *dsi_host,
struct mipi_dsi_device *device)
{
struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
dsi->lanes = 0;
dsi->format = 0;
dsi->dsi_mode_flags = 0;
return 0;
}
static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
{
struct device *dev = dsi->dev;
struct nwl_dsi_transfer *xfer = dsi->xfer;
u8 *payload = xfer->msg->rx_buf;
u32 val;
u16 word_count;
u8 channel;
u8 data_type;
xfer->status = 0;
if (xfer->rx_word_count == 0) {
if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
return false;
/* Get the RX header and parse it */
val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
word_count = NWL_DSI_WC(val);
channel = NWL_DSI_RX_VC(val);
data_type = NWL_DSI_RX_DT(val);
if (channel != xfer->msg->channel) {
DRM_DEV_ERROR(dev,
"[%02X] Channel mismatch (%u != %u)\n",
xfer->cmd, channel, xfer->msg->channel);
return true;
}
switch (data_type) {
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
/* Fall through */
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
if (xfer->msg->rx_len > 1) {
/* read second byte */
payload[1] = word_count >> 8;
++xfer->rx_len;
}
/* Fall through */
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
/* Fall through */
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
if (xfer->msg->rx_len > 0) {
/* read first byte */
payload[0] = word_count & 0xff;
++xfer->rx_len;
}
xfer->status = xfer->rx_len;
return true;
case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
word_count &= 0xff;
DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
xfer->cmd, word_count);
xfer->status = -EPROTO;
return true;
}
if (word_count > xfer->msg->rx_len) {
DRM_DEV_ERROR(
dev,
"[%02X] Receive buffer too small: %lu (< %u)\n",
xfer->cmd, xfer->msg->rx_len, word_count);
return true;
}
xfer->rx_word_count = word_count;
} else {
/* Set word_count from previous header read */
word_count = xfer->rx_word_count;
}
/* If RX payload is not yet received, wait for it */
if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
return false;
/* Read the RX payload */
while (word_count >= 4) {
val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
payload[0] = (val >> 0) & 0xff;
payload[1] = (val >> 8) & 0xff;
payload[2] = (val >> 16) & 0xff;
payload[3] = (val >> 24) & 0xff;
payload += 4;
xfer->rx_len += 4;
word_count -= 4;
}
if (word_count > 0) {
val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
switch (word_count) {
case 3:
payload[2] = (val >> 16) & 0xff;
++xfer->rx_len;
/* Fall through */
case 2:
payload[1] = (val >> 8) & 0xff;
++xfer->rx_len;
/* Fall through */
case 1:
payload[0] = (val >> 0) & 0xff;
++xfer->rx_len;
break;
}
}
xfer->status = xfer->rx_len;
return true;
}
static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
{
struct nwl_dsi_transfer *xfer = dsi->xfer;
bool end_packet = false;
if (!xfer)
return;
if (status & NWL_DSI_TX_FIFO_OVFLW) {
DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
return;
}
if (status & NWL_DSI_HS_TX_TIMEOUT) {
DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
return;
}
if (xfer->direction == DSI_PACKET_SEND &&
status & NWL_DSI_TX_PKT_DONE) {
xfer->status = xfer->tx_len;
end_packet = true;
} else if (status & NWL_DSI_DPHY_DIRECTION &&
((status & (NWL_DSI_RX_PKT_HDR_RCVD |
NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
end_packet = nwl_dsi_read_packet(dsi, status);
}
if (end_packet)
complete(&xfer->completed);
}
static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
{
struct nwl_dsi_transfer *xfer = dsi->xfer;
struct mipi_dsi_packet *pkt = &xfer->packet;
const u8 *payload;
size_t length;
u16 word_count;
u8 hs_mode;
u32 val;
u32 hs_workaround = 0;
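/*
 * Tracks whether any payload word has zeroes in bytes 1-2; such payloads
 * apparently trip erratum E11418 when sent in LP mode, so HS mode is forced
 * below whenever this flag is set and the quirk is enabled.
 */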
/* Send the payload, if any */
length = pkt->payload_length;
payload = pkt->payload;
while (length >= 4) {
val = *(u32 *)payload;
hs_workaround |= !(val & 0xFFFF00);
nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
payload += 4;
length -= 4;
}
/* Send the rest of the payload */
val = 0;
switch (length) {
case 3:
val |= payload[2] << 16;
/* Fall through */
case 2:
val |= payload[1] << 8;
hs_workaround |= !(val & 0xFFFF00);
/* Fall through */
case 1:
val |= payload[0];
nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
break;
}
xfer->tx_len = pkt->payload_length;
/*
* Send the header
* header[0] = Virtual Channel + Data Type
* header[1] = Word Count LSB (LP) or first param (SP)
* header[2] = Word Count MSB (LP) or second param (SP)
*/
word_count = pkt->header[1] | (pkt->header[2] << 8);
if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
DRM_DEV_DEBUG_DRIVER(dsi->dev,
"Using hs mode workaround for cmd 0x%x\n",
xfer->cmd);
hs_mode = 1;
} else {
hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
}
val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
NWL_DSI_BTA_TX(xfer->need_bta);
nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
/* Send packet command */
nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
}
static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
const struct mipi_dsi_msg *msg)
{
struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
struct nwl_dsi_transfer xfer;
ssize_t ret = 0;
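/*
 * The transfer descriptor lives on this function's stack; the IRQ handler
 * reaches it through dsi->xfer until the completion fires or the transfer
 * times out.
 */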
/* Create packet to be sent */
dsi->xfer = &xfer;
ret = mipi_dsi_create_packet(&xfer.packet, msg);
if (ret < 0) {
dsi->xfer = NULL;
return ret;
}
if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
msg->type & MIPI_DSI_DCS_READ) &&
msg->rx_len > 0 && msg->rx_buf != NULL)
xfer.direction = DSI_PACKET_RECEIVE;
else
xfer.direction = DSI_PACKET_SEND;
xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
xfer.msg = msg;
xfer.status = -ETIMEDOUT;
xfer.rx_word_count = 0;
xfer.rx_len = 0;
xfer.cmd = 0x00;
if (msg->tx_len > 0)
xfer.cmd = ((u8 *)(msg->tx_buf))[0];
init_completion(&xfer.completed);
ret = clk_prepare_enable(dsi->rx_esc_clk);
if (ret < 0) {
DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
ret);
return ret;
}
DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
clk_get_rate(dsi->rx_esc_clk));
/* Initiate the DSI packet transmission */
nwl_dsi_begin_transmission(dsi);
if (!wait_for_completion_timeout(&xfer.completed,
NWL_DSI_MIPI_FIFO_TIMEOUT)) {
DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
xfer.cmd);
ret = -ETIMEDOUT;
} else {
ret = xfer.status;
}
clk_disable_unprepare(dsi->rx_esc_clk);
return ret;
}
const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
.attach = nwl_dsi_host_attach,
.detach = nwl_dsi_host_detach,
.transfer = nwl_dsi_host_transfer,
};
irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
{
u32 irq_status;
struct nwl_dsi *dsi = data;
irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
if (irq_status & NWL_DSI_TX_PKT_DONE ||
irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
nwl_dsi_finish_transmission(dsi, irq_status);
return IRQ_HANDLED;
}
int nwl_dsi_enable(struct nwl_dsi *dsi)
{
struct device *dev = dsi->dev;
union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
int ret;
if (!dsi->lanes) {
DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
return -EINVAL;
}
ret = phy_init(dsi->phy);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
return ret;
}
ret = phy_configure(dsi->phy, phy_cfg);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(dsi->tx_esc_clk);
if (ret < 0) {
DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
ret);
return ret;
}
DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
clk_get_rate(dsi->tx_esc_clk));
ret = nwl_dsi_config_host(dsi);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
return ret;
}
ret = nwl_dsi_config_dpi(dsi);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
return ret;
}
ret = phy_power_on(dsi->phy);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
return ret;
}
nwl_dsi_init_interrupts(dsi);
return 0;
}
int nwl_dsi_disable(struct nwl_dsi *dsi)
{
struct device *dev = dsi->dev;
DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
phy_power_off(dsi->phy);
phy_exit(dsi->phy);
/* Disabling the clock before the phy breaks enabling dsi again */
clk_disable_unprepare(dsi->tx_esc_clk);
return 0;
}

@ -0,0 +1,112 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* NWL MIPI DSI host driver
*
* Copyright (C) 2017 NXP
* Copyright (C) 2019 Purism SPC
*/
#ifndef __NWL_DSI_H__
#define __NWL_DSI_H__
#include <linux/irqreturn.h>
#include <drm/drm_mipi_dsi.h>
#include "nwl-drv.h"
/* DSI HOST registers */
#define NWL_DSI_CFG_NUM_LANES 0x0
#define NWL_DSI_CFG_NONCONTINUOUS_CLK 0x4
#define NWL_DSI_CFG_T_PRE 0x8
#define NWL_DSI_CFG_T_POST 0xc
#define NWL_DSI_CFG_TX_GAP 0x10
#define NWL_DSI_CFG_AUTOINSERT_EOTP 0x14
#define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP 0x18
#define NWL_DSI_CFG_HTX_TO_COUNT 0x1c
#define NWL_DSI_CFG_LRX_H_TO_COUNT 0x20
#define NWL_DSI_CFG_BTA_H_TO_COUNT 0x24
#define NWL_DSI_CFG_TWAKEUP 0x28
#define NWL_DSI_CFG_STATUS_OUT 0x2c
#define NWL_DSI_RX_ERROR_STATUS 0x30
/* DSI DPI registers */
#define NWL_DSI_PIXEL_PAYLOAD_SIZE 0x200
#define NWL_DSI_PIXEL_FIFO_SEND_LEVEL 0x204
#define NWL_DSI_INTERFACE_COLOR_CODING 0x208
#define NWL_DSI_PIXEL_FORMAT 0x20c
#define NWL_DSI_VSYNC_POLARITY 0x210
#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0
#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1)
#define NWL_DSI_HSYNC_POLARITY 0x214
#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0
#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1)
#define NWL_DSI_VIDEO_MODE 0x218
#define NWL_DSI_HFP 0x21c
#define NWL_DSI_HBP 0x220
#define NWL_DSI_HSA 0x224
#define NWL_DSI_ENABLE_MULT_PKTS 0x228
#define NWL_DSI_VBP 0x22c
#define NWL_DSI_VFP 0x230
#define NWL_DSI_BLLP_MODE 0x234
#define NWL_DSI_USE_NULL_PKT_BLLP 0x238
#define NWL_DSI_VACTIVE 0x23c
#define NWL_DSI_VC 0x240
/* DSI APB PKT control */
#define NWL_DSI_TX_PAYLOAD 0x280
#define NWL_DSI_PKT_CONTROL 0x284
#define NWL_DSI_SEND_PACKET 0x288
#define NWL_DSI_PKT_STATUS 0x28c
#define NWL_DSI_PKT_FIFO_WR_LEVEL 0x290
#define NWL_DSI_PKT_FIFO_RD_LEVEL 0x294
#define NWL_DSI_RX_PAYLOAD 0x298
#define NWL_DSI_RX_PKT_HEADER 0x29c
/* DSI IRQ handling */
#define NWL_DSI_IRQ_STATUS 0x2a0
#define NWL_DSI_SM_NOT_IDLE BIT(0)
#define NWL_DSI_TX_PKT_DONE BIT(1)
#define NWL_DSI_DPHY_DIRECTION BIT(2)
#define NWL_DSI_TX_FIFO_OVFLW BIT(3)
#define NWL_DSI_TX_FIFO_UDFLW BIT(4)
#define NWL_DSI_RX_FIFO_OVFLW BIT(5)
#define NWL_DSI_RX_FIFO_UDFLW BIT(6)
#define NWL_DSI_RX_PKT_HDR_RCVD BIT(7)
#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD BIT(8)
#define NWL_DSI_BTA_TIMEOUT BIT(29)
#define NWL_DSI_LP_RX_TIMEOUT BIT(30)
#define NWL_DSI_HS_TX_TIMEOUT BIT(31)
#define NWL_DSI_IRQ_STATUS2 0x2a4
#define NWL_DSI_SINGLE_BIT_ECC_ERR BIT(0)
#define NWL_DSI_MULTI_BIT_ECC_ERR BIT(1)
#define NWL_DSI_CRC_ERR BIT(2)
#define NWL_DSI_IRQ_MASK 0x2a8
#define NWL_DSI_SM_NOT_IDLE_MASK BIT(0)
#define NWL_DSI_TX_PKT_DONE_MASK BIT(1)
#define NWL_DSI_DPHY_DIRECTION_MASK BIT(2)
#define NWL_DSI_TX_FIFO_OVFLW_MASK BIT(3)
#define NWL_DSI_TX_FIFO_UDFLW_MASK BIT(4)
#define NWL_DSI_RX_FIFO_OVFLW_MASK BIT(5)
#define NWL_DSI_RX_FIFO_UDFLW_MASK BIT(6)
#define NWL_DSI_RX_PKT_HDR_RCVD_MASK BIT(7)
#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK BIT(8)
#define NWL_DSI_BTA_TIMEOUT_MASK BIT(29)
#define NWL_DSI_LP_RX_TIMEOUT_MASK BIT(30)
#define NWL_DSI_HS_TX_TIMEOUT_MASK BIT(31)
#define NWL_DSI_IRQ_MASK2 0x2ac
#define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK BIT(0)
#define NWL_DSI_MULTI_BIT_ECC_ERR_MASK BIT(1)
#define NWL_DSI_CRC_ERR_MASK BIT(2)
extern const struct mipi_dsi_host_ops nwl_dsi_host_ops;
irqreturn_t nwl_dsi_irq_handler(int irq, void *data);
int nwl_dsi_enable(struct nwl_dsi *dsi);
int nwl_dsi_disable(struct nwl_dsi *dsi);
#endif /* __NWL_DSI_H__ */

File diff suppressed because it is too large

@ -261,6 +261,9 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
{ .format = DRM_FORMAT_NV12_10LE40, .depth = 0, .num_planes = 2,
.char_per_block = { 5, 5, 0 }, .block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P210, .depth = 0,
.num_planes = 2, .char_per_block = { 2, 4, 0 },
.block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,

@ -100,8 +100,10 @@ void drm_of_component_match_add(struct device *master,
EXPORT_SYMBOL_GPL(drm_of_component_match_add);
/**
* drm_of_component_probe - Generic probe function for a component based master
* drm_of_component_probe_with_match - Generic probe function with match
* entries for a component based master
* @dev: master device containing the OF node
* @match: component match pointer provided to store matches
* @compare_of: compare function used for matching components
* @m_ops: component master ops to be used
*
@ -112,12 +114,12 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_component_probe(struct device *dev,
int drm_of_component_probe_with_match(struct device *dev,
struct component_match *match,
int (*compare_of)(struct device *, void *),
const struct component_master_ops *m_ops)
{
struct device_node *ep, *port, *remote;
struct component_match *match = NULL;
int i;
if (!dev->of_node)
@ -183,6 +185,29 @@ int drm_of_component_probe(struct device *dev,
return component_master_add_with_match(dev, m_ops, match);
}
EXPORT_SYMBOL(drm_of_component_probe_with_match);
/**
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
* @master_ops: component master ops to be used
*
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
* satisfy their .bind requirements
* See Documentation/devicetree/bindings/graph.txt for the bindings.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
const struct component_master_ops *m_ops)
{
struct component_match *match = NULL;
return drm_of_component_probe_with_match(dev, match, compare_of, m_ops);
}
EXPORT_SYMBOL(drm_of_component_probe);
/*

@ -6,7 +6,7 @@ config DRM_IMX
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
depends on IMX_IPUV3_CORE || IMX_DPU_CORE || IMX_LCDIF_CORE
help
enable i.MX graphics support
@ -29,9 +29,39 @@ config DRM_IMX_LDB
tristate "Support for LVDS displays"
depends on DRM_IMX && MFD_SYSCON
select DRM_PANEL
select PHY_MIXEL_LVDS
select PHY_MIXEL_LVDS_COMBO
help
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
found on i.MX53, i.MX6 and i.MX8 processors.
config DRM_IMX_IPUV3
tristate
depends on DRM_IMX
depends on IMX_IPUV3_CORE
default y if DRM_IMX=y
default m if DRM_IMX=m
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
@ -39,3 +69,27 @@ config DRM_IMX_HDMI
depends on DRM_IMX
help
Choose this if you want to use HDMI on i.MX6.
config DRM_IMX_SEC_DSIM
tristate "Support for Samsung MIPI DSIM displays"
depends on DRM_IMX
select MFD_SYSCON
select DRM_SEC_MIPI_DSIM
help
Choose this to enable the internal SEC MIPI DSIM controller
found on i.MX platform.
config DRM_IMX_CDNS_MHDP
tristate "NXP i.MX MX8 DRM HDMI/DP"
select DRM_CDNS_MHDP
select DRM_CDNS_DP
select DRM_CDNS_HDMI
select DRM_CDNS_AUDIO
depends on DRM_IMX
help
Choose this if you want to use HDMI on i.MX8.
source "drivers/gpu/drm/imx/dpu/Kconfig"
source "drivers/gpu/drm/imx/ipuv3/Kconfig"
source "drivers/gpu/drm/imx/lcdif/Kconfig"
source "drivers/gpu/drm/imx/dcss/Kconfig"

@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
imxdrm-objs := imx-drm-core.o
obj-$(CONFIG_DRM_IMX) += imxdrm.o
@ -8,4 +8,10 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
obj-$(CONFIG_DRM_IMX_DPU) += dpu/
obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3/
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
obj-$(CONFIG_DRM_IMX_LCDIF) += lcdif/
obj-$(CONFIG_DRM_IMX_SEC_DSIM) += sec_mipi_dsim-imx.o
obj-$(CONFIG_DRM_IMX_CDNS_MHDP) += cdn-mhdp-imxdrv.o cdn-mhdp-dp-phy.o cdn-mhdp-hdmi-phy.o cdn-mhdp-imx8qm.o cdn-mhdp-ls1028a.o
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/

@ -0,0 +1,529 @@
/*
* Cadence Display Port Interface (DP) PHY driver
*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <drm/drm_dp_helper.h>
#include <drm/bridge/cdns-mhdp-common.h>
#include "cdn-mhdp-phy.h"
enum dp_link_rate {
RATE_1_6 = 162000,
RATE_2_1 = 216000,
RATE_2_4 = 243000,
RATE_2_7 = 270000,
RATE_3_2 = 324000,
RATE_4_3 = 432000,
RATE_5_4 = 540000,
RATE_8_1 = 810000,
};
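/*
 * Rates use the same numeric representation as the DRM DP helpers (162000 for
 * 1.62 Gb/s per lane, up to 810000 for 8.1 Gb/s). Besides the standard
 * RBR/HBR/HBR2/HBR3 rates, the PLL tables below also cover the intermediate
 * 2.16, 2.43, 3.24 and 4.32 Gb/s settings.
 */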
struct phy_pll_reg {
u16 val[7];
u32 addr;
};
static const struct phy_pll_reg phy_pll_27m_cfg[] = {
/* 1.62 2.16 2.43 2.7 3.24 4.32 5.4 register address */
{{ 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E, 0x010E }, CMN_PLL0_VCOCAL_INIT_TMR },
{{ 0x001B, 0x001B, 0x001B, 0x001B, 0x001B, 0x001B, 0x001B }, CMN_PLL0_VCOCAL_ITER_TMR },
{{ 0x30B9, 0x3087, 0x3096, 0x30B4, 0x30B9, 0x3087, 0x30B4 }, CMN_PLL0_VCOCAL_START },
{{ 0x0077, 0x009F, 0x00B3, 0x00C7, 0x0077, 0x009F, 0x00C7 }, CMN_PLL0_INTDIV },
{{ 0xF9DA, 0xF7CD, 0xF6C7, 0xF5C1, 0xF9DA, 0xF7CD, 0xF5C1 }, CMN_PLL0_FRACDIV },
{{ 0x001E, 0x0028, 0x002D, 0x0032, 0x001E, 0x0028, 0x0032 }, CMN_PLL0_HIGH_THR },
{{ 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020 }, CMN_PLL0_DSM_DIAG },
{{ 0x0000, 0x1000, 0x1000, 0x1000, 0x0000, 0x1000, 0x1000 }, CMN_PLLSM0_USER_DEF_CTRL },
{{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, CMN_DIAG_PLL0_OVRD },
{{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, CMN_DIAG_PLL0_FBH_OVRD },
{{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, CMN_DIAG_PLL0_FBL_OVRD },
{{ 0x0006, 0x0007, 0x0007, 0x0007, 0x0006, 0x0007, 0x0007 }, CMN_DIAG_PLL0_V2I_TUNE },
{{ 0x0043, 0x0043, 0x0043, 0x0042, 0x0043, 0x0043, 0x0042 }, CMN_DIAG_PLL0_CP_TUNE },
{{ 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008 }, CMN_DIAG_PLL0_LF_PROG },
{{ 0x0100, 0x0001, 0x0001, 0x0001, 0x0100, 0x0001, 0x0001 }, CMN_DIAG_PLL0_PTATIS_TUNE1 },
{{ 0x0007, 0x0001, 0x0001, 0x0001, 0x0007, 0x0001, 0x0001 }, CMN_DIAG_PLL0_PTATIS_TUNE2 },
{{ 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020 }, CMN_DIAG_PLL0_TEST_MODE},
{{ 0x0016, 0x0016, 0x0016, 0x0016, 0x0016, 0x0016, 0x0016 }, CMN_PSM_CLK_CTRL }
};
static const struct phy_pll_reg phy_pll_24m_cfg[] = {
/* 1.62 2.16 2.43 2.7 3.24 4.32 5.4 register address */
{{ 0x00F0, 0x00F0, 0x00F0, 0x00F0, 0x00F0, 0x00F0, 0x00F0 }, CMN_PLL0_VCOCAL_INIT_TMR },
{{ 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018 }, CMN_PLL0_VCOCAL_ITER_TMR },
{{ 0x3061, 0x3092, 0x30B3, 0x30D0, 0x3061, 0x3092, 0x30D0 }, CMN_PLL0_VCOCAL_START },
{{ 0x0086, 0x00B3, 0x00CA, 0x00E0, 0x0086, 0x00B3, 0x00E0 }, CMN_PLL0_INTDIV },
{{ 0xF917, 0xF6C7, 0x75A1, 0xF479, 0xF917, 0xF6C7, 0xF479 }, CMN_PLL0_FRACDIV },
{{ 0x0022, 0x002D, 0x0033, 0x0038, 0x0022, 0x002D, 0x0038 }, CMN_PLL0_HIGH_THR },
{{ 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020 }, CMN_PLL0_DSM_DIAG },
{{ 0x0000, 0x1000, 0x1000, 0x1000, 0x0000, 0x1000, 0x1000 }, CMN_PLLSM0_USER_DEF_CTRL },
{{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, CMN_DIAG_PLL0_OVRD },
{{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, CMN_DIAG_PLL0_FBH_OVRD },
{{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, CMN_DIAG_PLL0_FBL_OVRD },
{{ 0x0006, 0x0007, 0x0007, 0x0007, 0x0006, 0x0007, 0x0007 }, CMN_DIAG_PLL0_V2I_TUNE },
{{ 0x0026, 0x0029, 0x0029, 0x0029, 0x0026, 0x0029, 0x0029 }, CMN_DIAG_PLL0_CP_TUNE },
{{ 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008 }, CMN_DIAG_PLL0_LF_PROG },
{{ 0x008C, 0x008C, 0x008C, 0x008C, 0x008C, 0x008C, 0x008C }, CMN_DIAG_PLL0_PTATIS_TUNE1 },
{{ 0x002E, 0x002E, 0x002E, 0x002E, 0x002E, 0x002E, 0x002E }, CMN_DIAG_PLL0_PTATIS_TUNE2 },
{{ 0x0022, 0x0022, 0x0022, 0x0022, 0x0022, 0x0022, 0x0022 }, CMN_DIAG_PLL0_TEST_MODE},
{{ 0x0016, 0x0016, 0x0016, 0x0016, 0x0016, 0x0016, 0x0016 }, CMN_PSM_CLK_CTRL }
};
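/*
 * Map a link rate to a column in the PLL tables above. RATE_8_1 has no table
 * entry and, like any unknown rate, yields -1; callers index the tables with
 * the result, so the link rate is assumed to have been validated beforehand.
 */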
static int link_rate_index(u32 rate)
{
switch (rate) {
case RATE_1_6:
return 0;
case RATE_2_1:
return 1;
case RATE_2_4:
return 2;
case RATE_2_7:
return 3;
case RATE_3_2:
return 4;
case RATE_4_3:
return 5;
case RATE_5_4:
return 6;
default:
return -1;
}
}
static void dp_aux_cfg(struct cdns_mhdp_device *mhdp)
{
/* Power up Aux */
cdns_phy_reg_write(mhdp, TXDA_CYA_AUXDA_CYA, 1);
cdns_phy_reg_write(mhdp, TX_DIG_CTRL_REG_2, 36);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x0100);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x0300);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_3, 0x0000);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0x2008);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0x2018);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0xA018);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x030C);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_5, 0x0000);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_4, 0x1001);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0xA098);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0xA198);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x030d);
ndelay(150);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x030f);
}
/* PMA common configuration for 24MHz */
static void dp_phy_pma_cmn_cfg_24mhz(struct cdns_mhdp_device *mhdp)
{
int k;
u32 num_lanes = mhdp->dp.link.num_lanes;
u16 val;
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
val &= 0xFFF7;
val |= 0x0008;
cdns_phy_reg_write(mhdp, PHY_PMA_CMN_CTRL1, val);
for (k = 0; k < num_lanes; k++) {
/* Transceiver control and diagnostic registers */
cdns_phy_reg_write(mhdp, XCVR_DIAG_LANE_FCM_EN_MGN_TMR | (k << 9), 0x0090);
/* Transmitter receiver detect registers */
cdns_phy_reg_write(mhdp, TX_RCVDET_EN_TMR | (k << 9), 0x0960);
cdns_phy_reg_write(mhdp, TX_RCVDET_ST_TMR | (k << 9), 0x0030);
}
}
/* Valid for 24 MHz only */
static void dp_phy_pma_cmn_pll0_24mhz(struct cdns_mhdp_device *mhdp)
{
u32 num_lanes = mhdp->dp.link.num_lanes;
u32 link_rate = mhdp->dp.link.rate;
u16 val;
int index, i, k;
/*
* PLL reference clock source select
* for single ended reference clock val |= 0x0030;
* for differential clock val |= 0x0000;
*/
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
val = val & 0xFF8F;
val = val | 0x0030;
cdns_phy_reg_write(mhdp, PHY_PMA_CMN_CTRL1, val);
/* DP PLL data rate 0/1 clock divider value */
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
val &= 0x00FF;
if (link_rate <= RATE_2_7)
val |= 0x2400;
else
val |= 0x1200;
cdns_phy_reg_write(mhdp, PHY_HDP_CLK_CTL, val);
/* High speed clock 0/1 div */
val = cdns_phy_reg_read(mhdp, CMN_DIAG_HSCLK_SEL);
val &= 0xFFCC;
if (link_rate <= RATE_2_7)
val |= 0x0011;
cdns_phy_reg_write(mhdp, CMN_DIAG_HSCLK_SEL, val);
for (k = 0; k < num_lanes; k = k + 1) {
val = cdns_phy_reg_read(mhdp, (XCVR_DIAG_HSCLK_SEL | (k << 9)));
val &= 0xCFFF;
if (link_rate <= RATE_2_7)
val |= 0x1000;
cdns_phy_reg_write(mhdp, (XCVR_DIAG_HSCLK_SEL | (k << 9)), val);
}
/* DP PHY PLL 24MHz configuration */
index = link_rate_index(link_rate);
for (i = 0; i < ARRAY_SIZE(phy_pll_24m_cfg); i++)
cdns_phy_reg_write(mhdp, phy_pll_24m_cfg[i].addr, phy_pll_24m_cfg[i].val[index]);
/* Transceiver control and diagnostic registers */
for (k = 0; k < num_lanes; k = k + 1) {
val = cdns_phy_reg_read(mhdp, (XCVR_DIAG_PLLDRC_CTRL | (k << 9)));
val &= 0x8FFF;
if (link_rate <= RATE_2_7)
val |= 0x2000;
else
val |= 0x1000;
cdns_phy_reg_write(mhdp, (XCVR_DIAG_PLLDRC_CTRL | (k << 9)), val);
}
for (k = 0; k < num_lanes; k = k + 1) {
cdns_phy_reg_write(mhdp, (XCVR_PSM_RCTRL | (k << 9)), 0xBEFC);
cdns_phy_reg_write(mhdp, (TX_PSC_A0 | (k << 9)), 0x6799);
cdns_phy_reg_write(mhdp, (TX_PSC_A1 | (k << 9)), 0x6798);
cdns_phy_reg_write(mhdp, (TX_PSC_A2 | (k << 9)), 0x0098);
cdns_phy_reg_write(mhdp, (TX_PSC_A3 | (k << 9)), 0x0098);
}
}
/* PMA common configuration for 27MHz */
static void dp_phy_pma_cmn_cfg_27mhz(struct cdns_mhdp_device *mhdp)
{
u32 num_lanes = mhdp->dp.link.num_lanes;
u16 val;
int k;
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
val &= 0xFFF7;
val |= 0x0008;
cdns_phy_reg_write(mhdp, PHY_PMA_CMN_CTRL1, val);
/* Startup state machine registers */
cdns_phy_reg_write(mhdp, CMN_SSM_BIAS_TMR, 0x0087);
cdns_phy_reg_write(mhdp, CMN_PLLSM0_PLLEN_TMR, 0x001B);
cdns_phy_reg_write(mhdp, CMN_PLLSM0_PLLPRE_TMR, 0x0036);
cdns_phy_reg_write(mhdp, CMN_PLLSM0_PLLVREF_TMR, 0x001B);
cdns_phy_reg_write(mhdp, CMN_PLLSM0_PLLLOCK_TMR, 0x006C);
/* Current calibration registers */
cdns_phy_reg_write(mhdp, CMN_ICAL_INIT_TMR, 0x0044);
cdns_phy_reg_write(mhdp, CMN_ICAL_ITER_TMR, 0x0006);
cdns_phy_reg_write(mhdp, CMN_ICAL_ADJ_INIT_TMR, 0x0022);
cdns_phy_reg_write(mhdp, CMN_ICAL_ADJ_ITER_TMR, 0x0006);
/* Resistor calibration registers */
cdns_phy_reg_write(mhdp, CMN_TXPUCAL_INIT_TMR, 0x0022);
cdns_phy_reg_write(mhdp, CMN_TXPUCAL_ITER_TMR, 0x0006);
cdns_phy_reg_write(mhdp, CMN_TXPU_ADJ_INIT_TMR, 0x0022);
cdns_phy_reg_write(mhdp, CMN_TXPU_ADJ_ITER_TMR, 0x0006);
cdns_phy_reg_write(mhdp, CMN_TXPDCAL_INIT_TMR, 0x0022);
cdns_phy_reg_write(mhdp, CMN_TXPDCAL_ITER_TMR, 0x0006);
cdns_phy_reg_write(mhdp, CMN_TXPD_ADJ_INIT_TMR, 0x0022);
cdns_phy_reg_write(mhdp, CMN_TXPD_ADJ_ITER_TMR, 0x0006);
cdns_phy_reg_write(mhdp, CMN_RXCAL_INIT_TMR, 0x0022);
cdns_phy_reg_write(mhdp, CMN_RXCAL_ITER_TMR, 0x0006);
cdns_phy_reg_write(mhdp, CMN_RX_ADJ_INIT_TMR, 0x0022);
cdns_phy_reg_write(mhdp, CMN_RX_ADJ_ITER_TMR, 0x0006);
for (k = 0; k < num_lanes; k = k + 1) {
/* Power state machine registers */
cdns_phy_reg_write(mhdp, XCVR_PSM_CAL_TMR | (k << 9), 0x016D);
cdns_phy_reg_write(mhdp, XCVR_PSM_A0IN_TMR | (k << 9), 0x016D);
/* Transceiver control and diagnostic registers */
cdns_phy_reg_write(mhdp, XCVR_DIAG_LANE_FCM_EN_MGN_TMR | (k << 9), 0x00A2);
cdns_phy_reg_write(mhdp, TX_DIAG_BGREF_PREDRV_DELAY | (k << 9), 0x0097);
/* Transmitter receiver detect registers */
cdns_phy_reg_write(mhdp, TX_RCVDET_EN_TMR | (k << 9), 0x0A8C);
cdns_phy_reg_write(mhdp, TX_RCVDET_ST_TMR | (k << 9), 0x0036);
}
}
static void dp_phy_pma_cmn_pll0_27mhz(struct cdns_mhdp_device *mhdp)
{
u32 num_lanes = mhdp->dp.link.num_lanes;
u32 link_rate = mhdp->dp.link.rate;
u16 val;
int index, i, k;
/*
* PLL reference clock source select
* for single ended reference clock val |= 0x0030;
* for differential clock val |= 0x0000;
*/
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
val &= 0xFF8F;
cdns_phy_reg_write(mhdp, PHY_PMA_CMN_CTRL1, val);
/* for differential clock on the refclk_p and refclk_m off chip pins:
* CMN_DIAG_ACYA[8]=1'b1
*/
cdns_phy_reg_write(mhdp, CMN_DIAG_ACYA, 0x0100);
/* DP PLL data rate 0/1 clock divider value */
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
val &= 0x00FF;
if (link_rate <= RATE_2_7)
val |= 0x2400;
else
val |= 0x1200;
cdns_phy_reg_write(mhdp, PHY_HDP_CLK_CTL, val);
/* High speed clock 0/1 div */
val = cdns_phy_reg_read(mhdp, CMN_DIAG_HSCLK_SEL);
val &= 0xFFCC;
if (link_rate <= RATE_2_7)
val |= 0x0011;
cdns_phy_reg_write(mhdp, CMN_DIAG_HSCLK_SEL, val);
for (k = 0; k < num_lanes; k++) {
val = cdns_phy_reg_read(mhdp, (XCVR_DIAG_HSCLK_SEL | (k << 9)));
val = val & 0xCFFF;
if (link_rate <= RATE_2_7)
val |= 0x1000;
cdns_phy_reg_write(mhdp, (XCVR_DIAG_HSCLK_SEL | (k << 9)), val);
}
/* DP PHY PLL 27MHz configuration */
index = link_rate_index(link_rate);
for (i = 0; i < ARRAY_SIZE(phy_pll_27m_cfg); i++)
cdns_phy_reg_write(mhdp, phy_pll_27m_cfg[i].addr, phy_pll_27m_cfg[i].val[index]);
/* Transceiver control and diagnostic registers */
for (k = 0; k < num_lanes; k++) {
val = cdns_phy_reg_read(mhdp, (XCVR_DIAG_PLLDRC_CTRL | (k << 9)));
val = val & 0x8FFF;
if (link_rate <= RATE_2_7)
val |= 0x2000;
else
val |= 0x1000;
cdns_phy_reg_write(mhdp, (XCVR_DIAG_PLLDRC_CTRL | (k << 9)), val);
}
for (k = 0; k < num_lanes; k = k + 1) {
/* Power state machine registers */
cdns_phy_reg_write(mhdp, (XCVR_PSM_RCTRL | (k << 9)), 0xBEFC);
cdns_phy_reg_write(mhdp, (TX_PSC_A0 | (k << 9)), 0x6799);
cdns_phy_reg_write(mhdp, (TX_PSC_A1 | (k << 9)), 0x6798);
cdns_phy_reg_write(mhdp, (TX_PSC_A2 | (k << 9)), 0x0098);
cdns_phy_reg_write(mhdp, (TX_PSC_A3 | (k << 9)), 0x0098);
/* Receiver calibration power state definition register */
val = cdns_phy_reg_read(mhdp, RX_PSC_CAL | (k << 9));
val &= 0xFFBB;
cdns_phy_reg_write(mhdp, (RX_PSC_CAL | (k << 9)), val);
val = cdns_phy_reg_read(mhdp, RX_PSC_A0 | (k << 9));
val &= 0xFFBB;
cdns_phy_reg_write(mhdp, (RX_PSC_A0 | (k << 9)), val);
}
}
static void dp_phy_power_down(struct cdns_mhdp_device *mhdp)
{
u16 val;
int i;
if (!mhdp->power_up)
return;
/* Place the PHY lanes in the A3 power state. */
cdns_phy_reg_write(mhdp, PHY_HDP_MODE_CTRL, 0x8);
/* Wait for Power State A3 Ack */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_MODE_CTRL);
if (val & (1 << 7))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait A3 Ack failed\n");
return;
}
/* Disable HDP PLLs data rate and full rate clocks out of PMA. */
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
val &= ~(1 << 2);
cdns_phy_reg_write(mhdp, PHY_HDP_CLK_CTL, val);
/* Wait for PLL clock gate ACK */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
if (!(val & (1 << 3)))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait PLL clock gate Ack failed\n");
return;
}
/* Disable HDP PLLs for high speed clocks */
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
val &= ~(1 << 0);
cdns_phy_reg_write(mhdp, PHY_HDP_CLK_CTL, val);
/* Wait for PLL disable ACK */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
if (!(val & (1 << 1)))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait PLL disable Ack failed\n");
return;
}
}
static int dp_phy_power_up(struct cdns_mhdp_device *mhdp)
{
u32 val, i;
/* Enable HDP PLLs for high speed clocks */
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
val |= (1 << 0);
cdns_phy_reg_write(mhdp, PHY_HDP_CLK_CTL, val);
/* Wait for PLL ready ACK */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
if (val & (1 << 1))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait PLL Ack failed\n");
return -1;
}
/* Enable HDP PLLs data rate and full rate clocks out of PMA. */
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
val |= (1 << 2);
cdns_phy_reg_write(mhdp, PHY_HDP_CLK_CTL, val);
/* Wait for PLL clock enable ACK */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
if (val & (1 << 3))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait PLL clock enable ACk failed\n");
return -1;
}
/* Configure PHY in A2 Mode */
cdns_phy_reg_write(mhdp, PHY_HDP_MODE_CTRL, 0x0004);
/* Wait for Power State A2 Ack */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_MODE_CTRL);
if (val & (1 << 6))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait A2 Ack failed\n");
return -1;
}
/* Configure PHY in A0 mode (PHY must be in the A0 power
* state in order to transmit data)
*/
cdns_phy_reg_write(mhdp, PHY_HDP_MODE_CTRL, 0x0101);
/* Wait for Power State A0 Ack */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_MODE_CTRL);
if (val & (1 << 4))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait A0 Ack failed\n");
return -1;
}
mhdp->power_up = true;
return 0;
}
int cdns_dp_phy_set_imx8mq(struct cdns_mhdp_device *mhdp)
{
int ret;
/* Disable phy clock if PHY in power up state */
dp_phy_power_down(mhdp);
dp_phy_pma_cmn_cfg_27mhz(mhdp);
dp_phy_pma_cmn_pll0_27mhz(mhdp);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_0, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_1, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_2, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_3, 1);
/* PHY power up */
ret = dp_phy_power_up(mhdp);
if (ret < 0)
return ret;
dp_aux_cfg(mhdp);
return ret;
}
int cdns_dp_phy_set_imx8qm(struct cdns_mhdp_device *mhdp)
{
int ret;
/* Disable phy clock if PHY in power up state */
dp_phy_power_down(mhdp);
dp_phy_pma_cmn_cfg_24mhz(mhdp);
dp_phy_pma_cmn_pll0_24mhz(mhdp);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_0, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_1, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_2, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_3, 1);
/* PHY power up */
ret = dp_phy_power_up(mhdp);
if (ret < 0)
return ret;
dp_aux_cfg(mhdp);
return ret;
}

@ -0,0 +1,763 @@
/*
* Cadence High-Definition Multimedia Interface (HDMI) driver
*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <drm/drm_of.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <linux/io.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_atomic.h>
#include <drm/bridge/cdns-mhdp-common.h>
#include "cdn-mhdp-phy.h"
/* HDMI TX clock control settings */
struct hdmi_ctrl {
u32 pixel_clk_freq_min;
u32 pixel_clk_freq_max;
u32 feedback_factor;
u32 data_range_kbps_min;
u32 data_range_kbps_max;
u32 cmnda_pll0_ip_div;
u32 cmn_ref_clk_dig_div;
u32 ref_clk_divider_scaler;
u32 pll_fb_div_total;
u32 cmnda_pll0_fb_div_low;
u32 cmnda_pll0_fb_div_high;
u32 pixel_div_total;
u32 cmnda_pll0_pxdiv_low;
u32 cmnda_pll0_pxdiv_high;
u32 vco_freq_min;
u32 vco_freq_max;
u32 vco_ring_select;
u32 cmnda_hs_clk_0_sel;
u32 cmnda_hs_clk_1_sel;
u32 hsclk_div_at_xcvr;
u32 hsclk_div_tx_sub_rate;
u32 cmnda_pll0_hs_sym_div_sel;
u32 cmnda_pll0_clk_freq_min;
u32 cmnda_pll0_clk_freq_max;
};
/* HDMI TX clock control settings, pixel clock is output */
static const struct hdmi_ctrl imx8mq_ctrl_table[] = {
/*Minclk Maxclk Fdbak DR_min DR_max ip_d dig DS Totl */
{ 27000, 27000, 1000, 270000, 270000, 0x03, 0x1, 0x1, 240, 0x0BC, 0x030, 80, 0x026, 0x026, 2160000, 2160000, 0, 2, 2, 2, 4, 0x3, 27000, 27000},
{ 27000, 27000, 1250, 337500, 337500, 0x03, 0x1, 0x1, 300, 0x0EC, 0x03C, 100, 0x030, 0x030, 2700000, 2700000, 0, 2, 2, 2, 4, 0x3, 33750, 33750},
{ 27000, 27000, 1500, 405000, 405000, 0x03, 0x1, 0x1, 360, 0x11C, 0x048, 120, 0x03A, 0x03A, 3240000, 3240000, 0, 2, 2, 2, 4, 0x3, 40500, 40500},
{ 27000, 27000, 2000, 540000, 540000, 0x03, 0x1, 0x1, 240, 0x0BC, 0x030, 80, 0x026, 0x026, 2160000, 2160000, 0, 2, 2, 2, 4, 0x2, 54000, 54000},
{ 54000, 54000, 1000, 540000, 540000, 0x03, 0x1, 0x1, 480, 0x17C, 0x060, 80, 0x026, 0x026, 4320000, 4320000, 1, 2, 2, 2, 4, 0x3, 54000, 54000},
{ 54000, 54000, 1250, 675000, 675000, 0x04, 0x1, 0x1, 400, 0x13C, 0x050, 50, 0x017, 0x017, 2700000, 2700000, 0, 1, 1, 2, 4, 0x2, 67500, 67500},
{ 54000, 54000, 1500, 810000, 810000, 0x04, 0x1, 0x1, 480, 0x17C, 0x060, 60, 0x01C, 0x01C, 3240000, 3240000, 0, 2, 2, 2, 2, 0x2, 81000, 81000},
{ 54000, 54000, 2000, 1080000, 1080000, 0x03, 0x1, 0x1, 240, 0x0BC, 0x030, 40, 0x012, 0x012, 2160000, 2160000, 0, 2, 2, 2, 1, 0x1, 108000, 108000},
{ 74250, 74250, 1000, 742500, 742500, 0x03, 0x1, 0x1, 660, 0x20C, 0x084, 80, 0x026, 0x026, 5940000, 5940000, 1, 2, 2, 2, 4, 0x3, 74250, 74250},
{ 74250, 74250, 1250, 928125, 928125, 0x04, 0x1, 0x1, 550, 0x1B4, 0x06E, 50, 0x017, 0x017, 3712500, 3712500, 1, 1, 1, 2, 4, 0x2, 92812, 92812},
{ 74250, 74250, 1500, 1113750, 1113750, 0x04, 0x1, 0x1, 660, 0x20C, 0x084, 60, 0x01C, 0x01C, 4455000, 4455000, 1, 2, 2, 2, 2, 0x2, 111375, 111375},
{ 74250, 74250, 2000, 1485000, 1485000, 0x03, 0x1, 0x1, 330, 0x104, 0x042, 40, 0x012, 0x012, 2970000, 2970000, 0, 2, 2, 2, 1, 0x1, 148500, 148500},
{ 99000, 99000, 1000, 990000, 990000, 0x03, 0x1, 0x1, 440, 0x15C, 0x058, 40, 0x012, 0x012, 3960000, 3960000, 1, 2, 2, 2, 2, 0x2, 99000, 99000},
{ 99000, 99000, 1250, 1237500, 1237500, 0x03, 0x1, 0x1, 275, 0x0D8, 0x037, 25, 0x00B, 0x00A, 2475000, 2475000, 0, 1, 1, 2, 2, 0x1, 123750, 123750},
{ 99000, 99000, 1500, 1485000, 1485000, 0x03, 0x1, 0x1, 330, 0x104, 0x042, 30, 0x00D, 0x00D, 2970000, 2970000, 0, 2, 2, 2, 1, 0x1, 148500, 148500},
{ 99000, 99000, 2000, 1980000, 1980000, 0x03, 0x1, 0x1, 440, 0x15C, 0x058, 40, 0x012, 0x012, 3960000, 3960000, 1, 2, 2, 2, 1, 0x1, 198000, 198000},
{148500, 148500, 1000, 1485000, 1485000, 0x03, 0x1, 0x1, 660, 0x20C, 0x084, 40, 0x012, 0x012, 5940000, 5940000, 1, 2, 2, 2, 2, 0x2, 148500, 148500},
{148500, 148500, 1250, 1856250, 1856250, 0x04, 0x1, 0x1, 550, 0x1B4, 0x06E, 25, 0x00B, 0x00A, 3712500, 3712500, 1, 1, 1, 2, 2, 0x1, 185625, 185625},
{148500, 148500, 1500, 2227500, 2227500, 0x03, 0x1, 0x1, 495, 0x188, 0x063, 30, 0x00D, 0x00D, 4455000, 4455000, 1, 1, 1, 2, 2, 0x1, 222750, 222750},
{148500, 148500, 2000, 2970000, 2970000, 0x03, 0x1, 0x1, 660, 0x20C, 0x084, 40, 0x012, 0x012, 5940000, 5940000, 1, 2, 2, 2, 1, 0x1, 297000, 297000},
{198000, 198000, 1000, 1980000, 1980000, 0x03, 0x1, 0x1, 220, 0x0AC, 0x02C, 10, 0x003, 0x003, 1980000, 1980000, 0, 1, 1, 2, 1, 0x0, 198000, 198000},
{198000, 198000, 1250, 2475000, 2475000, 0x03, 0x1, 0x1, 550, 0x1B4, 0x06E, 25, 0x00B, 0x00A, 4950000, 4950000, 1, 1, 1, 2, 2, 0x1, 247500, 247500},
{198000, 198000, 1500, 2970000, 2970000, 0x03, 0x1, 0x1, 330, 0x104, 0x042, 15, 0x006, 0x005, 2970000, 2970000, 0, 1, 1, 2, 1, 0x0, 297000, 297000},
{198000, 198000, 2000, 3960000, 3960000, 0x03, 0x1, 0x1, 440, 0x15C, 0x058, 20, 0x008, 0x008, 3960000, 3960000, 1, 1, 1, 2, 1, 0x0, 396000, 396000},
{297000, 297000, 1000, 2970000, 2970000, 0x03, 0x1, 0x1, 330, 0x104, 0x042, 10, 0x003, 0x003, 2970000, 2970000, 0, 1, 1, 2, 1, 0x0, 297000, 297000},
{297000, 297000, 1500, 4455000, 4455000, 0x03, 0x1, 0x1, 495, 0x188, 0x063, 15, 0x006, 0x005, 4455000, 4455000, 1, 1, 1, 2, 1, 0x0, 445500, 445500},
{297000, 297000, 2000, 5940000, 5940000, 0x03, 0x1, 0x1, 660, 0x20C, 0x084, 20, 0x008, 0x008, 5940000, 5940000, 1, 1, 1, 2, 1, 0x0, 594000, 594000},
{594000, 594000, 1000, 5940000, 5940000, 0x03, 0x1, 0x1, 660, 0x20C, 0x084, 10, 0x003, 0x003, 5940000, 5940000, 1, 1, 1, 2, 1, 0x0, 594000, 594000},
{594000, 594000, 750, 4455000, 4455000, 0x03, 0x1, 0x1, 495, 0x188, 0x063, 10, 0x003, 0x003, 4455000, 4455000, 1, 1, 1, 2, 1, 0x0, 445500, 445500},
{594000, 594000, 625, 3712500, 3712500, 0x04, 0x1, 0x1, 550, 0x1B4, 0x06E, 10, 0x003, 0x003, 3712500, 3712500, 1, 1, 1, 2, 1, 0x0, 371250, 371250},
{594000, 594000, 500, 2970000, 2970000, 0x03, 0x1, 0x1, 660, 0x20C, 0x084, 10, 0x003, 0x003, 5940000, 5940000, 1, 1, 1, 2, 2, 0x1, 297000, 297000},
};
/* HDMI TX clock control settings, pixel clock is input */
static const struct hdmi_ctrl imx8qm_ctrl_table[] = {
/*pclk_l pclk_h fd DRR_L DRR_H PLLD */
{ 25000, 42500, 1000, 250000, 425000, 0x05, 0x01, 0x01, 400, 0x182, 0x00A, 0, 0, 0, 2000000, 3400000, 0, 2, 2, 2, 4, 0x03, 25000, 42500},
{ 42500, 85000, 1000, 425000, 850000, 0x08, 0x03, 0x01, 320, 0x132, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 4, 0x02, 42500, 85000},
{ 85000, 170000, 1000, 850000, 1700000, 0x11, 0x00, 0x07, 340, 0x146, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 2, 0x01, 85000, 170000},
{170000, 340000, 1000, 1700000, 3400000, 0x22, 0x01, 0x07, 340, 0x146, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 1, 0x00, 170000, 340000},
{340000, 600000, 1000, 3400000, 6000000, 0x3C, 0x03, 0x06, 600, 0x24A, 0x00A, 0, 0, 0, 3400000, 6000000, 1, 1, 1, 2, 1, 0x00, 340000, 600000},
{ 25000, 34000, 1205, 312500, 425000, 0x04, 0x01, 0x01, 400, 0x182, 0x00A, 0, 0, 0, 2500000, 3400000, 0, 2, 2, 2, 4, 0x03, 31250, 42500},
{ 34000, 68000, 1205, 425000, 850000, 0x06, 0x02, 0x01, 300, 0x11E, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 4, 0x02, 42500, 85000},
{ 68000, 136000, 1205, 850000, 1700000, 0x0D, 0x02, 0x02, 325, 0x137, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 2, 0x01, 85000, 170000},
{136000, 272000, 1205, 1700000, 3400000, 0x1A, 0x02, 0x04, 325, 0x137, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 1, 0x00, 170000, 340000},
{272000, 480000, 1205, 3400000, 6000000, 0x30, 0x03, 0x05, 600, 0x24A, 0x00A, 0, 0, 0, 3400000, 6000000, 1, 1, 1, 2, 1, 0x00, 340000, 600000},
{ 25000, 28000, 1500, 375000, 420000, 0x03, 0x01, 0x01, 360, 0x15A, 0x00A, 0, 0, 0, 3000000, 3360000, 0, 2, 2, 2, 4, 0x03, 37500, 42000},
{ 28000, 56000, 1500, 420000, 840000, 0x06, 0x02, 0x01, 360, 0x15A, 0x00A, 0, 0, 0, 1680000, 3360000, 0, 1, 1, 2, 4, 0x02, 42000, 84000},
{ 56000, 113000, 1500, 840000, 1695000, 0x0B, 0x00, 0x05, 330, 0x13C, 0x00A, 0, 0, 0, 1680000, 3390000, 0, 1, 1, 2, 2, 0x01, 84000, 169500},
{113000, 226000, 1500, 1695000, 3390000, 0x16, 0x01, 0x05, 330, 0x13C, 0x00A, 0, 0, 0, 1695000, 3390000, 0, 1, 1, 2, 1, 0x00, 169500, 339000},
{226000, 400000, 1500, 3390000, 6000000, 0x28, 0x03, 0x04, 600, 0x24A, 0x00A, 0, 0, 0, 3390000, 6000000, 1, 1, 1, 2, 1, 0x00, 339000, 600000},
{ 25000, 42500, 2000, 500000, 850000, 0x05, 0x01, 0x01, 400, 0x182, 0x00A, 0, 0, 0, 2000000, 3400000, 0, 1, 1, 2, 4, 0x02, 50000, 85000},
{ 42500, 85000, 2000, 850000, 1700000, 0x08, 0x03, 0x01, 320, 0x132, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 2, 0x01, 85000, 170000},
{ 85000, 170000, 2000, 1700000, 3400000, 0x11, 0x00, 0x07, 340, 0x146, 0x00A, 0, 0, 0, 1700000, 3400000, 0, 1, 1, 2, 1, 0x00, 170000, 340000},
{170000, 300000, 2000, 3400000, 6000000, 0x22, 0x01, 0x06, 680, 0x29A, 0x00A, 0, 0, 0, 3400000, 6000000, 1, 1, 1, 2, 1, 0x00, 340000, 600000},
{594000, 594000, 5000, 2970000, 2970000, 0x3C, 0x03, 0x06, 600, 0x24A, 0x00A, 0, 0, 0, 5940000, 5940000, 1, 1, 1, 2, 2, 0x01, 297000, 297000},
{594000, 594000, 6250, 3712500, 3712500, 0x3C, 0x03, 0x06, 375, 0x169, 0x00A, 0, 0, 0, 3712500, 3712500, 1, 1, 1, 2, 1, 0x00, 371250, 371250},
{594000, 594000, 7500, 4455000, 4455000, 0x3C, 0x03, 0x06, 450, 0x1B4, 0x00A, 0, 0, 0, 4455000, 4455000, 1, 1, 1, 2, 1, 0x00, 445500, 445500},
};
/* HDMI TX PLL tuning settings */
struct hdmi_pll_tuning {
u32 vco_freq_bin;
u32 vco_freq_min;
u32 vco_freq_max;
u32 volt_to_current_coarse;
u32 volt_to_current;
u32 ndac_ctrl;
u32 pmos_ctrl;
u32 ptat_ndac_ctrl;
u32 feedback_div_total;
u32 charge_pump_gain;
u32 coarse_code;
u32 v2i_code;
u32 vco_cal_code;
};
/* HDMI TX PLL tuning settings, pixel clock is output */
static const struct hdmi_pll_tuning imx8mq_pll_table[] = {
/* bin VCO_freq min/max coar cod NDAC PMOS PTAT div-T P-Gain Coa V2I CAL */
{ 1, 1980000, 1980000, 0x4, 0x3, 0x0, 0x09, 0x09, 220, 0x42, 160, 5, 183 },
{ 2, 2160000, 2160000, 0x4, 0x3, 0x0, 0x09, 0x09, 240, 0x42, 166, 6, 208 },
{ 3, 2475000, 2475000, 0x5, 0x3, 0x1, 0x00, 0x07, 275, 0x42, 167, 6, 209 },
{ 4, 2700000, 2700000, 0x5, 0x3, 0x1, 0x00, 0x07, 300, 0x42, 188, 6, 230 },
{ 4, 2700000, 2700000, 0x5, 0x3, 0x1, 0x00, 0x07, 400, 0x4C, 188, 6, 230 },
{ 5, 2970000, 2970000, 0x6, 0x3, 0x1, 0x00, 0x07, 330, 0x42, 183, 6, 225 },
{ 6, 3240000, 3240000, 0x6, 0x3, 0x1, 0x00, 0x07, 360, 0x42, 203, 7, 256 },
{ 6, 3240000, 3240000, 0x6, 0x3, 0x1, 0x00, 0x07, 480, 0x4C, 203, 7, 256 },
{ 7, 3712500, 3712500, 0x4, 0x3, 0x0, 0x07, 0x0F, 550, 0x4C, 212, 7, 257 },
{ 8, 3960000, 3960000, 0x5, 0x3, 0x0, 0x07, 0x0F, 440, 0x42, 184, 6, 226 },
{ 9, 4320000, 4320000, 0x5, 0x3, 0x1, 0x07, 0x0F, 480, 0x42, 205, 7, 258 },
{ 10, 4455000, 4455000, 0x5, 0x3, 0x0, 0x07, 0x0F, 495, 0x42, 219, 7, 272 },
{ 10, 4455000, 4455000, 0x5, 0x3, 0x0, 0x07, 0x0F, 660, 0x4C, 219, 7, 272 },
{ 11, 4950000, 4950000, 0x6, 0x3, 0x1, 0x00, 0x07, 550, 0x42, 213, 7, 258 },
{ 12, 5940000, 5940000, 0x7, 0x3, 0x1, 0x00, 0x07, 660, 0x42, 244, 8, 292 },
};
/* HDMI TX PLL tuning settings, pixel clock is input */
static const struct hdmi_pll_tuning imx8qm_pll_table[] = {
/* bin VCO_freq min/max coar cod NDAC PMOS PTAT div-T P-Gain pad only */
{ 0, 1700000, 2000000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 300, 0x08D, 0, 0, 0 },
{ 0, 1700000, 2000000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 320, 0x08E, 0, 0, 0 },
{ 0, 1700000, 2000000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 325, 0x08E, 0, 0, 0 },
{ 0, 1700000, 2000000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 330, 0x08E, 0, 0, 0 },
{ 0, 1700000, 2000000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 340, 0x08F, 0, 0, 0 },
{ 0, 1700000, 2000000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 360, 0x0A7, 0, 0, 0 },
{ 0, 1700000, 2000000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 400, 0x0C5, 0, 0, 0 },
{ 1, 2000000, 2400000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 300, 0x086, 0, 0, 0 },
{ 1, 2000000, 2400000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 320, 0x087, 0, 0, 0 },
{ 1, 2000000, 2400000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 325, 0x087, 0, 0, 0 },
{ 1, 2000000, 2400000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 330, 0x104, 0, 0, 0 },
{ 1, 2000000, 2400000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 340, 0x08B, 0, 0, 0 },
{ 1, 2000000, 2400000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 360, 0x08D, 0, 0, 0 },
{ 1, 2000000, 2400000, 0x3, 0x1, 0x0, 0x8C, 0x2E, 400, 0x0A6, 0, 0, 0 },
{ 2, 2400000, 2800000, 0x3, 0x1, 0x0, 0x04, 0x0D, 300, 0x04E, 0, 0, 0 },
{ 2, 2400000, 2800000, 0x3, 0x1, 0x0, 0x04, 0x0D, 320, 0x04F, 0, 0, 0 },
{ 2, 2400000, 2800000, 0x3, 0x1, 0x0, 0x04, 0x0D, 325, 0x04F, 0, 0, 0 },
{ 2, 2400000, 2800000, 0x3, 0x1, 0x0, 0x04, 0x0D, 330, 0x085, 0, 0, 0 },
{ 2, 2400000, 2800000, 0x3, 0x1, 0x0, 0x04, 0x0D, 340, 0x085, 0, 0, 0 },
{ 2, 2400000, 2800000, 0x3, 0x1, 0x0, 0x04, 0x0D, 360, 0x086, 0, 0, 0 },
{ 2, 2400000, 2800000, 0x3, 0x1, 0x0, 0x04, 0x0D, 400, 0x08B, 0, 0, 0 },
{ 3, 2800000, 3400000, 0x3, 0x1, 0x0, 0x04, 0x0D, 300, 0x047, 0, 0, 0 },
{ 3, 2800000, 3400000, 0x3, 0x1, 0x0, 0x04, 0x0D, 320, 0x04B, 0, 0, 0 },
{ 3, 2800000, 3400000, 0x3, 0x1, 0x0, 0x04, 0x0D, 325, 0x04B, 0, 0, 0 },
{ 3, 2800000, 3400000, 0x3, 0x1, 0x0, 0x04, 0x0D, 330, 0x04B, 0, 0, 0 },
{ 3, 2800000, 3400000, 0x3, 0x1, 0x0, 0x04, 0x0D, 340, 0x04D, 0, 0, 0 },
{ 3, 2800000, 3400000, 0x3, 0x1, 0x0, 0x04, 0x0D, 360, 0x04E, 0, 0, 0 },
{ 3, 2800000, 3400000, 0x3, 0x1, 0x0, 0x04, 0x0D, 400, 0x085, 0, 0, 0 },
{ 4, 3400000, 3900000, 0x7, 0x1, 0x0, 0x8E, 0x2F, 375, 0x041, 0, 0, 0 },
{ 4, 3400000, 3900000, 0x7, 0x1, 0x0, 0x8E, 0x2F, 600, 0x08D, 0, 0, 0 },
{ 4, 3400000, 3900000, 0x7, 0x1, 0x0, 0x8E, 0x2F, 680, 0x0A6, 0, 0, 0 },
{ 5, 3900000, 4500000, 0x7, 0x1, 0x0, 0x8E, 0x2F, 450, 0x041, 0, 0, 0 },
{ 5, 3900000, 4500000, 0x7, 0x1, 0x0, 0x8E, 0x2F, 600, 0x087, 0, 0, 0 },
{ 5, 3900000, 4500000, 0x7, 0x1, 0x0, 0x8E, 0x2F, 680, 0x0A4, 0, 0, 0 },
{ 6, 4500000, 5200000, 0x7, 0x1, 0x0, 0x04, 0x0D, 600, 0x04F, 0, 0, 0 },
{ 6, 4500000, 5200000, 0x7, 0x1, 0x0, 0x04, 0x0D, 680, 0x086, 0, 0, 0 },
{ 7, 5200000, 6000000, 0x7, 0x1, 0x0, 0x04, 0x0D, 600, 0x04D, 0, 0, 0 },
{ 7, 5200000, 6000000, 0x7, 0x1, 0x0, 0x04, 0x0D, 680, 0x04F, 0, 0, 0 }
};
static void hdmi_arc_config(struct cdns_mhdp_device *mhdp)
{
u16 txpu_calib_code;
u16 txpd_calib_code;
u16 txpu_adj_calib_code;
u16 txpd_adj_calib_code;
u16 prev_calib_code;
u16 new_calib_code;
u16 rdata;
/* Power ARC */
cdns_phy_reg_write(mhdp, TXDA_CYA_AUXDA_CYA, 0x0001);
prev_calib_code = cdns_phy_reg_read(mhdp, TX_DIG_CTRL_REG_2);
txpu_calib_code = cdns_phy_reg_read(mhdp, CMN_TXPUCAL_CTRL);
txpd_calib_code = cdns_phy_reg_read(mhdp, CMN_TXPDCAL_CTRL);
txpu_adj_calib_code = cdns_phy_reg_read(mhdp, CMN_TXPU_ADJ_CTRL);
txpd_adj_calib_code = cdns_phy_reg_read(mhdp, CMN_TXPD_ADJ_CTRL);
new_calib_code = ((txpu_calib_code + txpd_calib_code) / 2)
+ txpu_adj_calib_code + txpd_adj_calib_code;
if (new_calib_code != prev_calib_code) {
rdata = cdns_phy_reg_read(mhdp, TX_ANA_CTRL_REG_1);
rdata &= 0xDFFF;
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, rdata);
cdns_phy_reg_write(mhdp, TX_DIG_CTRL_REG_2, new_calib_code);
mdelay(10);
rdata |= 0x2000;
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, rdata);
udelay(150);
}
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x0100);
udelay(100);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x0300);
udelay(100);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_3, 0x0000);
udelay(100);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0x2008);
udelay(100);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0x2018);
udelay(100);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0x2098);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x030C);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_5, 0x0010);
udelay(100);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_4, 0x4001);
mdelay(5);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_1, 0x2198);
mdelay(5);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x030D);
udelay(100);
cdns_phy_reg_write(mhdp, TX_ANA_CTRL_REG_2, 0x030F);
}
static void hdmi_phy_set_vswing(struct cdns_mhdp_device *mhdp)
{
const u32 num_lanes = 4;
u32 k;
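/*
 * Per-lane TX swing setup: presumably full drive strength (TX_DIAG_TX_DRV),
 * no post-cursor emphasis (CPOST multiplier 0) and the default amplitude
 * calibration scaler.
 */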
for (k = 0; k < num_lanes; k++) {
cdns_phy_reg_write(mhdp, (TX_DIAG_TX_DRV | (k << 9)), 0x7c0);
cdns_phy_reg_write(mhdp, (TX_TXCC_CPOST_MULT_00_0 | (k << 9)), 0x0);
cdns_phy_reg_write(mhdp, (TX_TXCC_CAL_SCLR_MULT_0 | (k << 9)), 0x120);
}
}
static int hdmi_feedback_factor(struct cdns_mhdp_device *mhdp)
{
u32 feedback_factor;
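/*
 * Factor scaled by 1000: the data-rate multiplier relative to the pixel clock
 * for 8-bit RGB (e.g. 4:2:0 at 8 bpc halves it to 500, 10-bit RGB raises it
 * to 1250); the clock control tables above are indexed with this value.
 */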
switch (mhdp->video_info.color_fmt) {
case YCBCR_4_2_2:
feedback_factor = 1000;
break;
case YCBCR_4_2_0:
switch (mhdp->video_info.color_depth) {
case 8:
feedback_factor = 500;
break;
case 10:
feedback_factor = 625;
break;
case 12:
feedback_factor = 750;
break;
case 16:
feedback_factor = 1000;
break;
default:
DRM_ERROR("Invalid ColorDepth\n");
return 0;
}
break;
default:
/* Assume RGB/YUV444 */
switch (mhdp->video_info.color_depth) {
case 10:
feedback_factor = 1250;
break;
case 12:
feedback_factor = 1500;
break;
case 16:
feedback_factor = 2000;
break;
default:
feedback_factor = 1000;
}
}
return feedback_factor;
}
static int hdmi_phy_config(struct cdns_mhdp_device *mhdp,
const struct hdmi_ctrl *p_ctrl_table,
const struct hdmi_pll_tuning *p_pll_table,
char pclk_in)
{
const u32 num_lanes = 4;
u32 val, i, k;
/* enable PHY isolation mode only for CMN */
cdns_phy_reg_write(mhdp, PHY_PMA_ISOLATION_CTRL, 0xD000);
/* set cmn_pll0_clk_datart1_div/cmn_pll0_clk_datart0_div dividers */
val = cdns_phy_reg_read(mhdp, PHY_PMA_ISO_PLL_CTRL1);
val &= 0xFF00;
val |= 0x0012;
cdns_phy_reg_write(mhdp, PHY_PMA_ISO_PLL_CTRL1, val);
/* assert PHY reset from isolation register */
cdns_phy_reg_write(mhdp, PHY_ISO_CMN_CTRL, 0x0000);
/* assert PMA CMN reset */
cdns_phy_reg_write(mhdp, PHY_PMA_ISO_CMN_CTRL, 0x0000);
/* register XCVR_DIAG_BIDI_CTRL */
for (k = 0; k < num_lanes; k++)
cdns_phy_reg_write(mhdp, XCVR_DIAG_BIDI_CTRL | (k << 9), 0x00FF);
/* Describing Task phy_cfg_hdp */
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
val &= 0xFFF7;
val |= 0x0008;
cdns_phy_reg_write(mhdp, PHY_PMA_CMN_CTRL1, val);
/* PHY Registers */
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
val &= 0xCFFF;
val |= p_ctrl_table->cmn_ref_clk_dig_div << 12;
cdns_phy_reg_write(mhdp, PHY_PMA_CMN_CTRL1, val);
val = cdns_phy_reg_read(mhdp, PHY_HDP_CLK_CTL);
val &= 0x00FF;
val |= 0x1200;
cdns_phy_reg_write(mhdp, PHY_HDP_CLK_CTL, val);
/* Common control module control and diagnostic registers */
val = cdns_phy_reg_read(mhdp, CMN_CDIAG_REFCLK_CTRL);
val &= 0x8FFF;
val |= p_ctrl_table->ref_clk_divider_scaler << 12;
val |= 0x00C0;
cdns_phy_reg_write(mhdp, CMN_CDIAG_REFCLK_CTRL, val);
/* High speed clock used */
val = cdns_phy_reg_read(mhdp, CMN_DIAG_HSCLK_SEL);
val &= 0xFF00;
val |= (p_ctrl_table->cmnda_hs_clk_0_sel >> 1) << 0;
val |= (p_ctrl_table->cmnda_hs_clk_1_sel >> 1) << 4;
cdns_phy_reg_write(mhdp, CMN_DIAG_HSCLK_SEL, val);
for (k = 0; k < num_lanes; k++) {
val = cdns_phy_reg_read(mhdp, (XCVR_DIAG_HSCLK_SEL | (k << 9)));
val &= 0xCFFF;
val |= (p_ctrl_table->cmnda_hs_clk_0_sel >> 1) << 12;
cdns_phy_reg_write(mhdp, (XCVR_DIAG_HSCLK_SEL | (k << 9)), val);
}
/* PLL 0 control state machine registers */
val = p_ctrl_table->vco_ring_select << 12;
cdns_phy_reg_write(mhdp, CMN_PLLSM0_USER_DEF_CTRL, val);
if (pclk_in == true)
val = 0x30A0;
else {
val = cdns_phy_reg_read(mhdp, CMN_PLL0_VCOCAL_START);
val &= 0xFE00;
val |= p_pll_table->vco_cal_code;
}
cdns_phy_reg_write(mhdp, CMN_PLL0_VCOCAL_START, val);
cdns_phy_reg_write(mhdp, CMN_PLL0_VCOCAL_INIT_TMR, 0x0064);
cdns_phy_reg_write(mhdp, CMN_PLL0_VCOCAL_ITER_TMR, 0x000A);
/* Common functions control and diagnostics registers */
val = p_ctrl_table->cmnda_pll0_hs_sym_div_sel << 8;
val |= p_ctrl_table->cmnda_pll0_ip_div;
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_INCLK_CTRL, val);
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_OVRD, 0x0000);
val = p_ctrl_table->cmnda_pll0_fb_div_high;
val |= (1 << 15);
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_FBH_OVRD, val);
val = p_ctrl_table->cmnda_pll0_fb_div_low;
val |= (1 << 15);
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_FBL_OVRD, val);
if (pclk_in == false) {
val = p_ctrl_table->cmnda_pll0_pxdiv_low;
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_PXL_DIVL, val);
val = p_ctrl_table->cmnda_pll0_pxdiv_high;
val |= (1 << 15);
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_PXL_DIVH, val);
}
val = p_pll_table->volt_to_current_coarse;
val |= (p_pll_table->volt_to_current) << 4;
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_V2I_TUNE, val);
val = p_pll_table->charge_pump_gain;
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_CP_TUNE, val);
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_LF_PROG, 0x0008);
val = p_pll_table->pmos_ctrl;
val |= (p_pll_table->ndac_ctrl) << 8;
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_PTATIS_TUNE1, val);
val = p_pll_table->ptat_ndac_ctrl;
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_PTATIS_TUNE2, val);
if (pclk_in == true)
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_TEST_MODE, 0x0022);
else
cdns_phy_reg_write(mhdp, CMN_DIAG_PLL0_TEST_MODE, 0x0020);
cdns_phy_reg_write(mhdp, CMN_PSM_CLK_CTRL, 0x0016);
/* Transceiver control and diagnostic registers */
for (k = 0; k < num_lanes; k++) {
val = cdns_phy_reg_read(mhdp, (XCVR_DIAG_PLLDRC_CTRL | (k << 9)));
val &= 0xBFFF;
cdns_phy_reg_write(mhdp, (XCVR_DIAG_PLLDRC_CTRL | (k << 9)), val);
}
for (k = 0; k < num_lanes; k++) {
val = cdns_phy_reg_read(mhdp, (TX_DIAG_TX_CTRL | (k << 9)));
val &= 0xFF3F;
val |= (p_ctrl_table->hsclk_div_tx_sub_rate >> 1) << 6;
cdns_phy_reg_write(mhdp, (TX_DIAG_TX_CTRL | (k << 9)), val);
}
/*
* for single ended reference clock val |= 0x0030;
* for differential clock val |= 0x0000;
*/
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
val &= 0xFF8F;
if (pclk_in == true)
val |= 0x0030;
cdns_phy_reg_write(mhdp, PHY_PMA_CMN_CTRL1, val);
/* for differential clock on the refclk_p and
* refclk_m off chip pins: CMN_DIAG_ACYA[8]=1'b1 */
cdns_phy_reg_write(mhdp, CMN_DIAG_ACYA, 0x0100);
/* Deassert PHY reset */
cdns_phy_reg_write(mhdp, PHY_ISO_CMN_CTRL, 0x0001);
cdns_phy_reg_write(mhdp, PHY_PMA_ISO_CMN_CTRL, 0x0003);
/* Power state machine registers */
for (k = 0; k < num_lanes; k++)
cdns_phy_reg_write(mhdp, XCVR_PSM_RCTRL | (k << 9), 0xFEFC);
/* Assert cmn_macro_pwr_en */
cdns_phy_reg_write(mhdp, PHY_PMA_ISO_CMN_CTRL, 0x0013);
/* wait for cmn_macro_pwr_en_ack */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_PMA_ISO_CMN_CTRL);
if (val & (1 << 5))
break;
msleep(20);
}
if (i == 10) {
DRM_ERROR("PMA ouput macro power up failed\n");
return false;
}
/* wait for cmn_ready */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_PMA_CMN_CTRL1);
if (val & (1 << 0))
break;
msleep(20);
}
if (i == 10) {
DRM_ERROR("PMA output ready failed\n");
return false;
}
for (k = 0; k < num_lanes; k++) {
cdns_phy_reg_write(mhdp, TX_PSC_A0 | (k << 9), 0x6791);
cdns_phy_reg_write(mhdp, TX_PSC_A1 | (k << 9), 0x6790);
cdns_phy_reg_write(mhdp, TX_PSC_A2 | (k << 9), 0x0090);
cdns_phy_reg_write(mhdp, TX_PSC_A3 | (k << 9), 0x0090);
val = cdns_phy_reg_read(mhdp, RX_PSC_CAL | (k << 9));
val &= 0xFFBB;
cdns_phy_reg_write(mhdp, RX_PSC_CAL | (k << 9), val);
val = cdns_phy_reg_read(mhdp, RX_PSC_A0 | (k << 9));
val &= 0xFFBB;
cdns_phy_reg_write(mhdp, RX_PSC_A0 | (k << 9), val);
}
return true;
}
static int hdmi_phy_cfg_t28hpc(struct cdns_mhdp_device *mhdp,
struct drm_display_mode *mode)
{
const struct hdmi_ctrl *p_ctrl_table;
const struct hdmi_pll_tuning *p_pll_table;
const u32 refclk_freq_khz = 27000;
const u8 pclk_in = false;
u32 pixel_freq = mode->clock;
u32 vco_freq, char_freq;
u32 div_total, feedback_factor;
u32 i, ret;
feedback_factor = hdmi_feedback_factor(mhdp);
char_freq = pixel_freq * feedback_factor / 1000;
DRM_INFO("Pixel clock: %d KHz, character clock: %d, bpc is %0d-bit.\n",
pixel_freq, char_freq, mhdp->video_info.color_depth);
/* Get right row from the ctrl_table table.
* Check if 'pixel_freq_khz' value matches the PIXEL_CLK_FREQ column.
* Consider only the rows with FEEDBACK_FACTOR column matching feedback_factor. */
for (i = 0; i < ARRAY_SIZE(imx8mq_ctrl_table); i++) {
if (feedback_factor == imx8mq_ctrl_table[i].feedback_factor &&
pixel_freq == imx8mq_ctrl_table[i].pixel_clk_freq_min) {
p_ctrl_table = &imx8mq_ctrl_table[i];
break;
}
}
if (i == ARRAY_SIZE(imx8mq_ctrl_table)) {
DRM_WARN("Pixel clk (%d KHz) not supported, color depth (%0d-bit)\n",
pixel_freq, mhdp->video_info.color_depth);
return 0;
}
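/* VCO frequency (kHz) = refclk * total feedback divider / input divider */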
div_total = p_ctrl_table->pll_fb_div_total;
vco_freq = refclk_freq_khz * div_total / p_ctrl_table->cmnda_pll0_ip_div;
/* Get the right row from imx8mq_pll_table.
* Match both the vco_freq_khz and
* feedback_div_total columns. */
for (i = 0; i < ARRAY_SIZE(imx8mq_pll_table); i++) {
if (vco_freq == imx8mq_pll_table[i].vco_freq_min &&
div_total == imx8mq_pll_table[i].feedback_div_total) {
p_pll_table = &imx8mq_pll_table[i];
break;
}
}
if (i == ARRAY_SIZE(imx8mq_pll_table)) {
DRM_WARN("VCO (%d KHz) not supported\n", vco_freq);
return 0;
}
DRM_INFO("VCO frequency is %d KHz\n", vco_freq);
ret = hdmi_phy_config(mhdp, p_ctrl_table, p_pll_table, pclk_in);
if (ret == false)
return 0;
return char_freq;
}
static int hdmi_phy_cfg_ss28fdsoi(struct cdns_mhdp_device *mhdp,
struct drm_display_mode *mode)
{
const struct hdmi_ctrl *p_ctrl_table;
const struct hdmi_pll_tuning *p_pll_table;
const u8 pclk_in = true;
u32 pixel_freq = mode->clock;
u32 vco_freq, char_freq;
u32 div_total, feedback_factor;
u32 ret, i;
feedback_factor = hdmi_feedback_factor(mhdp);
char_freq = pixel_freq * feedback_factor / 1000;
DRM_INFO("Pixel clock: %d KHz, character clock: %d, bpc is %0d-bit.\n",
pixel_freq, char_freq, mhdp->video_info.color_depth);
/* Get right row from the ctrl_table table.
* Check if 'pixel_freq_khz' value matches the PIXEL_CLK_FREQ column.
* Consider only the rows with FEEDBACK_FACTOR column matching feedback_factor. */
for (i = 0; i < ARRAY_SIZE(imx8qm_ctrl_table); i++) {
if (feedback_factor == imx8qm_ctrl_table[i].feedback_factor &&
pixel_freq >= imx8qm_ctrl_table[i].pixel_clk_freq_min &&
pixel_freq <= imx8qm_ctrl_table[i].pixel_clk_freq_max) {
p_ctrl_table = &imx8qm_ctrl_table[i];
break;
}
}
if (i == ARRAY_SIZE(imx8qm_ctrl_table)) {
DRM_WARN("Pixel clk (%d KHz) not supported, color depth (%0d-bit)\n",
pixel_freq, mhdp->video_info.color_depth);
return 0;
}
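/* VCO frequency (kHz) = pixel clock * total feedback divider / input divider */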
div_total = p_ctrl_table->pll_fb_div_total;
vco_freq = pixel_freq * div_total / p_ctrl_table->cmnda_pll0_ip_div;
/* Get the right row from imx8qm_pll_table.
* Match both the vco_freq_khz and
* feedback_div_total columns. */
for (i = 0; i < ARRAY_SIZE(imx8qm_pll_table); i++) {
if (vco_freq >= imx8qm_pll_table[i].vco_freq_min &&
vco_freq < imx8qm_pll_table[i].vco_freq_max &&
div_total == imx8qm_pll_table[i].feedback_div_total) {
p_pll_table = &imx8qm_pll_table[i];
break;
}
}
if (i == ARRAY_SIZE(imx8qm_pll_table)) {
DRM_WARN("VCO (%d KHz) not supported\n", vco_freq);
return 0;
}
DRM_INFO("VCO frequency is %d KHz\n", vco_freq);
ret = hdmi_phy_config(mhdp, p_ctrl_table, p_pll_table, pclk_in);
if (ret == false)
return 0;
return char_freq;
}
static int hdmi_phy_power_up(struct cdns_mhdp_device *mhdp)
{
u32 val, i;
/* set Power State to A2 */
cdns_phy_reg_write(mhdp, PHY_HDP_MODE_CTRL, 0x0004);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_0, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_1, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_2, 1);
cdns_phy_reg_write(mhdp, TX_DIAG_ACYA_3, 1);
/* Wait for Power State A2 Ack */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_MODE_CTRL);
if (val & (1 << 6))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait A2 Ack failed\n");
return -1;
}
/* Power up ARC */
hdmi_arc_config(mhdp);
/* Configure PHY in A0 mode (PHY must be in the A0 power
* state in order to transmit data)
*/
//cdns_phy_reg_write(mhdp, PHY_HDP_MODE_CTRL, 0x0101); //imx8mq
cdns_phy_reg_write(mhdp, PHY_HDP_MODE_CTRL, 0x0001);
/* Wait for Power State A0 Ack */
for (i = 0; i < 10; i++) {
val = cdns_phy_reg_read(mhdp, PHY_HDP_MODE_CTRL);
if (val & (1 << 4))
break;
msleep(20);
}
if (i == 10) {
dev_err(mhdp->dev, "Wait A0 Ack failed\n");
return -1;
}
return 0;
}
bool cdns_hdmi_phy_video_valid_imx8mq(struct cdns_mhdp_device *mhdp)
{
u32 rate = mhdp->valid_mode->clock;
int i;
for (i = 0; i < ARRAY_SIZE(imx8mq_ctrl_table); i++)
if (rate == imx8mq_ctrl_table[i].pixel_clk_freq_min)
return true;
return false;
}
int cdns_hdmi_phy_set_imx8mq(struct cdns_mhdp_device *mhdp)
{
struct drm_display_mode *mode = &mhdp->mode;
int ret;
/* Check HDMI FW alive before HDMI PHY init */
ret = cdns_mhdp_check_alive(mhdp);
if (ret == false) {
DRM_ERROR("NO HDMI FW running\n");
return -ENXIO;
}
/* Configure PHY */
mhdp->hdmi.char_rate = hdmi_phy_cfg_t28hpc(mhdp, mode);
if (mhdp->hdmi.char_rate == 0) {
DRM_ERROR("failed to set phy pclock\n");
return -EINVAL;
}
ret = hdmi_phy_power_up(mhdp);
if (ret < 0)
return ret;
hdmi_phy_set_vswing(mhdp);
return true;
}
bool cdns_hdmi_phy_video_valid_imx8qm(struct cdns_mhdp_device *mhdp)
{
u32 rate = mhdp->valid_mode->clock;
int i;
for (i = 0; i < ARRAY_SIZE(imx8qm_ctrl_table); i++)
if (rate >= imx8qm_ctrl_table[i].pixel_clk_freq_min &&
rate <= imx8qm_ctrl_table[i].pixel_clk_freq_max)
return true;
return false;
}
int cdns_hdmi_phy_set_imx8qm(struct cdns_mhdp_device *mhdp)
{
struct drm_display_mode *mode = &mhdp->mode;
int ret;
/* Check HDMI FW alive before HDMI PHY init */
ret = cdns_mhdp_check_alive(mhdp);
if (ret == false) {
DRM_ERROR("NO HDMI FW running\n");
return -ENXIO;
}
/* Configure PHY */
mhdp->hdmi.char_rate = hdmi_phy_cfg_ss28fdsoi(mhdp, mode);
if (mhdp->hdmi.char_rate == 0) {
DRM_ERROR("failed to set phy pclock\n");
return -EINVAL;
}
ret = hdmi_phy_power_up(mhdp);
if (ret < 0)
return ret;
hdmi_phy_set_vswing(mhdp);
return true;
}

View File

@ -0,0 +1,552 @@
/*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/pm_domain.h>
#include <linux/clk.h>
#include <drm/drmP.h>
#include "cdns-mhdp-imx.h"
#define PLL_800MHZ (800000000)
#define HDP_DUAL_MODE_MIN_PCLK_RATE 300000 /* KHz */
#define HDP_SINGLE_MODE_MAX_WIDTH 1920
#define CSR_PIXEL_LINK_MUX_CTL 0x00
#define CSR_PIXEL_LINK_MUX_VCP_OFFSET 5
#define CSR_PIXEL_LINK_MUX_HCP_OFFSET 4
static bool imx8qm_video_dual_mode(struct cdns_mhdp_device *mhdp)
{
struct drm_display_mode *mode = &mhdp->mode;
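/*
* A single pixel link handles up to a 300 MHz pixel clock and 1920
* active pixels per line; anything beyond that needs both links.
*/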
return (mode->clock > HDP_DUAL_MODE_MIN_PCLK_RATE ||
mode->hdisplay > HDP_SINGLE_MODE_MAX_WIDTH) ? true : false;
}
static void imx8qm_pixel_link_mux(struct imx_mhdp_device *imx_mhdp)
{
struct drm_display_mode *mode = &imx_mhdp->mhdp.mode;
bool dual_mode;
u32 val;
dual_mode = imx8qm_video_dual_mode(&imx_mhdp->mhdp);
val = 0x4; /* RGB */
if (dual_mode)
val |= 0x2; /* pixel link 0 and 1 are active */
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= 1 << CSR_PIXEL_LINK_MUX_VCP_OFFSET;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= 1 << CSR_PIXEL_LINK_MUX_HCP_OFFSET;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
val |= 0x2;
writel(val, imx_mhdp->mhdp.regs_sec);
}
static void imx8qm_pixel_link_valid(u32 dual_mode)
{
struct imx_sc_ipc *handle;
imx_scu_get_handle(&handle);
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_PXL_LINK_MST1_VLD, 1);
if (dual_mode)
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_PXL_LINK_MST2_VLD, 1);
}
static void imx8qm_pixel_link_invalid(u32 dual_mode)
{
struct imx_sc_ipc *handle;
imx_scu_get_handle(&handle);
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_PXL_LINK_MST1_VLD, 0);
if (dual_mode)
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_PXL_LINK_MST2_VLD, 0);
}
static void imx8qm_pixel_link_sync_enable(u32 dual_mode)
{
struct imx_sc_ipc *handle;
imx_scu_get_handle(&handle);
if (dual_mode)
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_SYNC_CTRL, 3);
else
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_SYNC_CTRL0, 1);
}
static void imx8qm_pixel_link_sync_disable(u32 dual_mode)
{
struct imx_sc_ipc *handle;
imx_scu_get_handle(&handle);
if (dual_mode)
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_SYNC_CTRL, 0);
else
imx_sc_misc_set_control(handle, IMX_SC_R_DC_0, IMX_SC_C_SYNC_CTRL0, 0);
}
static void imx8qm_phy_reset(u8 reset)
{
struct imx_sc_ipc *handle;
imx_scu_get_handle(&handle);
/* assert/deassert the HDMI PHY reset via the SCU */
imx_sc_misc_set_control(handle, IMX_SC_R_HDMI, IMX_SC_C_PHY_RESET, reset);
}
static void imx8qm_clk_mux(u8 is_dp)
{
struct imx_sc_ipc *handle;
imx_scu_get_handle(&handle);
if (is_dp)
/* Enable the 24MHz for HDP PHY */
imx_sc_misc_set_control(handle, IMX_SC_R_HDMI, IMX_SC_C_MODE, 1);
else
imx_sc_misc_set_control(handle, IMX_SC_R_HDMI, IMX_SC_C_MODE, 0);
}
int imx8qm_clocks_init(struct imx_mhdp_device *imx_mhdp)
{
struct device *dev = imx_mhdp->mhdp.dev;
struct imx_hdp_clks *clks = &imx_mhdp->clks;
clks->dig_pll = devm_clk_get(dev, "dig_pll");
if (IS_ERR(clks->dig_pll)) {
dev_warn(dev, "failed to get dig pll clk\n");
return PTR_ERR(clks->dig_pll);
}
clks->av_pll = devm_clk_get(dev, "av_pll");
if (IS_ERR(clks->av_pll)) {
dev_warn(dev, "failed to get av pll clk\n");
return PTR_ERR(clks->av_pll);
}
clks->clk_ipg = devm_clk_get(dev, "clk_ipg");
if (IS_ERR(clks->clk_ipg)) {
dev_warn(dev, "failed to get dp ipg clk\n");
return PTR_ERR(clks->clk_ipg);
}
clks->clk_core = devm_clk_get(dev, "clk_core");
if (IS_ERR(clks->clk_core)) {
dev_warn(dev, "failed to get hdp core clk\n");
return PTR_ERR(clks->clk_core);
}
clks->clk_pxl = devm_clk_get(dev, "clk_pxl");
if (IS_ERR(clks->clk_pxl)) {
dev_warn(dev, "failed to get pxl clk\n");
return PTR_ERR(clks->clk_pxl);
}
clks->clk_pxl_mux = devm_clk_get(dev, "clk_pxl_mux");
if (IS_ERR(clks->clk_pxl_mux)) {
dev_warn(dev, "failed to get pxl mux clk\n");
return PTR_ERR(clks->clk_pxl_mux);
}
clks->clk_pxl_link = devm_clk_get(dev, "clk_pxl_link");
if (IS_ERR(clks->clk_pxl_link)) {
dev_warn(dev, "failed to get pxl link clk\n");
return PTR_ERR(clks->clk_pxl_link);
}
clks->lpcg_hdp = devm_clk_get(dev, "lpcg_hdp");
if (IS_ERR(clks->lpcg_hdp)) {
dev_warn(dev, "failed to get lpcg hdp clk\n");
return PTR_ERR(clks->lpcg_hdp);
}
clks->lpcg_msi = devm_clk_get(dev, "lpcg_msi");
if (IS_ERR(clks->lpcg_msi)) {
dev_warn(dev, "failed to get lpcg msi clk\n");
return PTR_ERR(clks->lpcg_msi);
}
clks->lpcg_pxl = devm_clk_get(dev, "lpcg_pxl");
if (IS_ERR(clks->lpcg_pxl)) {
dev_warn(dev, "failed to get lpcg pxl clk\n");
return PTR_ERR(clks->lpcg_pxl);
}
clks->lpcg_vif = devm_clk_get(dev, "lpcg_vif");
if (IS_ERR(clks->lpcg_vif)) {
dev_warn(dev, "failed to get lpcg vif clk\n");
return PTR_ERR(clks->lpcg_vif);
}
clks->lpcg_lis = devm_clk_get(dev, "lpcg_lis");
if (IS_ERR(clks->lpcg_lis)) {
dev_warn(dev, "failed to get lpcg lis clk\n");
return PTR_ERR(clks->lpcg_lis);
}
clks->lpcg_apb = devm_clk_get(dev, "lpcg_apb");
if (IS_ERR(clks->lpcg_apb)) {
dev_warn(dev, "failed to get lpcg apb clk\n");
return PTR_ERR(clks->lpcg_apb);
}
clks->lpcg_apb_csr = devm_clk_get(dev, "lpcg_apb_csr");
if (IS_ERR(clks->lpcg_apb_csr)) {
dev_warn(dev, "failed to get apb csr clk\n");
return PTR_ERR(clks->lpcg_apb_csr);
}
clks->lpcg_apb_ctrl = devm_clk_get(dev, "lpcg_apb_ctrl");
if (IS_ERR(clks->lpcg_apb_ctrl)) {
dev_warn(dev, "failed to get lpcg apb ctrl clk\n");
return PTR_ERR(clks->lpcg_apb_ctrl);
}
clks->clk_i2s_bypass = devm_clk_get(dev, "clk_i2s_bypass");
if (IS_ERR(clks->clk_i2s_bypass)) {
dev_err(dev, "failed to get i2s bypass clk\n");
return PTR_ERR(clks->clk_i2s_bypass);
}
clks->lpcg_i2s = devm_clk_get(dev, "lpcg_i2s");
if (IS_ERR(clks->lpcg_i2s)) {
dev_err(dev, "failed to get lpcg i2s clk\n");
return PTR_ERR(clks->lpcg_i2s);
}
return true;
}
static int imx8qm_pixel_clk_enable(struct imx_mhdp_device *imx_mhdp)
{
struct imx_hdp_clks *clks = &imx_mhdp->clks;
struct device *dev = imx_mhdp->mhdp.dev;
int ret;
ret = clk_prepare_enable(clks->av_pll);
if (ret < 0) {
dev_err(dev, "%s, pre av pll error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->clk_pxl);
if (ret < 0) {
dev_err(dev, "%s, pre clk pxl error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->clk_pxl_mux);
if (ret < 0) {
dev_err(dev, "%s, pre clk pxl mux error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->clk_pxl_link);
if (ret < 0) {
dev_err(dev, "%s, pre clk pxl link error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_vif);
if (ret < 0) {
dev_err(dev, "%s, pre clk vif error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_pxl);
if (ret < 0) {
dev_err(dev, "%s, pre lpcg pxl error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_hdp);
if (ret < 0) {
dev_err(dev, "%s, pre lpcg hdp error\n", __func__);
return ret;
}
return ret;
}
static void imx8qm_pixel_clk_disable(struct imx_mhdp_device *imx_mhdp)
{
struct imx_hdp_clks *clks = &imx_mhdp->clks;
clk_disable_unprepare(clks->lpcg_pxl);
clk_disable_unprepare(clks->lpcg_hdp);
clk_disable_unprepare(clks->lpcg_vif);
clk_disable_unprepare(clks->clk_pxl);
clk_disable_unprepare(clks->clk_pxl_link);
clk_disable_unprepare(clks->clk_pxl_mux);
clk_disable_unprepare(clks->av_pll);
}
static void imx8qm_pixel_clk_set_rate(struct imx_mhdp_device *imx_mhdp, u32 pclock)
{
bool dual_mode = imx8qm_video_dual_mode(&imx_mhdp->mhdp);
struct imx_hdp_clks *clks = &imx_mhdp->clks;
/* pixel clock for HDMI */
clk_set_rate(clks->av_pll, pclock);
if (dual_mode == true) {
clk_set_rate(clks->clk_pxl, pclock/2);
clk_set_rate(clks->clk_pxl_link, pclock/2);
} else {
clk_set_rate(clks->clk_pxl_link, pclock);
clk_set_rate(clks->clk_pxl, pclock);
}
clk_set_rate(clks->clk_pxl_mux, pclock);
}
static int imx8qm_ipg_clk_enable(struct imx_mhdp_device *imx_mhdp)
{
int ret;
struct imx_hdp_clks *clks = &imx_mhdp->clks;
struct device *dev = imx_mhdp->mhdp.dev;
ret = clk_prepare_enable(clks->dig_pll);
if (ret < 0) {
dev_err(dev, "%s, pre dig pll error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->clk_ipg);
if (ret < 0) {
dev_err(dev, "%s, pre clk_ipg error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->clk_core);
if (ret < 0) {
dev_err(dev, "%s, pre clk core error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_apb);
if (ret < 0) {
dev_err(dev, "%s, pre clk apb error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_lis);
if (ret < 0) {
dev_err(dev, "%s, pre clk lis error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_msi);
if (ret < 0) {
dev_err(dev, "%s, pre clk msierror\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_apb_csr);
if (ret < 0) {
dev_err(dev, "%s, pre clk apb csr error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_apb_ctrl);
if (ret < 0) {
dev_err(dev, "%s, pre clk apb ctrl error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->lpcg_i2s);
if (ret < 0) {
dev_err(dev, "%s, pre clk i2s error\n", __func__);
return ret;
}
ret = clk_prepare_enable(clks->clk_i2s_bypass);
if (ret < 0) {
dev_err(dev, "%s, pre clk i2s bypass error\n", __func__);
return ret;
}
return ret;
}
static void imx8qm_ipg_clk_set_rate(struct imx_mhdp_device *imx_mhdp)
{
struct imx_hdp_clks *clks = &imx_mhdp->clks;
/* ipg/core clock */
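/* dig_pll runs at 800 MHz; core clock = PLL/4 (200 MHz), ipg clock = PLL/8 (100 MHz) */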
clk_set_rate(clks->dig_pll, PLL_800MHZ);
clk_set_rate(clks->clk_core, PLL_800MHZ/4);
clk_set_rate(clks->clk_ipg, PLL_800MHZ/8);
}
static void imx8qm_detach_pm_domains(struct imx_mhdp_device *imx_mhdp)
{
if (imx_mhdp->pd_pll1_link && !IS_ERR(imx_mhdp->pd_pll1_link))
device_link_del(imx_mhdp->pd_pll1_link);
if (imx_mhdp->pd_pll1_dev && !IS_ERR(imx_mhdp->pd_pll1_dev))
dev_pm_domain_detach(imx_mhdp->pd_pll1_dev, true);
if (imx_mhdp->pd_pll0_link && !IS_ERR(imx_mhdp->pd_pll0_link))
device_link_del(imx_mhdp->pd_pll0_link);
if (imx_mhdp->pd_pll0_dev && !IS_ERR(imx_mhdp->pd_pll0_dev))
dev_pm_domain_detach(imx_mhdp->pd_pll0_dev, true);
if (imx_mhdp->pd_mhdp_link && !IS_ERR(imx_mhdp->pd_mhdp_link))
device_link_del(imx_mhdp->pd_mhdp_link);
if (imx_mhdp->pd_mhdp_dev && !IS_ERR(imx_mhdp->pd_mhdp_dev))
dev_pm_domain_detach(imx_mhdp->pd_mhdp_dev, true);
imx_mhdp->pd_mhdp_dev = NULL;
imx_mhdp->pd_mhdp_link = NULL;
imx_mhdp->pd_pll0_dev = NULL;
imx_mhdp->pd_pll0_link = NULL;
imx_mhdp->pd_pll1_dev = NULL;
imx_mhdp->pd_pll1_link = NULL;
}
static int imx8qm_attach_pm_domains(struct imx_mhdp_device *imx_mhdp)
{
struct device *dev = imx_mhdp->mhdp.dev;
u32 flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE;
int ret = 0;
imx_mhdp->pd_mhdp_dev = dev_pm_domain_attach_by_name(dev, "hdmi");
if (IS_ERR(imx_mhdp->pd_mhdp_dev)) {
ret = PTR_ERR(imx_mhdp->pd_mhdp_dev);
dev_err(dev, "Failed to attach dc pd dev: %d\n", ret);
goto fail;
}
imx_mhdp->pd_mhdp_link = device_link_add(dev, imx_mhdp->pd_mhdp_dev, flags);
if (IS_ERR(imx_mhdp->pd_mhdp_link)) {
ret = PTR_ERR(imx_mhdp->pd_mhdp_link);
dev_err(dev, "Failed to add device link to dc pd dev: %d\n",
ret);
goto fail;
}
imx_mhdp->pd_pll0_dev = dev_pm_domain_attach_by_name(dev, "pll0");
if (IS_ERR(imx_mhdp->pd_pll0_dev)) {
ret = PTR_ERR(imx_mhdp->pd_pll0_dev);
dev_err(dev, "Failed to attach pll0 pd dev: %d\n", ret);
goto fail;
}
imx_mhdp->pd_pll0_link = device_link_add(dev, imx_mhdp->pd_pll0_dev, flags);
if (IS_ERR(imx_mhdp->pd_pll0_link)) {
ret = PTR_ERR(imx_mhdp->pd_pll0_link);
dev_err(dev, "Failed to add device link to pll0 pd dev: %d\n",
ret);
goto fail;
}
imx_mhdp->pd_pll1_dev = dev_pm_domain_attach_by_name(dev, "pll1");
if (IS_ERR(imx_mhdp->pd_pll1_dev)) {
ret = PTR_ERR(imx_mhdp->pd_pll1_dev);
dev_err(dev, "Failed to attach pll0 pd dev: %d\n", ret);
goto fail;
}
imx_mhdp->pd_pll1_link = device_link_add(dev, imx_mhdp->pd_pll1_dev, flags);
if (IS_ERR(imx_mhdp->pd_pll1_link)) {
ret = PTR_ERR(imx_mhdp->pd_pll1_link);
dev_err(dev, "Failed to add device link to pll1 pd dev: %d\n",
ret);
goto fail;
}
return ret;
fail:
imx8qm_detach_pm_domains(imx_mhdp);
return ret;
}
int cdns_mhdp_power_on_imx8qm(struct cdns_mhdp_device *mhdp)
{
struct imx_mhdp_device *imx_mhdp =
container_of(mhdp, struct imx_mhdp_device, mhdp);
/* Power on PM Domains */
imx8qm_attach_pm_domains(imx_mhdp);
/* clock init and rate set */
imx8qm_clocks_init(imx_mhdp);
imx8qm_ipg_clk_set_rate(imx_mhdp);
/* Init pixel clock with 148.5MHz before FW init */
imx8qm_pixel_clk_set_rate(imx_mhdp, 148500000);
imx8qm_ipg_clk_enable(imx_mhdp);
imx8qm_clk_mux(imx_mhdp->mhdp.plat_data->is_dp);
imx8qm_pixel_clk_enable(imx_mhdp);
imx8qm_phy_reset(1);
return 0;
}
void cdns_mhdp_plat_init_imx8qm(struct cdns_mhdp_device *mhdp)
{
struct imx_mhdp_device *imx_mhdp =
container_of(mhdp, struct imx_mhdp_device, mhdp);
bool dual_mode = imx8qm_video_dual_mode(&imx_mhdp->mhdp);
imx8qm_pixel_link_sync_disable(dual_mode);
imx8qm_pixel_link_invalid(dual_mode);
}
void cdns_mhdp_plat_deinit_imx8qm(struct cdns_mhdp_device *mhdp)
{
struct imx_mhdp_device *imx_mhdp =
container_of(mhdp, struct imx_mhdp_device, mhdp);
bool dual_mode = imx8qm_video_dual_mode(&imx_mhdp->mhdp);
imx8qm_pixel_link_valid(dual_mode);
imx8qm_pixel_link_sync_enable(dual_mode);
}
void cdns_mhdp_pclk_rate_imx8qm(struct cdns_mhdp_device *mhdp)
{
struct imx_mhdp_device *imx_mhdp =
container_of(mhdp, struct imx_mhdp_device, mhdp);
/* set pixel clock before video mode setup */
imx8qm_pixel_clk_disable(imx_mhdp);
imx8qm_pixel_clk_set_rate(imx_mhdp, imx_mhdp->mhdp.mode.clock * 1000);
imx8qm_pixel_clk_enable(imx_mhdp);
/* Config pixel link mux */
imx8qm_pixel_link_mux(imx_mhdp);
}
int cdns_mhdp_firmware_init_imx8qm(struct cdns_mhdp_device *mhdp)
{
struct imx_mhdp_device *imx_mhdp =
container_of(mhdp, struct imx_mhdp_device, mhdp);
u32 rate;
int ret;
/* configure HDMI/DP core clock */
rate = clk_get_rate(imx_mhdp->clks.clk_core);
if (mhdp->is_ls1028a)
rate = rate / 4;
cdns_mhdp_set_fw_clk(&imx_mhdp->mhdp, rate);
/* un-reset ucpu */
cdns_mhdp_bus_write(0, &imx_mhdp->mhdp, APB_CTRL);
DRM_INFO("Started firmware!\n");
ret = cdns_mhdp_check_alive(&imx_mhdp->mhdp);
if (ret == false) {
DRM_ERROR("NO HDMI FW running\n");
return -ENXIO;
}
/* turn on IP activity */
cdns_mhdp_set_firmware_active(&imx_mhdp->mhdp, 1);
DRM_INFO("HDP FW Version - ver %d verlib %d\n",
cdns_mhdp_bus_read(mhdp, VER_L) + (cdns_mhdp_bus_read(mhdp, VER_H) << 8),
cdns_mhdp_bus_read(mhdp, VER_LIB_L_ADDR) + (cdns_mhdp_bus_read(mhdp, VER_LIB_H_ADDR) << 8));
return 0;
}

View File

@ -0,0 +1,222 @@
/*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/component.h>
#include <drm/drm_of.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include "cdns-mhdp-imx.h"
#include "cdn-mhdp-phy.h"
#include "imx-drm.h"
static void cdns_mhdp_imx_encoder_disable(struct drm_encoder *encoder)
{
struct cdns_mhdp_device *mhdp = encoder->bridge->driver_private;
cdns_mhdp_plat_call(mhdp, plat_init);
}
static void cdns_mhdp_imx_encoder_enable(struct drm_encoder *encoder)
{
struct cdns_mhdp_device *mhdp = encoder->bridge->driver_private;
cdns_mhdp_plat_call(mhdp, plat_deinit);
}
static int cdns_mhdp_imx_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct cdns_mhdp_device *mhdp = encoder->bridge->driver_private;
if (mhdp->plat_data->video_format != 0)
imx_crtc_state->bus_format = mhdp->plat_data->video_format;
return 0;
}
static const struct drm_encoder_helper_funcs cdns_mhdp_imx_encoder_helper_funcs = {
.enable = cdns_mhdp_imx_encoder_enable,
.disable = cdns_mhdp_imx_encoder_disable,
.atomic_check = cdns_mhdp_imx_encoder_atomic_check,
};
static const struct drm_encoder_funcs cdns_mhdp_imx_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static struct cdns_plat_data imx8mq_hdmi_drv_data = {
.plat_name = "imx8mq-hdmi",
.bind = cdns_hdmi_bind,
.unbind = cdns_hdmi_unbind,
.phy_set = cdns_hdmi_phy_set_imx8mq,
.phy_video_valid = cdns_hdmi_phy_video_valid_imx8mq,
.bus_type = BUS_TYPE_NORMAL_APB,
};
static struct cdns_plat_data imx8mq_dp_drv_data = {
.plat_name = "imx8mq-dp",
.bind = cdns_dp_bind,
.unbind = cdns_dp_unbind,
.phy_set = cdns_dp_phy_set_imx8mq,
.bus_type = BUS_TYPE_NORMAL_APB,
};
static struct cdns_plat_data imx8qm_hdmi_drv_data = {
.plat_name = "imx8qm-hdmi",
.bind = cdns_hdmi_bind,
.unbind = cdns_hdmi_unbind,
.phy_set = cdns_hdmi_phy_set_imx8qm,
.phy_video_valid = cdns_hdmi_phy_video_valid_imx8qm,
.power_on = cdns_mhdp_power_on_imx8qm,
.firmware_init = cdns_mhdp_firmware_init_imx8qm,
.pclk_rate = cdns_mhdp_pclk_rate_imx8qm,
.plat_init = cdns_mhdp_plat_init_imx8qm,
.plat_deinit = cdns_mhdp_plat_deinit_imx8qm,
.bus_type = BUS_TYPE_LOW4K_APB,
.video_format = MEDIA_BUS_FMT_RGB101010_1X30,
};
static struct cdns_plat_data imx8qm_dp_drv_data = {
.plat_name = "imx8qm-dp",
.bind = cdns_dp_bind,
.unbind = cdns_dp_unbind,
.phy_set = cdns_dp_phy_set_imx8qm,
.power_on = cdns_mhdp_power_on_imx8qm,
.firmware_init = cdns_mhdp_firmware_init_imx8qm,
.pclk_rate = cdns_mhdp_pclk_rate_imx8qm,
.plat_init = cdns_mhdp_plat_init_imx8qm,
.plat_deinit = cdns_mhdp_plat_deinit_imx8qm,
.bus_type = BUS_TYPE_LOW4K_APB,
.video_format = MEDIA_BUS_FMT_RGB101010_1X30,
.is_dp = true,
};
static struct cdns_plat_data ls1028a_dp_drv_data = {
.bind = cdns_dp_bind,
.unbind = cdns_dp_unbind,
.phy_set = cdns_dp_phy_set_imx8mq,
.power_on = cdns_mhdp_power_on_ls1028a,
.firmware_init = cdns_mhdp_firmware_init_imx8qm,
.pclk_rate = cdns_mhdp_pclk_rate_ls1028a,
.bus_type = BUS_TYPE_NORMAL_APB,
};
static const struct of_device_id cdns_mhdp_imx_dt_ids[] = {
{ .compatible = "cdn,imx8mq-hdmi",
.data = &imx8mq_hdmi_drv_data
},
{ .compatible = "cdn,imx8mq-dp",
.data = &imx8mq_dp_drv_data
},
{ .compatible = "cdn,imx8qm-hdmi",
.data = &imx8qm_hdmi_drv_data
},
{ .compatible = "cdn,imx8qm-dp",
.data = &imx8qm_dp_drv_data
},
{ .compatible = "cdn,ls1028a-dp",
.data = &ls1028a_dp_drv_data
},
{},
};
MODULE_DEVICE_TABLE(of, cdns_mhdp_imx_dt_ids);
static int cdns_mhdp_imx_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
const struct cdns_plat_data *plat_data;
const struct of_device_id *match;
struct drm_device *drm = data;
struct drm_encoder *encoder;
struct imx_mhdp_device *imx_mhdp;
int ret;
if (!pdev->dev.of_node)
return -ENODEV;
imx_mhdp = devm_kzalloc(&pdev->dev, sizeof(*imx_mhdp), GFP_KERNEL);
if (!imx_mhdp)
return -ENOMEM;
match = of_match_node(cdns_mhdp_imx_dt_ids, pdev->dev.of_node);
plat_data = match->data;
encoder = &imx_mhdp->encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
* supposed to be connected to, it's because the CRTC has
* not been registered yet. Defer probing, and hope that
* the required CRTC is added later.
*/
if (encoder->possible_crtcs == 0)
return -EPROBE_DEFER;
drm_encoder_helper_add(encoder, &cdns_mhdp_imx_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &cdns_mhdp_imx_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
imx_mhdp->mhdp.plat_data = plat_data;
imx_mhdp->mhdp.dev = dev;
imx_mhdp->mhdp.bus_type = plat_data->bus_type;
ret = plat_data->bind(pdev, encoder, &imx_mhdp->mhdp);
/*
* If cdns_mhdp_bind() fails we'll never call cdns_mhdp_unbind(),
* which would have called the encoder cleanup. Do it manually.
*/
if (ret < 0)
drm_encoder_cleanup(encoder);
return ret;
}
static void cdns_mhdp_imx_unbind(struct device *dev, struct device *master,
void *data)
{
struct imx_mhdp_device *imx_mhdp = dev_get_drvdata(dev);
imx_mhdp->mhdp.plat_data->unbind(dev);
}
static const struct component_ops cdns_mhdp_imx_ops = {
.bind = cdns_mhdp_imx_bind,
.unbind = cdns_mhdp_imx_unbind,
};
static int cdns_mhdp_imx_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &cdns_mhdp_imx_ops);
}
static int cdns_mhdp_imx_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &cdns_mhdp_imx_ops);
return 0;
}
static struct platform_driver cdns_mhdp_imx_platform_driver = {
.probe = cdns_mhdp_imx_probe,
.remove = cdns_mhdp_imx_remove,
.driver = {
.name = "cdns-mhdp-imx",
.of_match_table = cdns_mhdp_imx_dt_ids,
},
};
module_platform_driver(cdns_mhdp_imx_platform_driver);
MODULE_AUTHOR("Sandor YU <sandor.yu@nxp.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdnhdmi-imx");

View File

@ -0,0 +1,110 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP
*
*/
#include <linux/clk.h>
#include <drm/drmP.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include "cdns-mhdp-imx.h"
static const struct of_device_id scfg_device_ids[] = {
{ .compatible = "fsl,ls1028a-scfg", },
{}
};
static void ls1028a_phy_reset(u8 reset)
{
struct device_node *scfg_node;
void __iomem *scfg_base = NULL;
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
if (scfg_base)
iowrite32(reset, scfg_base + 0x230);
}
int ls1028a_clocks_init(struct imx_mhdp_device *imx_mhdp)
{
struct device *dev = imx_mhdp->mhdp.dev;
struct imx_hdp_clks *clks = &imx_mhdp->clks;
clks->clk_core = devm_clk_get(dev, "clk_core");
if (IS_ERR(clks->clk_core)) {
dev_warn(dev, "failed to get hdp core clk\n");
return PTR_ERR(clks->clk_core);
}
clks->clk_pxl = devm_clk_get(dev, "clk_pxl");
if (IS_ERR(clks->clk_pxl)) {
dev_warn(dev, "failed to get pxl clk\n");
return PTR_ERR(clks->clk_pxl);
}
return true;
}
static int ls1028a_pixel_clk_enable(struct imx_mhdp_device *imx_mhdp)
{
struct imx_hdp_clks *clks = &imx_mhdp->clks;
struct device *dev = imx_mhdp->mhdp.dev;
int ret;
ret = clk_prepare_enable(clks->clk_pxl);
if (ret < 0) {
dev_err(dev, "%s, pre clk pxl error\n", __func__);
return ret;
}
return ret;
}
static void ls1028a_pixel_clk_disable(struct imx_mhdp_device *imx_mhdp)
{
struct imx_hdp_clks *clks = &imx_mhdp->clks;
clk_disable_unprepare(clks->clk_pxl);
}
static void ls1028a_pixel_clk_set_rate(struct imx_mhdp_device *imx_mhdp,
u32 pclock)
{
struct imx_hdp_clks *clks = &imx_mhdp->clks;
clk_set_rate(clks->clk_pxl, pclock);
}
int cdns_mhdp_power_on_ls1028a(struct cdns_mhdp_device *mhdp)
{
struct imx_mhdp_device *imx_mhdp = container_of
(mhdp, struct imx_mhdp_device, mhdp);
/* clock init and rate set */
ls1028a_clocks_init(imx_mhdp);
ls1028a_pixel_clk_enable(imx_mhdp);
/* Init pixel clock with 148.5MHz before FW init */
ls1028a_pixel_clk_set_rate(imx_mhdp, 148500000);
ls1028a_phy_reset(1);
return 0;
}
void cdns_mhdp_pclk_rate_ls1028a(struct cdns_mhdp_device *mhdp)
{
struct imx_mhdp_device *imx_mhdp = container_of
(mhdp, struct imx_mhdp_device, mhdp);
/* set pixel clock before video mode setup */
ls1028a_pixel_clk_disable(imx_mhdp);
ls1028a_pixel_clk_set_rate(imx_mhdp, imx_mhdp->mhdp.mode.clock * 1000);
ls1028a_pixel_clk_enable(imx_mhdp);
}

View File

@ -0,0 +1,155 @@
/*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef _CDN_DP_PHY_H
#define _CDN_DP_PHY_H
#include <drm/bridge/cdns-mhdp-common.h>
#define CMN_SSM_BIAS_TMR 0x0022
#define CMN_PLLSM0_PLLEN_TMR 0x0029
#define CMN_PLLSM0_PLLPRE_TMR 0x002A
#define CMN_PLLSM0_PLLVREF_TMR 0x002B
#define CMN_PLLSM0_PLLLOCK_TMR 0x002C
#define CMN_PLLSM0_USER_DEF_CTRL 0x002F
#define CMN_PSM_CLK_CTRL 0x0061
#define CMN_CDIAG_REFCLK_CTRL 0x0062
#define CMN_PLL0_VCOCAL_START 0x0081
#define CMN_PLL0_VCOCAL_INIT_TMR 0x0084
#define CMN_PLL0_VCOCAL_ITER_TMR 0x0085
#define CMN_PLL0_INTDIV 0x0094
#define CMN_PLL0_FRACDIV 0x0095
#define CMN_PLL0_HIGH_THR 0x0096
#define CMN_PLL0_DSM_DIAG 0x0097
#define CMN_PLL0_SS_CTRL1 0x0098
#define CMN_PLL0_SS_CTRL2 0x0099
#define CMN_ICAL_INIT_TMR 0x00C4
#define CMN_ICAL_ITER_TMR 0x00C5
#define CMN_RXCAL_INIT_TMR 0x00D4
#define CMN_RXCAL_ITER_TMR 0x00D5
#define CMN_TXPUCAL_CTRL 0x00E0
#define CMN_TXPUCAL_INIT_TMR 0x00E4
#define CMN_TXPUCAL_ITER_TMR 0x00E5
#define CMN_TXPDCAL_CTRL 0x00F0
#define CMN_TXPDCAL_INIT_TMR 0x00F4
#define CMN_TXPDCAL_ITER_TMR 0x00F5
#define CMN_ICAL_ADJ_INIT_TMR 0x0102
#define CMN_ICAL_ADJ_ITER_TMR 0x0103
#define CMN_RX_ADJ_INIT_TMR 0x0106
#define CMN_RX_ADJ_ITER_TMR 0x0107
#define CMN_TXPU_ADJ_CTRL 0x0108
#define CMN_TXPU_ADJ_INIT_TMR 0x010A
#define CMN_TXPU_ADJ_ITER_TMR 0x010B
#define CMN_TXPD_ADJ_CTRL 0x010c
#define CMN_TXPD_ADJ_INIT_TMR 0x010E
#define CMN_TXPD_ADJ_ITER_TMR 0x010F
#define CMN_DIAG_PLL0_FBH_OVRD 0x01C0
#define CMN_DIAG_PLL0_FBL_OVRD 0x01C1
#define CMN_DIAG_PLL0_OVRD 0x01C2
#define CMN_DIAG_PLL0_TEST_MODE 0x01C4
#define CMN_DIAG_PLL0_V2I_TUNE 0x01C5
#define CMN_DIAG_PLL0_CP_TUNE 0x01C6
#define CMN_DIAG_PLL0_LF_PROG 0x01C7
#define CMN_DIAG_PLL0_PTATIS_TUNE1 0x01C8
#define CMN_DIAG_PLL0_PTATIS_TUNE2 0x01C9
#define CMN_DIAG_PLL0_INCLK_CTRL 0x01CA
#define CMN_DIAG_PLL0_PXL_DIVH 0x01CB
#define CMN_DIAG_PLL0_PXL_DIVL 0x01CC
#define CMN_DIAG_HSCLK_SEL 0x01E0
#define CMN_DIAG_PER_CAL_ADJ 0x01EC
#define CMN_DIAG_CAL_CTRL 0x01ED
#define CMN_DIAG_ACYA 0x01FF
#define XCVR_PSM_RCTRL 0x4001
#define XCVR_PSM_CAL_TMR 0x4002
#define XCVR_PSM_A0IN_TMR 0x4003
#define TX_TXCC_CAL_SCLR_MULT_0 0x4047
#define TX_TXCC_CPOST_MULT_00_0 0x404C
#define TX_TXCC_MGNFS_MULT_000_0 0x4050
#define XCVR_DIAG_PLLDRC_CTRL 0x40E0
#define XCVR_DIAG_HSCLK_SEL 0x40E1
#define XCVR_DIAG_BIDI_CTRL 0x40E8
#define XCVR_DIAG_LANE_FCM_EN_MGN_TMR 0x40F2
#define XCVR_DIAG_LANE_FCM_EN_MGN 0x40F2
#define TX_PSC_A0 0x4100
#define TX_PSC_A1 0x4101
#define TX_PSC_A2 0x4102
#define TX_PSC_A3 0x4103
#define TX_RCVDET_CTRL 0x4120
#define TX_RCVDET_EN_TMR 0x4122
#define TX_RCVDET_ST_TMR 0x4123
#define TX_BIST_CTRL 0x4140
#define TX_BIST_UDDWR 0x4141
#define TX_DIAG_TX_CTRL 0x41E0
#define TX_DIAG_TX_DRV 0x41E1
#define TX_DIAG_BGREF_PREDRV_DELAY 0x41E7
#define XCVR_PSM_RCTRL_1 0x4201
#define TX_TXCC_CAL_SCLR_MULT_1 0x4247
#define TX_TXCC_CPOST_MULT_00_1 0x424C
#define TX_TXCC_MGNFS_MULT_000_1 0x4250
#define XCVR_DIAG_PLLDRC_CTRL_1 0x42E0
#define XCVR_DIAG_HSCLK_SEL_1 0x42E1
#define XCVR_DIAG_LANE_FCM_EN_MGN_TMR_1 0x42F2
#define TX_RCVDET_EN_TMR_1 0x4322
#define TX_RCVDET_ST_TMR_1 0x4323
#define TX_DIAG_ACYA_0 0x41FF
#define TX_DIAG_ACYA_1 0x43FF
#define TX_DIAG_ACYA_2 0x45FF
#define TX_DIAG_ACYA_3 0x47FF
#define TX_ANA_CTRL_REG_1 0x5020
#define TX_ANA_CTRL_REG_2 0x5021
#define TXDA_COEFF_CALC 0x5022
#define TX_DIG_CTRL_REG_1 0x5023
#define TX_DIG_CTRL_REG_2 0x5024
#define TXDA_CYA_AUXDA_CYA 0x5025
#define TX_ANA_CTRL_REG_3 0x5026
#define TX_ANA_CTRL_REG_4 0x5027
#define TX_ANA_CTRL_REG_5 0x5029
#define RX_PSC_A0 0x8000
#define RX_PSC_CAL 0x8006
#define PMA_LANE_CFG 0xC000
#define PIPE_CMN_CTRL1 0xC001
#define PIPE_CMN_CTRL2 0xC002
#define PIPE_COM_LOCK_CFG1 0xC003
#define PIPE_COM_LOCK_CFG2 0xC004
#define PIPE_RCV_DET_INH 0xC005
#define PHY_HDP_MODE_CTRL 0xC008
#define PHY_HDP_CLK_CTL 0xC009
#define STS 0xC00F
#define PHY_ISO_CMN_CTRL 0xC010
#define PHY_HDP_TX_CTL_L0 0xC408
#define PHY_DP_TX_CTL 0xC408
#define PHY_HDP_TX_CTL_L1 0xC448
#define PHY_HDP_TX_CTL_L2 0xC488
#define PHY_HDP_TX_CTL_L3 0xC4C8
#define PHY_PMA_CMN_CTRL1 0xC800
#define PMA_CMN_CTRL1 0xC800
#define PHY_PMA_ISO_CMN_CTRL 0xC810
#define PHY_PMA_ISO_PLL_CTRL1 0xC812
#define PHY_PMA_ISOLATION_CTRL 0xC81F
#define PHY_ISOLATION_CTRL 0xC81F
#define PHY_PMA_ISO_XCVR_CTRL 0xCC11
#define PHY_PMA_ISO_LINK_MODE 0xCC12
#define PHY_PMA_ISO_PWRST_CTRL 0xCC13
#define PHY_PMA_ISO_TX_DATA_LO 0xCC14
#define PHY_PMA_ISO_TX_DATA_HI 0xCC15
#define PHY_PMA_ISO_RX_DATA_LO 0xCC16
#define PHY_PMA_ISO_RX_DATA_HI 0xCC17
int cdns_dp_phy_set_imx8mq(struct cdns_mhdp_device *hdp);
int cdns_dp_phy_set_imx8qm(struct cdns_mhdp_device *hdp);
bool cdns_hdmi_phy_video_valid_imx8mq(struct cdns_mhdp_device *hdp);
bool cdns_hdmi_phy_video_valid_imx8qm(struct cdns_mhdp_device *hdp);
int cdns_hdmi_phy_set_imx8mq(struct cdns_mhdp_device *hdp);
int cdns_hdmi_phy_set_imx8qm(struct cdns_mhdp_device *hdp);
#endif /* _CDN_DP_PHY_H */

View File

@ -0,0 +1,71 @@
/*
* Cadence High-Definition Multimedia Interface (HDMI) driver
*
* Copyright (C) 2019 NXP Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#ifndef CDNS_MHDP_IMX_H_
#define CDNS_MHDP_IMX_H_
#include <drm/bridge/cdns-mhdp-common.h>
#include <drm/drm_encoder_slave.h>
struct imx_mhdp_device;
struct imx_hdp_clks {
struct clk *av_pll;
struct clk *dig_pll;
struct clk *clk_ipg;
struct clk *clk_core;
struct clk *clk_pxl;
struct clk *clk_pxl_mux;
struct clk *clk_pxl_link;
struct clk *lpcg_hdp;
struct clk *lpcg_msi;
struct clk *lpcg_pxl;
struct clk *lpcg_vif;
struct clk *lpcg_lis;
struct clk *lpcg_apb;
struct clk *lpcg_apb_csr;
struct clk *lpcg_apb_ctrl;
struct clk *lpcg_i2s;
struct clk *clk_i2s_bypass;
};
struct imx_mhdp_device {
struct cdns_mhdp_device mhdp;
struct drm_encoder encoder;
struct mutex audio_mutex;
spinlock_t audio_lock;
bool connected;
bool active;
bool suspended;
struct imx_hdp_clks clks;
int bus_type;
struct device *pd_mhdp_dev;
struct device *pd_pll0_dev;
struct device *pd_pll1_dev;
struct device_link *pd_mhdp_link;
struct device_link *pd_pll0_link;
struct device_link *pd_pll1_link;
};
void cdns_mhdp_plat_init_imx8qm(struct cdns_mhdp_device *mhdp);
void cdns_mhdp_plat_deinit_imx8qm(struct cdns_mhdp_device *mhdp);
void cdns_mhdp_pclk_rate_imx8qm(struct cdns_mhdp_device *mhdp);
int cdns_mhdp_firmware_init_imx8qm(struct cdns_mhdp_device *mhdp);
int cdns_mhdp_power_on_imx8qm(struct cdns_mhdp_device *mhdp);
int cdns_mhdp_power_on_ls1028a(struct cdns_mhdp_device *mhdp);
void cdns_mhdp_pclk_rate_ls1028a(struct cdns_mhdp_device *mhdp);
#endif /* CDNS_MHDP_IMX_H_ */

View File

@ -0,0 +1,7 @@
config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select RESET_CONTROLLER
select IMX_IRQSTEER
help
Choose this if you have an NXP i.MX8MQ-based system and want to use the
Display Controller Subsystem. This option enables DCSS support.

View File

@ -0,0 +1,7 @@
imx-dcss-objs := dcss-drv.o dcss-dev.o dcss-blkctl.o dcss-ctxld.o dcss-dtg.o \
dcss-ss.o dcss-dpr.o dcss-scaler.o dcss-kms.o dcss-crtc.o \
dcss-plane.o dcss-dec400d.o dcss-hdr10.o dcss-wrscl.o \
dcss-rdsrc.o dcss-dtrc.o
obj-$(CONFIG_DRM_IMX_DCSS) += imx-dcss.o

View File

@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/of.h>
#include "dcss-dev.h"
#define DCSS_BLKCTL_RESET_CTRL 0x00
#define B_CLK_RESETN BIT(0)
#define APB_CLK_RESETN BIT(1)
#define P_CLK_RESETN BIT(2)
#define RTR_CLK_RESETN BIT(3)
#define DCSS_BLKCTL_CONTROL0 0x10
#define HDMI_MIPI_CLK_SEL BIT(0)
#define DISPMIX_REFCLK_SEL_POS 4
#define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4)
#define DISPMIX_PIXCLK_SEL BIT(8)
#define HDMI_SRC_SECURE_EN BIT(16)
struct dcss_blkctl {
struct device *dev;
void __iomem *base_reg;
bool hdmi_output;
};
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl)
{
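/* pick the pixel clock routing (HDMI vs. MIPI output), then release the block resets */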
if (blkctl->hdmi_output)
dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
else
dcss_writel(DISPMIX_PIXCLK_SEL,
blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN,
blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL);
}
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
{
struct dcss_blkctl *blkctl;
blkctl = devm_kzalloc(dcss->dev, sizeof(*blkctl), GFP_KERNEL);
if (!blkctl)
return -ENOMEM;
blkctl->base_reg = devm_ioremap(dcss->dev, blkctl_base, SZ_4K);
if (!blkctl->base_reg) {
dev_err(dcss->dev, "unable to remap BLK CTRL base\n");
devm_kfree(dcss->dev, blkctl);
return -ENOMEM;
}
dcss->blkctl = blkctl;
blkctl->dev = dcss->dev;
blkctl->hdmi_output = dcss->hdmi_output;
dcss_blkctl_cfg(blkctl);
return 0;
}
void dcss_blkctl_exit(struct dcss_blkctl *blkctl)
{
dcss_clr(P_CLK_RESETN | RTR_CLK_RESETN,
blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL);
if (blkctl->base_reg)
devm_iounmap(blkctl->dev, blkctl->base_reg);
devm_kfree(blkctl->dev, blkctl);
}

View File

@ -0,0 +1,256 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <linux/pm_runtime.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
static int dcss_enable_vblank(struct drm_crtc *crtc)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = crtc->dev->dev_private;
if (dcss_crtc->irq_enabled)
return 0;
dcss_crtc->irq_enabled = true;
dcss_dtg_vblank_irq_enable(dcss->dtg, true);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
enable_irq(dcss_crtc->irq);
return 0;
}
static void dcss_disable_vblank(struct drm_crtc *crtc)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
disable_irq_nosync(dcss_crtc->irq);
dcss_dtg_vblank_irq_enable(dcss->dtg, false);
if (!dcss_dtrc_is_running(dcss->dtrc))
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
dcss_crtc->irq_enabled = false;
}
static const struct drm_crtc_funcs dcss_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = dcss_enable_vblank,
.disable_vblank = dcss_disable_vblank,
};
static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
drm_crtc_vblank_on(crtc);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc));
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
}
static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
if (dcss_dtg_is_enabled(dcss->dtg))
dcss_ctxld_enable(dcss->ctxld);
}
static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct videomode vm;
drm_display_mode_to_videomode(mode, &vm);
pm_runtime_get_sync(dcss->dev);
dcss_enable_vblank(crtc);
vm.pixelclock = mode->crtc_clock * 1000;
dcss_dtg_sync_set(dcss->dtg, &vm);
dcss_ss_subsam_set(dcss->ss, dcss_crtc->output_is_yuv);
dcss_ss_sync_set(dcss->ss, &vm, mode->flags & DRM_MODE_FLAG_PHSYNC,
mode->flags & DRM_MODE_FLAG_PVSYNC);
dcss_dtg_css_set(dcss->dtg, dcss_crtc->output_is_yuv);
dcss_ss_enable(dcss->ss);
dcss_dtg_enable(dcss->dtg, true, NULL);
dcss_ctxld_enable(dcss->ctxld);
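/* the vblank interrupt signals enable completion once the DTG is up and running */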
reinit_completion(&dcss_crtc->en_completion);
wait_for_completion_timeout(&dcss_crtc->en_completion,
msecs_to_jiffies(500));
}
static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
dcss_ss_disable(dcss->ss);
dcss_dtg_enable(dcss->dtg, false, &dcss_crtc->dis_completion);
dcss_ctxld_enable(dcss->ctxld);
reinit_completion(&dcss_crtc->dis_completion);
wait_for_completion_timeout(&dcss_crtc->dis_completion,
msecs_to_jiffies(100));
drm_crtc_vblank_off(crtc);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
pm_runtime_put_sync(dcss->dev);
}
static const struct drm_crtc_helper_funcs dcss_helper_funcs = {
.atomic_begin = dcss_crtc_atomic_begin,
.atomic_flush = dcss_crtc_atomic_flush,
.atomic_enable = dcss_crtc_atomic_enable,
.atomic_disable = dcss_crtc_atomic_disable,
};
static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id)
{
struct dcss_crtc *dcss_crtc = dev_id;
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
if (!dcss_dtg_vblank_irq_valid(dcss->dtg))
return IRQ_HANDLED;
complete(&dcss_crtc->en_completion);
if (dcss_ctxld_is_flushed(dcss->ctxld))
drm_crtc_handle_vblank(&dcss_crtc->base);
dcss_dtg_vblank_irq_clear(dcss->dtg);
return IRQ_HANDLED;
}
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
{
struct dcss_dev *dcss = drm->dev_private;
struct platform_device *pdev = to_platform_device(dcss->dev);
int ret;
crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
DRM_PLANE_TYPE_PRIMARY, 2);
if (IS_ERR(crtc->plane[0]))
return PTR_ERR(crtc->plane[0]);
crtc->base.port = dcss->of_port;
drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base,
NULL, &dcss_crtc_funcs, NULL);
if (ret) {
dev_err(dcss->dev, "failed to init crtc\n");
return ret;
}
crtc->plane[1] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
DRM_PLANE_TYPE_OVERLAY, 1);
if (IS_ERR(crtc->plane[1]))
crtc->plane[1] = NULL;
crtc->plane[2] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
DRM_PLANE_TYPE_OVERLAY, 0);
if (IS_ERR(crtc->plane[2]))
crtc->plane[2] = NULL;
drm_plane_create_alpha_property(&crtc->plane[0]->base);
crtc->irq = platform_get_irq_byname(pdev, "vblank");
if (crtc->irq < 0) {
dev_err(dcss->dev, "unable to get vblank interrupt\n");
return crtc->irq;
}
init_completion(&crtc->en_completion);
init_completion(&crtc->dis_completion);
ret = devm_request_irq(dcss->dev, crtc->irq, dcss_crtc_irq_handler,
IRQF_TRIGGER_RISING, "dcss_drm", crtc);
if (ret) {
dev_err(dcss->dev, "irq request failed with %d.\n", ret);
return ret;
}
disable_irq(crtc->irq);
return 0;
}
void dcss_crtc_attach_color_mgmt_properties(struct dcss_crtc *crtc)
{
int i;
/* create color management properties only for video planes */
for (i = 1; i < 3; i++) {
if (crtc->plane[i]->type == DRM_PLANE_TYPE_PRIMARY)
return;
drm_plane_create_color_properties(&crtc->plane[i]->base,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709) |
BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_FULL_RANGE);
}
}
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm)
{
struct dcss_dev *dcss = drm->dev_private;
devm_free_irq(dcss->dev, crtc->irq, crtc);
}

View File

@ -0,0 +1,452 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include "dcss-dev.h"
#define DCSS_CTXLD_DEVNAME "dcss_ctxld"
#define DCSS_CTXLD_CONTROL_STATUS 0x0
#define CTXLD_ENABLE BIT(0)
#define ARB_SEL BIT(1)
#define RD_ERR_EN BIT(2)
#define DB_COMP_EN BIT(3)
#define SB_HP_COMP_EN BIT(4)
#define SB_LP_COMP_EN BIT(5)
#define DB_PEND_SB_REC_EN BIT(6)
#define SB_PEND_DISP_ACTIVE_EN BIT(7)
#define AHB_ERR_EN BIT(8)
#define RD_ERR BIT(16)
#define DB_COMP BIT(17)
#define SB_HP_COMP BIT(18)
#define SB_LP_COMP BIT(19)
#define DB_PEND_SB_REC BIT(20)
#define SB_PEND_DISP_ACTIVE BIT(21)
#define AHB_ERR BIT(22)
#define DCSS_CTXLD_DB_BASE_ADDR 0x10
#define DCSS_CTXLD_DB_COUNT 0x14
#define DCSS_CTXLD_SB_BASE_ADDR 0x18
#define DCSS_CTXLD_SB_COUNT 0x1C
#define SB_HP_COUNT_POS 0
#define SB_HP_COUNT_MASK 0xffff
#define SB_LP_COUNT_POS 16
#define SB_LP_COUNT_MASK 0xffff0000
#define DCSS_AHB_ERR_ADDR 0x20
#define CTXLD_IRQ_NAME "ctx_ld"
#define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
#define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
/* The following sizes are in context loader entries, 8 bytes each. */
#define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */
#define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */
#define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */
#define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
CTXLD_SB_HP_CTX_ENTRIES)
/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
static u16 dcss_ctxld_ctx_size[3] = {
CTXLD_DB_CTX_ENTRIES,
CTXLD_SB_HP_CTX_ENTRIES,
CTXLD_SB_LP_CTX_ENTRIES
};
/* this represents an entry in the context loader map */
struct dcss_ctxld_item {
u32 val;
u32 ofs;
};
#define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
struct dcss_ctxld {
struct device *dev;
void __iomem *ctxld_reg;
int irq;
bool irq_en;
struct dcss_ctxld_item *db[2];
struct dcss_ctxld_item *sb_hp[2];
struct dcss_ctxld_item *sb_lp[2];
dma_addr_t db_paddr[2];
dma_addr_t sb_paddr[2];
u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
u8 current_ctx;
bool in_use;
bool armed;
spinlock_t lock; /* protects concurrent access to private data */
void (*dtg_disable_cb)(void *data);
void *dtg_disable_data;
};
static int __dcss_ctxld_enable(struct dcss_ctxld *ctxld);
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
struct dcss_ctxld *ctxld = data;
u32 irq_status;
irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
if (irq_status & CTXLD_IRQ_COMPLETION &&
!(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
ctxld->in_use = false;
if (ctxld->dtg_disable_cb) {
ctxld->dtg_disable_cb(ctxld->dtg_disable_data);
ctxld->dtg_disable_cb = NULL;
ctxld->dtg_disable_data = NULL;
}
} else if (irq_status & CTXLD_IRQ_ERROR) {
/*
* Except for throwing an error message and clearing the status
* register, there's not much we can do here.
*/
dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
irq_status);
dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
}
dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
return IRQ_HANDLED;
}
static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
struct platform_device *pdev)
{
int ret;
ctxld->irq = platform_get_irq_byname(pdev, CTXLD_IRQ_NAME);
if (ctxld->irq < 0) {
dev_err(ctxld->dev, "ctxld: can't get irq number\n");
return ctxld->irq;
}
ret = devm_request_irq(ctxld->dev, ctxld->irq,
dcss_ctxld_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
DCSS_CTXLD_DEVNAME, ctxld);
if (ret) {
dev_err(ctxld->dev, "ctxld: irq request failed.\n");
return ret;
}
ctxld->irq_en = true;
return 0;
}
void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
{
dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
}
static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
{
struct dcss_ctxld_item *ctx;
int i;
for (i = 0; i < 2; i++) {
if (ctxld->db[i]) {
dmam_free_coherent(ctxld->dev,
CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
ctxld->db[i], ctxld->db_paddr[i]);
ctxld->db[i] = NULL;
ctxld->db_paddr[i] = 0;
}
if (ctxld->sb_hp[i]) {
dmam_free_coherent(ctxld->dev,
CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
ctxld->sb_hp[i], ctxld->sb_paddr[i]);
ctxld->sb_hp[i] = NULL;
ctxld->sb_paddr[i] = 0;
}
}
}
static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
{
struct dcss_ctxld_item *ctx;
int i;
dma_addr_t dma_handle;
for (i = 0; i < 2; i++) {
ctx = dmam_alloc_coherent(ctxld->dev,
CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
&dma_handle, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctxld->db[i] = ctx;
ctxld->db_paddr[i] = dma_handle;
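/* SB_HP and SB_LP share one buffer; SB_LP starts right after the HP entries */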
ctx = dmam_alloc_coherent(ctxld->dev,
CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
&dma_handle, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctxld->sb_hp[i] = ctx;
ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
ctxld->sb_paddr[i] = dma_handle;
}
return 0;
}
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
{
struct dcss_ctxld *ctxld;
int ret;
ctxld = devm_kzalloc(dcss->dev, sizeof(struct dcss_ctxld),
GFP_KERNEL);
if (!ctxld)
return -ENOMEM;
dcss->ctxld = ctxld;
ctxld->dev = dcss->dev;
spin_lock_init(&ctxld->lock);
ret = dcss_ctxld_alloc_ctx(ctxld);
if (ret) {
dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
goto err;
}
ctxld->ctxld_reg = devm_ioremap(dcss->dev, ctxld_base, SZ_4K);
if (!ctxld->ctxld_reg) {
dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
ret = -ENOMEM;
goto err;
}
ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
if (ret)
goto err_irq;
dcss_ctxld_hw_cfg(ctxld);
return 0;
err_irq:
devm_iounmap(ctxld->dev, ctxld->ctxld_reg);
err:
dcss_ctxld_free_ctx(ctxld);
devm_kfree(ctxld->dev, ctxld);
return ret;
}
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
devm_free_irq(ctxld->dev, ctxld->irq, ctxld);
if (ctxld->ctxld_reg)
devm_iounmap(ctxld->dev, ctxld->ctxld_reg);
dcss_ctxld_free_ctx(ctxld);
devm_kfree(ctxld->dev, ctxld);
}
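/*
 * Program the DB/SB base addresses and entry counts for the current context,
 * start the context loader, then flip current_ctx so that subsequent
 * dcss_ctxld_write() calls accumulate in the alternate context.
 */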
static int __dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
int curr_ctx = ctxld->current_ctx;
u32 db_base, sb_base, sb_count;
u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
dcss_dpr_write_sysctrl(dcss->dpr);
dcss_scaler_write_sclctrl(dcss->scaler);
if (dcss_dtrc_is_running(dcss->dtrc)) {
dcss_dtrc_switch_banks(dcss->dtrc);
ctxld->armed = true;
}
sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];
/* make sure SB_LP context area comes after SB_HP */
if (sb_lp_cnt &&
ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
struct dcss_ctxld_item *sb_lp_adjusted;
sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;
memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
sb_lp_cnt * CTX_ITEM_SIZE);
}
db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;
dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);
if (sb_hp_cnt)
sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
else
sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;
sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;
dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);
/* enable the context loader */
dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
ctxld->in_use = true;
/*
* Toggle the current context to the alternate one so that any updates
* in the modules' settings take place there.
*/
ctxld->current_ctx ^= 1;
ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;
return 0;
}
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
unsigned long flags;
spin_lock_irqsave(&ctxld->lock, flags);
ctxld->armed = true;
spin_unlock_irqrestore(&ctxld->lock, flags);
return 0;
}
void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
{
unsigned long flags;
spin_lock_irqsave(&ctxld->lock, flags);
if (ctxld->armed && !ctxld->in_use) {
ctxld->armed = false;
__dcss_ctxld_enable(ctxld);
}
spin_unlock_irqrestore(&ctxld->lock, flags);
}
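/*
 * Append a (value, register offset) pair to the selected context area. This
 * variant does not take ctxld->lock; callers must serialize access themselves
 * (dcss_ctxld_write() below wraps it with the spinlock).
 */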
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
u32 reg_ofs)
{
int curr_ctx = ctxld->current_ctx;
struct dcss_ctxld_item *ctx[] = {
[CTX_DB] = ctxld->db[curr_ctx],
[CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
[CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
};
int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
WARN_ON(1);
return;
}
ctx[ctx_id][item_idx].val = val;
ctx[ctx_id][item_idx].ofs = reg_ofs;
ctxld->ctx_size[curr_ctx][ctx_id] += 1;
}
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
u32 val, u32 reg_ofs)
{
unsigned long flags;
spin_lock_irqsave(&ctxld->lock, flags);
dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
spin_unlock_irqrestore(&ctxld->lock, flags);
}
bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
{
return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
}
int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
{
dcss_ctxld_hw_cfg(ctxld);
if (!ctxld->irq_en) {
enable_irq(ctxld->irq);
ctxld->irq_en = true;
}
return 0;
}
int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
{
int ret = 0;
int wait_time_ms = 0;
unsigned long flags;
dcss_ctxld_kick(ctxld);
while (ctxld->in_use && wait_time_ms < 500) {
msleep(20);
wait_time_ms += 20;
}
if (ctxld->in_use)
return -ETIMEDOUT;
spin_lock_irqsave(&ctxld->lock, flags);
if (ctxld->irq_en) {
disable_irq_nosync(ctxld->irq);
ctxld->irq_en = false;
}
/* reset context region and sizes */
ctxld->current_ctx = 0;
ctxld->ctx_size[0][CTX_DB] = 0;
ctxld->ctx_size[0][CTX_SB_HP] = 0;
ctxld->ctx_size[0][CTX_SB_LP] = 0;
spin_unlock_irqrestore(&ctxld->lock, flags);
return ret;
}
void dcss_ctxld_register_dtg_disable_cb(struct dcss_ctxld *ctxld,
void (*cb)(void *),
void *data)
{
ctxld->dtg_disable_cb = cb;
ctxld->dtg_disable_data = data;
}

View File

@ -0,0 +1,270 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <drm/drm_fourcc.h>
#include "dcss-dev.h"
/* DEC400D registers offsets */
#define DEC400D_READCONFIG_BASE 0x800
#define DEC400D_READCONFIG(i) (DEC400D_READCONFIG_BASE + ((i) << 2))
#define COMPRESSION_ENABLE_BIT BIT(0)
#define COMPRESSION_FORMAT_POS 3
#define COMPRESSION_ALIGN_MODE_POS 16
#define TILE_ALIGN_MODE_POS 22
#define TILE_MODE_POS 25
#define DEC400D_READBUFFERBASE0 0x900
#define DEC400D_READCACHEBASE0 0x980
#define DEC400D_CONTROL 0xB00
#define DEC400D_CLEAR 0xB80
#define DISABLE_COMPRESSION_BIT BIT(1)
#define SHADOW_TRIGGER_BIT BIT(29)
#define DEC400_CFMT_ARGB8 0x0
#define DEC400_CFMT_XRGB8 0x1
#define DEC400_CFMT_AYUV 0x2
#define DEC400_CFMT_UYVY 0x3
#define DEC400_CFMT_YUY2 0x4
#define DEC400_CFMT_YUV_ONLY 0x5
#define DEC400_CFMT_UV_MIX 0x6
#define DEC400_CFMT_ARGB4 0x7
#define DEC400_CFMT_XRGB4 0x8
#define DEC400_CFMT_A1R5G5B5 0x9
#define DEC400_CFMT_X1R5G5B5 0xA
#define DEC400_CFMT_R5G6B5 0xB
#define DEC400_CFMT_Z24S8 0xC
#define DEC400_CFMT_Z24 0xD
#define DEC400_CFMT_Z16 0xE
#define DEC400_CFMT_A2R10G10B10 0xF
#define DEC400_CFMT_BAYER 0x10
#define DEC400_CFMT_SIGNED_BAYER 0x11
struct dcss_dec400d {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
bool bypass; /* bypass or decompress */
};
static void dcss_dec400d_write(struct dcss_dec400d *dec400d,
u32 value,
u32 offset)
{
dcss_ctxld_write(dec400d->ctxld, dec400d->ctx_id,
value, dec400d->base_ofs + offset);
}
int dcss_dec400d_init(struct dcss_dev *dcss, unsigned long dec400d_base)
{
struct dcss_dec400d *dec400d;
int ret;
dec400d = devm_kzalloc(dcss->dev, sizeof(*dec400d), GFP_KERNEL);
if (!dec400d)
return -ENOMEM;
dcss->dec400d = dec400d;
dec400d->dev = dcss->dev;
dec400d->ctxld = dcss->ctxld;
dec400d->base_reg = devm_ioremap(dcss->dev, dec400d_base, SZ_4K);
if (!dec400d->base_reg) {
dev_err(dcss->dev, "dec400d: unable to remap dec400d base\n");
ret = -ENOMEM;
goto free_mem;
}
dec400d->base_ofs = dec400d_base;
dec400d->ctx_id = CTX_SB_HP;
return 0;
free_mem:
devm_kfree(dcss->dev, dcss->dec400d);
return ret;
}
void dcss_dec400d_exit(struct dcss_dec400d *dec400d)
{
if (dec400d->base_reg)
devm_iounmap(dec400d->dev, dec400d->base_reg);
devm_kfree(dec400d->dev, dec400d);
}
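/*
 * Build the READCONFIG word for read channel 0: translate the Vivante
 * compressed-format code into the DEC400D format field and, when compression
 * is enabled, select 32-byte alignment and the 8x4 tile mode.
 */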
void dcss_dec400d_read_config(struct dcss_dec400d *dec400d,
u32 read_id,
bool compress_en,
u32 compress_format)
{
u32 cformat = 0;
u32 read_config = 0x0;
/* TODO: only read_id 0 is supported for now */
if (read_id) {
WARN_ON(1);
return;
}
if (!compress_en)
goto config;
switch (compress_format) {
case _VIV_CFMT_ARGB8:
cformat = DEC400_CFMT_ARGB8;
break;
case _VIV_CFMT_XRGB8:
cformat = DEC400_CFMT_XRGB8;
break;
case _VIV_CFMT_AYUV:
cformat = DEC400_CFMT_AYUV;
break;
case _VIV_CFMT_UYVY:
cformat = DEC400_CFMT_UYVY;
break;
case _VIV_CFMT_YUY2:
cformat = DEC400_CFMT_YUY2;
break;
case _VIV_CFMT_YUV_ONLY:
cformat = DEC400_CFMT_YUV_ONLY;
break;
case _VIV_CFMT_UV_MIX:
cformat = DEC400_CFMT_UV_MIX;
break;
case _VIV_CFMT_ARGB4:
cformat = DEC400_CFMT_ARGB4;
break;
case _VIV_CFMT_XRGB4:
cformat = DEC400_CFMT_XRGB4;
break;
case _VIV_CFMT_A1R5G5B5:
cformat = DEC400_CFMT_A1R5G5B5;
break;
case _VIV_CFMT_X1R5G5B5:
cformat = DEC400_CFMT_X1R5G5B5;
break;
case _VIV_CFMT_R5G6B5:
cformat = DEC400_CFMT_R5G6B5;
break;
case _VIV_CFMT_Z24S8:
cformat = DEC400_CFMT_Z24S8;
break;
case _VIV_CFMT_Z24:
cformat = DEC400_CFMT_Z24;
break;
case _VIV_CFMT_Z16:
cformat = DEC400_CFMT_Z16;
break;
case _VIV_CFMT_A2R10G10B10:
cformat = DEC400_CFMT_A2R10G10B10;
break;
case _VIV_CFMT_BAYER:
cformat = DEC400_CFMT_BAYER;
break;
case _VIV_CFMT_SIGNED_BAYER:
cformat = DEC400_CFMT_SIGNED_BAYER;
break;
default:
/* TODO: not supported yet */
WARN_ON(1);
return;
}
/* Dec compress format */
read_config |= cformat << COMPRESSION_FORMAT_POS;
/* ALIGN32_BYTE */
read_config |= 0x2 << COMPRESSION_ALIGN_MODE_POS;
/* TILE1_ALIGN */
read_config |= 0x0 << TILE_ALIGN_MODE_POS;
/* TILE8x4 */
read_config |= 0x3 << TILE_MODE_POS;
/* Compression Enable */
read_config |= COMPRESSION_ENABLE_BIT;
config:
dcss_dec400d_write(dec400d, read_config, DEC400D_READCONFIG(read_id));
}
void dcss_dec400d_bypass(struct dcss_dec400d *dec400d)
{
u32 control;
dcss_dec400d_read_config(dec400d, 0, false, 0);
control = dcss_readl(dec400d->base_reg + DEC400D_CONTROL);
dev_dbg(dec400d->dev, "%s: dec400d control = %#x\n", __func__, control);
control |= DISABLE_COMPRESSION_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
/* Trigger shadow registers */
control |= SHADOW_TRIGGER_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
dec400d->bypass = true;
}
void dcss_dec400d_shadow_trig(struct dcss_dec400d *dec400d)
{
u32 control;
/* nothing to do if the channel is in bypass mode */
if (dec400d->bypass)
return;
control = dcss_readl(dec400d->base_reg + DEC400D_CONTROL);
/* Trigger shadow registers */
control |= SHADOW_TRIGGER_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
}
void dcss_dec400d_addr_set(struct dcss_dec400d *dec400d, u32 baddr, u32 caddr)
{
/* set frame buffer base addr */
dcss_dec400d_write(dec400d, baddr, DEC400D_READBUFFERBASE0);
/* set tile status cache addr */
dcss_dec400d_write(dec400d, caddr, DEC400D_READCACHEBASE0);
dec400d->bypass = false;
}
void dcss_dec400d_fast_clear_config(struct dcss_dec400d *dec400d,
u32 fc_value,
bool enable)
{
dcss_dec400d_write(dec400d, fc_value, DEC400D_CLEAR);
}
void dcss_dec400d_enable(struct dcss_dec400d *dec400d)
{
u32 control;
if (dec400d->bypass)
return;
control = dcss_readl(dec400d->base_reg + DEC400D_CONTROL);
/* enable compression */
control &= ~(DISABLE_COMPRESSION_BIT);
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
/* Trigger shadow registers */
control |= SHADOW_TRIGGER_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
}

View File

@ -0,0 +1,369 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/busfreq-imx.h>
#include <drm/drm_modeset_helper.h>
#include "dcss-dev.h"
static void dcss_clocks_enable(struct dcss_dev *dcss)
{
if (dcss->clks_on)
return;
if (dcss->hdmi_output) {
clk_prepare_enable(dcss->pll_phy_ref_clk);
clk_prepare_enable(dcss->pll_src_clk);
}
clk_prepare_enable(dcss->axi_clk);
clk_prepare_enable(dcss->apb_clk);
clk_prepare_enable(dcss->rtrm_clk);
clk_prepare_enable(dcss->dtrc_clk);
clk_prepare_enable(dcss->pix_clk);
dcss->clks_on = true;
}
static void dcss_clocks_disable(struct dcss_dev *dcss)
{
if (!dcss->clks_on)
return;
clk_disable_unprepare(dcss->pix_clk);
clk_disable_unprepare(dcss->dtrc_clk);
clk_disable_unprepare(dcss->rtrm_clk);
clk_disable_unprepare(dcss->apb_clk);
clk_disable_unprepare(dcss->axi_clk);
if (dcss->hdmi_output) {
clk_disable_unprepare(dcss->pll_src_clk);
clk_disable_unprepare(dcss->pll_phy_ref_clk);
}
dcss->clks_on = false;
}
static void dcss_busfreq_enable(struct dcss_dev *dcss)
{
if (dcss->bus_freq_on)
return;
request_bus_freq(BUS_FREQ_HIGH);
dcss->bus_freq_on = true;
}
static void dcss_busfreq_disable(struct dcss_dev *dcss)
{
if (!dcss->bus_freq_on)
return;
release_bus_freq(BUS_FREQ_HIGH);
dcss->bus_freq_on = false;
}
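/*
 * Bring up all DCSS sub-blocks in dependency order (BLKCTL first, then CTXLD,
 * DTG, ...). On failure, the already-initialized sub-blocks are torn down in
 * reverse order through the error labels below.
 */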
static int dcss_submodules_init(struct dcss_dev *dcss)
{
int ret = 0;
u32 base_addr = dcss->start_addr;
const struct dcss_type_data *devtype = dcss->devtype;
dcss_clocks_enable(dcss);
ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs);
if (ret)
return ret;
ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs);
if (ret)
goto ctxld_err;
ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs);
if (ret)
goto dtg_err;
ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs);
if (ret)
goto ss_err;
ret = dcss_dtrc_init(dcss, base_addr + devtype->dtrc_ofs);
if (ret)
goto dtrc_err;
ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs);
if (ret)
goto dpr_err;
ret = dcss_wrscl_init(dcss, base_addr + devtype->wrscl_ofs);
if (ret)
goto wrscl_err;
ret = dcss_rdsrc_init(dcss, base_addr + devtype->rdsrc_ofs);
if (ret)
goto rdsrc_err;
ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs);
if (ret)
goto scaler_err;
ret = dcss_dec400d_init(dcss, base_addr + devtype->dec400d_ofs);
if (ret)
goto dec400d_err;
ret = dcss_hdr10_init(dcss, base_addr + devtype->hdr10_ofs);
if (ret)
goto hdr10_err;
return 0;
hdr10_err:
dcss_dec400d_exit(dcss->dec400d);
dec400d_err:
dcss_scaler_exit(dcss->scaler);
scaler_err:
dcss_rdsrc_exit(dcss->rdsrc);
rdsrc_err:
dcss_wrscl_exit(dcss->wrscl);
wrscl_err:
dcss_dpr_exit(dcss->dpr);
dpr_err:
dcss_dtrc_exit(dcss->dtrc);
dtrc_err:
dcss_ss_exit(dcss->ss);
ss_err:
dcss_dtg_exit(dcss->dtg);
dtg_err:
dcss_ctxld_exit(dcss->ctxld);
ctxld_err:
dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
return ret;
}
static void dcss_submodules_stop(struct dcss_dev *dcss)
{
dcss_clocks_enable(dcss);
dcss_hdr10_exit(dcss->hdr10);
dcss_dec400d_exit(dcss->dec400d);
dcss_scaler_exit(dcss->scaler);
dcss_rdsrc_exit(dcss->rdsrc);
dcss_wrscl_exit(dcss->wrscl);
dcss_dpr_exit(dcss->dpr);
dcss_dtrc_exit(dcss->dtrc);
dcss_ss_exit(dcss->ss);
dcss_dtg_exit(dcss->dtg);
dcss_ctxld_exit(dcss->ctxld);
dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
}
static int dcss_clks_init(struct dcss_dev *dcss)
{
int i;
struct {
const char *id;
struct clk **clk;
bool required;
} clks[] = {
{"apb", &dcss->apb_clk, true},
{"axi", &dcss->axi_clk, true},
{"pix", &dcss->pix_clk, true},
{"rtrm", &dcss->rtrm_clk, true},
{"dtrc", &dcss->dtrc_clk, true},
{"pll_src", &dcss->pll_src_clk, dcss->hdmi_output},
{"pll_phy_ref", &dcss->pll_phy_ref_clk, dcss->hdmi_output},
};
for (i = 0; i < ARRAY_SIZE(clks); i++) {
*clks[i].clk = devm_clk_get(dcss->dev, clks[i].id);
if (IS_ERR(*clks[i].clk) && clks[i].required) {
dev_err(dcss->dev, "failed to get %s clock\n",
clks[i].id);
return PTR_ERR(*clks[i].clk);
}
}
dcss->clks_on = false;
return 0;
}
static void dcss_clks_release(struct dcss_dev *dcss)
{
devm_clk_put(dcss->dev, dcss->dtrc_clk);
devm_clk_put(dcss->dev, dcss->rtrm_clk);
devm_clk_put(dcss->dev, dcss->pix_clk);
devm_clk_put(dcss->dev, dcss->axi_clk);
devm_clk_put(dcss->dev, dcss->apb_clk);
}
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
{
struct platform_device *pdev = to_platform_device(dev);
int ret;
struct resource *res;
struct dcss_dev *dcss;
const struct dcss_type_data *devtype;
devtype = of_device_get_match_data(dev);
if (!devtype) {
dev_err(dev, "no device match found\n");
return ERR_PTR(-ENODEV);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "cannot get memory resource\n");
return ERR_PTR(-EINVAL);
}
dcss = devm_kzalloc(dev, sizeof(struct dcss_dev), GFP_KERNEL);
if (!dcss)
return ERR_PTR(-ENOMEM);
dcss->dev = dev;
dcss->devtype = devtype;
dcss->hdmi_output = hdmi_output;
ret = dcss_clks_init(dcss);
if (ret) {
dev_err(dev, "clocks initialization failed\n");
goto err;
}
dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
if (!dcss->of_port) {
dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name);
ret = -ENODEV;
goto clks_err;
}
dcss->start_addr = res->start;
ret = dcss_submodules_init(dcss);
if (ret) {
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
pm_runtime_enable(dev);
return dcss;
clks_err:
dcss_clks_release(dcss);
err:
devm_kfree(dcss->dev, dcss);
return ERR_PTR(ret);
}
void dcss_dev_destroy(struct dcss_dev *dcss)
{
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);
dcss_clks_release(dcss);
devm_kfree(dcss->dev, dcss);
}
#ifdef CONFIG_PM_SLEEP
int dcss_dev_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
int ret;
drm_mode_config_helper_suspend(dcss_drv_dev_to_drm(dev));
if (pm_runtime_suspended(dev))
return 0;
ret = dcss_ctxld_suspend(dcss->ctxld);
if (ret)
return ret;
dcss_clocks_disable(dcss);
dcss_busfreq_disable(dcss);
return 0;
}
int dcss_dev_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
if (pm_runtime_suspended(dev)) {
drm_mode_config_helper_resume(dcss_drv_dev_to_drm(dev));
return 0;
}
dcss_busfreq_enable(dcss);
dcss_clocks_enable(dcss);
dcss_blkctl_cfg(dcss->blkctl);
dcss_ctxld_resume(dcss->ctxld);
drm_mode_config_helper_resume(dcss_drv_dev_to_drm(dev));
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
int dcss_dev_runtime_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
int ret;
ret = dcss_ctxld_suspend(dcss->ctxld);
if (ret)
return ret;
dcss_clocks_disable(dcss);
dcss_busfreq_disable(dcss);
return 0;
}
int dcss_dev_runtime_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
dcss_busfreq_enable(dcss);
dcss_clocks_enable(dcss);
dcss_blkctl_cfg(dcss->blkctl);
dcss_ctxld_resume(dcss->ctxld);
return 0;
}
#endif /* CONFIG_PM */

View File

@ -0,0 +1,359 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 NXP.
*/
#ifndef __DCSS_PRV_H__
#define __DCSS_PRV_H__
#include <drm/drm_atomic.h>
#include <drm/drm_fourcc.h>
#include <linux/io.h>
#include <video/videomode.h>
#define SET 0x04
#define CLR 0x08
#define TGL 0x0C
#define dcss_writel(v, c) writel((v), (c))
#define dcss_readl(c) readl(c)
#define dcss_set(v, c) writel((v), (c) + SET)
#define dcss_clr(v, c) writel((v), (c) + CLR)
#define dcss_toggle(v, c) writel((v), (c) + TGL)
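/*
 * Some DCSS registers provide set/clear/toggle aliases at fixed offsets from
 * the base register, so individual bits can be changed without a
 * read-modify-write; dcss_update() below is the generic fallback.
 */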
static inline void dcss_update(u32 v, u32 m, void __iomem *c)
{
writel((readl(c) & ~(m)) | (v), (c));
}
#define DCSS_DBG_REG(reg) {.name = #reg, .ofs = reg}
enum {
DCSS_IMX8MQ = 0,
};
struct dcss_type_data {
const char *name;
u32 blkctl_ofs;
u32 ctxld_ofs;
u32 rdsrc_ofs;
u32 wrscl_ofs;
u32 dtg_ofs;
u32 scaler_ofs;
u32 ss_ofs;
u32 dpr_ofs;
u32 dtrc_ofs;
u32 dec400d_ofs;
u32 hdr10_ofs;
};
struct dcss_debug_reg {
char *name;
u32 ofs;
};
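/*
 * Context types used by the context loader: one double-buffered (DB) context
 * and two single-buffered (SB) ones with high and low priority.
 */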
enum dcss_ctxld_ctx_type {
CTX_DB,
CTX_SB_HP, /* high-priority */
CTX_SB_LP, /* low-priority */
};
struct dcss_dev {
struct device *dev;
const struct dcss_type_data *devtype;
struct device_node *of_port;
u32 start_addr;
struct dcss_blkctl *blkctl;
struct dcss_ctxld *ctxld;
struct dcss_dpr *dpr;
struct dcss_dtg *dtg;
struct dcss_ss *ss;
struct dcss_hdr10 *hdr10;
struct dcss_scaler *scaler;
struct dcss_dtrc *dtrc;
struct dcss_dec400d *dec400d;
struct dcss_wrscl *wrscl;
struct dcss_rdsrc *rdsrc;
struct clk *apb_clk;
struct clk *axi_clk;
struct clk *pix_clk;
struct clk *rtrm_clk;
struct clk *dtrc_clk;
struct clk *pll_src_clk;
struct clk *pll_phy_ref_clk;
void (*dcss_disable_callback)(void *data);
bool clks_on;
bool bus_freq_on;
bool hdmi_output;
};
enum dcss_color_space {
DCSS_COLORSPACE_RGB,
DCSS_COLORSPACE_YUV,
DCSS_COLORSPACE_UNKNOWN,
};
struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev);
struct drm_device *dcss_drv_dev_to_drm(struct device *dev);
struct dcss_dev *dcss_dev_create(struct device *dev, bool mipi_output);
void dcss_dev_destroy(struct dcss_dev *dcss);
int dcss_dev_runtime_suspend(struct device *dev);
int dcss_dev_runtime_resume(struct device *dev);
int dcss_dev_suspend(struct device *dev);
int dcss_dev_resume(struct device *dev);
/* BLKCTL */
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
void dcss_blkctl_exit(struct dcss_blkctl *blkctl);
/* CTXLD */
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base);
void dcss_ctxld_exit(struct dcss_ctxld *ctxld);
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
u32 val, u32 reg_ofs);
int dcss_ctxld_resume(struct dcss_ctxld *dcss_ctxld);
int dcss_ctxld_suspend(struct dcss_ctxld *dcss_ctxld);
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
u32 reg_ofs);
void dcss_ctxld_kick(struct dcss_ctxld *ctxld);
bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld);
int dcss_ctxld_enable(struct dcss_ctxld *ctxld);
void dcss_ctxld_register_dtg_disable_cb(struct dcss_ctxld *ctxld,
void (*cb)(void *),
void *data);
void dcss_ctxld_register_dtrc_cb(struct dcss_ctxld *ctxld,
bool (*cb)(void *),
void *data);
/* DPR */
enum dcss_tile_type {
TILE_LINEAR = 0,
TILE_GPU_STANDARD,
TILE_GPU_SUPER,
TILE_VPU_YUV420,
TILE_VPU_VP9,
};
enum dcss_pix_size {
PIX_SIZE_8,
PIX_SIZE_16,
PIX_SIZE_32,
};
int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base);
void dcss_dpr_exit(struct dcss_dpr *dpr);
void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr);
void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres);
void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
u32 chroma_base_addr, u16 pitch);
void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en);
void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
const struct drm_format_info *format, u64 modifier);
void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation);
/* DTG */
int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base);
void dcss_dtg_exit(struct dcss_dtg *dtg);
bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg);
void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en);
void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg);
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm);
void dcss_dtg_css_set(struct dcss_dtg *dtg, bool out_is_yuv);
void dcss_dtg_enable(struct dcss_dtg *dtg, bool en,
struct completion *dis_completion);
bool dcss_dtg_is_enabled(struct dcss_dtg *dtg);
void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en);
bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha);
void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
const struct drm_format_info *format, int alpha);
void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
int px, int py, int pw, int ph);
void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en);
/* SUBSAM */
int dcss_ss_init(struct dcss_dev *dcss, unsigned long subsam_base);
void dcss_ss_exit(struct dcss_ss *ss);
void dcss_ss_enable(struct dcss_ss *ss);
void dcss_ss_disable(struct dcss_ss *ss);
void dcss_ss_subsam_set(struct dcss_ss *ss, bool output_is_yuv);
void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
bool phsync, bool pvsync);
/* SCALER */
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
void dcss_scaler_exit(struct dcss_scaler *scl);
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
u32 vrefresh_hz);
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en);
int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
int *min, int *max);
void dcss_scaler_write_sclctrl(struct dcss_scaler *scl);
/* DEC400D */
#define VIV_VIDMEM_METADATA_MAGIC fourcc_code('v', 'i', 'v', 'm')
/* Compressed formats are currently defined to match DEC400D; they should be made more general. */
typedef enum _VIV_COMPRESS_FMT
{
_VIV_CFMT_ARGB8 = 0,
_VIV_CFMT_XRGB8,
_VIV_CFMT_AYUV,
_VIV_CFMT_UYVY,
_VIV_CFMT_YUY2,
_VIV_CFMT_YUV_ONLY,
_VIV_CFMT_UV_MIX,
_VIV_CFMT_ARGB4,
_VIV_CFMT_XRGB4,
_VIV_CFMT_A1R5G5B5,
_VIV_CFMT_X1R5G5B5,
_VIV_CFMT_R5G6B5,
_VIV_CFMT_Z24S8,
_VIV_CFMT_Z24,
_VIV_CFMT_Z16,
_VIV_CFMT_A2R10G10B10,
_VIV_CFMT_BAYER,
_VIV_CFMT_SIGNED_BAYER,
_VIV_CFMT_VAA16,
_VIV_CFMT_S8,
_VIV_CFMT_MAX,
} _VIV_COMPRESS_FMT;
/* Metadata for cross-device fd sharing, with additional tile status (ts) info. */
typedef struct _VIV_VIDMEM_METADATA
{
uint32_t magic;
int32_t ts_fd;
void * ts_dma_buf;
uint32_t fc_enabled;
uint32_t fc_value;
uint32_t fc_value_upper;
uint32_t compressed;
uint32_t compress_format;
} _VIV_VIDMEM_METADATA;
int dcss_dec400d_init(struct dcss_dev *dcss, unsigned long dec400d_base);
void dcss_dec400d_exit(struct dcss_dec400d *dec400d);
void dcss_dec400d_bypass(struct dcss_dec400d *dec400d);
void dcss_dec400d_shadow_trig(struct dcss_dec400d *dec400d);
void dcss_dec400d_enable(struct dcss_dec400d *dec400d);
void dcss_dec400d_fast_clear_config(struct dcss_dec400d *dec400d,
u32 fc_value,
bool enable);
void dcss_dec400d_read_config(struct dcss_dec400d *dec400d,
u32 read_id,
bool compress_en,
u32 compress_format);
void dcss_dec400d_addr_set(struct dcss_dec400d *dec400d, u32 baddr, u32 caddr);
/* HDR10 */
enum dcss_hdr10_nonlinearity {
NL_REC2084,
NL_REC709,
NL_BT1886,
NL_2100HLG,
NL_SRGB,
};
enum dcss_hdr10_pixel_range {
PR_LIMITED,
PR_FULL,
};
enum dcss_hdr10_gamut {
G_REC2020,
G_REC709,
G_REC601_NTSC,
G_REC601_PAL,
G_ADOBE_ARGB,
};
struct dcss_hdr10_pipe_cfg {
bool is_yuv;
enum dcss_hdr10_nonlinearity nl;
enum dcss_hdr10_pixel_range pr;
enum dcss_hdr10_gamut g;
};
int dcss_hdr10_init(struct dcss_dev *dcss, unsigned long hdr10_base);
void dcss_hdr10_exit(struct dcss_hdr10 *hdr10);
void dcss_hdr10_setup(struct dcss_hdr10 *hdr10, int ch_num,
struct dcss_hdr10_pipe_cfg *ipipe_cfg,
struct dcss_hdr10_pipe_cfg *opipe_cfg);
/* enums common to both WRSCL and RDSRC */
enum dcss_wrscl_rdsrc_psize {
PSIZE_64,
PSIZE_128,
PSIZE_256,
PSIZE_512,
PSIZE_1024,
PSIZE_2048,
PSIZE_4096,
};
enum dcss_wrscl_rdsrc_tsize {
TSIZE_64,
TSIZE_128,
TSIZE_256,
TSIZE_512,
};
enum dcss_wrscl_rdsrc_fifo_size {
FIFO_512,
FIFO_1024,
FIFO_2048,
FIFO_4096,
};
enum dcss_wrscl_rdsrc_bpp {
BPP_38, /* 38 bit unpacked components */
BPP_32_UPCONVERT,
BPP_32_10BIT_OUTPUT,
BPP_20, /* 10-bit YUV422 */
BPP_16, /* 8-bit YUV422 */
};
/* WRSCL */
int dcss_wrscl_init(struct dcss_dev *dcss, unsigned long wrscl_base);
void dcss_wrscl_exit(struct dcss_wrscl *wrscl);
u32 dcss_wrscl_setup(struct dcss_wrscl *wrscl, u32 pix_format, u32 pix_clk_hz,
u32 dst_xres, u32 dst_yres);
void dcss_wrscl_enable(struct dcss_wrscl *wrscl);
void dcss_wrscl_disable(struct dcss_wrscl *wrscl);
/* RDSRC */
int dcss_rdsrc_init(struct dcss_dev *dcss, unsigned long rdsrc_base);
void dcss_rdsrc_exit(struct dcss_rdsrc *rdsrc);
void dcss_rdsrc_setup(struct dcss_rdsrc *rdsrc, u32 pix_format, u32 dst_xres,
u32 dst_yres, u32 base_addr);
void dcss_rdsrc_enable(struct dcss_rdsrc *rdsrc);
void dcss_rdsrc_disable(struct dcss_rdsrc *rdsrc);
/* DTRC */
int dcss_dtrc_init(struct dcss_dev *dcss, unsigned long dtrc_base);
void dcss_dtrc_exit(struct dcss_dtrc *dtrc);
void dcss_dtrc_bypass(struct dcss_dtrc *dtrc, int ch_num);
void dcss_dtrc_set_format_mod(struct dcss_dtrc *dtrc, int ch_num, u64 modifier);
void dcss_dtrc_addr_set(struct dcss_dtrc *dtrc, int ch_num,
u32 p1_ba, u32 p2_ba, uint64_t dec_table_ofs);
bool dcss_dtrc_ch_running(struct dcss_dtrc *dtrc, int ch_num);
bool dcss_dtrc_is_running(struct dcss_dtrc *dtrc);
void dcss_dtrc_enable(struct dcss_dtrc *dtrc, int ch_num, bool enable);
void dcss_dtrc_set_res(struct dcss_dtrc *dtrc, int ch_num,
struct drm_plane_state *state, u32 *dtrc_w, u32 *dtrc_h);
void dcss_dtrc_switch_banks(struct dcss_dtrc *dtrc);
#endif /* __DCSS_PRV_H__ */

View File

@ -0,0 +1,571 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include "dcss-dev.h"
#define DCSS_DPR_SYSTEM_CTRL0 0x000
#define RUN_EN BIT(0)
#define SOFT_RESET BIT(1)
#define REPEAT_EN BIT(2)
#define SHADOW_LOAD_EN BIT(3)
#define SW_SHADOW_LOAD_SEL BIT(4)
#define BCMD2AXI_MSTR_ID_CTRL BIT(16)
#define DCSS_DPR_IRQ_MASK 0x020
#define DCSS_DPR_IRQ_MASK_STATUS 0x030
#define DCSS_DPR_IRQ_NONMASK_STATUS 0x040
#define IRQ_DPR_CTRL_DONE BIT(0)
#define IRQ_DPR_RUN BIT(1)
#define IRQ_DPR_SHADOW_LOADED BIT(2)
#define IRQ_AXI_READ_ERR BIT(3)
#define DPR2RTR_YRGB_FIFO_OVFL BIT(4)
#define DPR2RTR_UV_FIFO_OVFL BIT(5)
#define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6)
#define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7)
#define DCSS_DPR_MODE_CTRL0 0x050
#define RTR_3BUF_EN BIT(0)
#define RTR_4LINE_BUF_EN BIT(1)
#define TILE_TYPE_POS 2
#define TILE_TYPE_MASK GENMASK(4, 2)
#define YUV_EN BIT(6)
#define COMP_2PLANE_EN BIT(7)
#define PIX_SIZE_POS 8
#define PIX_SIZE_MASK GENMASK(9, 8)
#define PIX_LUMA_UV_SWAP BIT(10)
#define PIX_UV_SWAP BIT(11)
#define B_COMP_SEL_POS 12
#define B_COMP_SEL_MASK GENMASK(13, 12)
#define G_COMP_SEL_POS 14
#define G_COMP_SEL_MASK GENMASK(15, 14)
#define R_COMP_SEL_POS 16
#define R_COMP_SEL_MASK GENMASK(17, 16)
#define A_COMP_SEL_POS 18
#define A_COMP_SEL_MASK GENMASK(19, 18)
#define DCSS_DPR_FRAME_CTRL0 0x070
#define HFLIP_EN BIT(0)
#define VFLIP_EN BIT(1)
#define ROT_ENC_POS 2
#define ROT_ENC_MASK GENMASK(3, 2)
#define ROT_FLIP_ORDER_EN BIT(4)
#define PITCH_POS 16
#define PITCH_MASK GENMASK(31, 16)
#define DCSS_DPR_FRAME_1P_CTRL0 0x090
#define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0
#define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0
#define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0
#define DCSS_DPR_FRAME_2P_CTRL0 0x0E0
#define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0
#define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100
#define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110
#define DCSS_DPR_STATUS_CTRL0 0x130
#define STATUS_MUX_SEL_MASK GENMASK(2, 0)
#define STATUS_SRC_SEL_POS 16
#define STATUS_SRC_SEL_MASK GENMASK(18, 16)
#define DCSS_DPR_STATUS_CTRL1 0x140
#define DCSS_DPR_RTRAM_CTRL0 0x200
#define NUM_ROWS_ACTIVE BIT(0)
#define THRES_HIGH_POS 1
#define THRES_HIGH_MASK GENMASK(3, 1)
#define THRES_LOW_POS 4
#define THRES_LOW_MASK GENMASK(6, 4)
#define ABORT_SEL BIT(7)
struct dcss_dpr_ch {
struct dcss_dpr *dpr;
void __iomem *base_reg;
u32 base_ofs;
struct drm_format_info format;
enum dcss_pix_size pix_size;
enum dcss_tile_type tile;
bool rtram_4line_en;
bool rtram_3buf_en;
u32 frame_ctrl;
u32 mode_ctrl;
u32 sys_ctrl;
u32 rtram_ctrl;
bool sys_ctrl_chgd;
u32 pitch;
int ch_num;
int irq;
bool use_dtrc;
};
struct dcss_dpr {
struct device *dev;
struct dcss_dtrc *dtrc;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_dpr_ch ch[3];
};
static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs)
{
struct dcss_dpr *dpr = ch->dpr;
dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs);
}
static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base)
{
struct dcss_dpr_ch *ch;
int i;
for (i = 0; i < 3; i++) {
ch = &dpr->ch[i];
ch->base_ofs = dpr_base + i * 0x1000;
ch->base_reg = devm_ioremap(dpr->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(dpr->dev, "dpr: unable to remap ch %d base\n",
i);
return -ENOMEM;
}
ch->dpr = dpr;
ch->ch_num = i;
}
return 0;
}
int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
{
struct dcss_dpr *dpr;
dpr = devm_kzalloc(dcss->dev, sizeof(struct dcss_dpr), GFP_KERNEL);
if (!dpr)
return -ENOMEM;
dcss->dpr = dpr;
dpr->dev = dcss->dev;
dpr->ctxld = dcss->ctxld;
dpr->ctx_id = CTX_SB_HP;
dpr->dtrc = dcss->dtrc;
if (dcss_dpr_ch_init_all(dpr, dpr_base)) {
int i;
for (i = 0; i < 3; i++) {
if (dpr->ch[i].base_reg)
devm_iounmap(dpr->dev, dpr->ch[i].base_reg);
}
devm_kfree(dpr->dev, dpr);
return -ENOMEM;
}
return 0;
}
void dcss_dpr_exit(struct dcss_dpr *dpr)
{
int ch_no;
/* stop DPR on all channels */
for (ch_no = 0; ch_no < 3; ch_no++) {
struct dcss_dpr_ch *ch = &dpr->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0);
if (ch->base_reg)
devm_iounmap(dpr->dev, ch->base_reg);
}
devm_kfree(dpr->dev, dpr);
}
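/*
 * Round the frame width up so it covers a whole number of 64-byte bursts for
 * the current pixel size and tile type; NV12_10LE40 packs 10 bits per luma
 * sample, hence the 10/8 scaling.
 */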
static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide,
u32 pix_format)
{
u8 pix_in_64byte_map[3][5] = {
/* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */
{ 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */
{ 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */
{ 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */
};
u32 offset;
u32 div_64byte_mod, pix_in_64byte;
pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile];
if (pix_format == DRM_FORMAT_NV12_10LE40)
pix_wide = pix_wide * 10 / 8;
div_64byte_mod = pix_wide % pix_in_64byte;
offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod);
return pix_wide + offset;
}
static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high,
u32 pix_format)
{
u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8;
u32 offset, pix_y_mod;
pix_y_mod = pix_high % num_rows_buf;
offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0;
return pix_high + offset;
}
void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
u32 pix_format = ch->format.format;
u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR;
int plane, max_planes = 1;
u32 pix_x_wide, pix_y_high;
if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40)
max_planes = 2;
for (plane = 0; plane < max_planes; plane++) {
yres = plane == 1 ? yres >> 1 : yres;
pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format);
pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format);
if (plane == 0)
ch->pitch = pix_x_wide;
dcss_dpr_write(ch, pix_x_wide,
DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap);
dcss_dpr_write(ch, pix_y_high,
DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap);
dcss_dpr_write(ch, ch->use_dtrc ? 7 : 2,
DCSS_DPR_FRAME_1P_CTRL0 + plane * gap);
}
}
void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
u32 chroma_base_addr, u16 pitch)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
if (ch->use_dtrc) {
luma_base_addr = 0x0;
chroma_base_addr = 0x10000000;
}
if (!dcss_dtrc_ch_running(dpr->dtrc, ch_num)) {
dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR);
dcss_dpr_write(ch, chroma_base_addr,
DCSS_DPR_FRAME_2P_BASE_ADDR);
}
if (ch->use_dtrc)
pitch = ch->pitch;
ch->frame_ctrl &= ~PITCH_MASK;
ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK);
}
static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel,
int g_sel, int b_sel)
{
u32 sel;
sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) |
((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) |
((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) |
((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK);
ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK |
G_COMP_SEL_MASK | B_COMP_SEL_MASK);
ch->mode_ctrl |= sel;
}
static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch,
const struct drm_format_info *format)
{
u32 val;
switch (format->format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV12_10LE40:
val = 0;
break;
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
val = 1;
break;
default:
val = 2;
break;
}
ch->pix_size = val;
ch->mode_ctrl &= ~PIX_SIZE_MASK;
ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK);
}
static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap)
{
ch->mode_ctrl &= ~PIX_UV_SWAP;
ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0);
}
static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap)
{
ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP;
ch->mode_ctrl |= (swap ? PIX_LUMA_UV_SWAP : 0);
}
static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en)
{
ch->mode_ctrl &= ~COMP_2PLANE_EN;
ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0);
}
static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en)
{
ch->mode_ctrl &= ~YUV_EN;
ch->mode_ctrl |= (en ? YUV_EN : 0);
}
void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
u32 sys_ctrl;
sys_ctrl = (en ? REPEAT_EN | RUN_EN : 0);
if (en) {
dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0);
dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0);
dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0);
}
if (ch->sys_ctrl != sys_ctrl)
ch->sys_ctrl_chgd = true;
ch->sys_ctrl = sys_ctrl;
}
struct rgb_comp_sel {
u32 drm_format;
int a_sel;
int r_sel;
int g_sel;
int b_sel;
};
static struct rgb_comp_sel comp_sel_map[] = {
{DRM_FORMAT_ARGB8888, 3, 2, 1, 0},
{DRM_FORMAT_XRGB8888, 3, 2, 1, 0},
{DRM_FORMAT_ABGR8888, 3, 0, 1, 2},
{DRM_FORMAT_XBGR8888, 3, 0, 1, 2},
{DRM_FORMAT_RGBA8888, 0, 3, 2, 1},
{DRM_FORMAT_RGBX8888, 0, 3, 2, 1},
{DRM_FORMAT_BGRA8888, 0, 1, 2, 3},
{DRM_FORMAT_BGRX8888, 0, 1, 2, 3},
};
static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel,
int *b_sel)
{
int i;
for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) {
if (comp_sel_map[i].drm_format == pix_fmt) {
*a_sel = comp_sel_map[i].a_sel;
*r_sel = comp_sel_map[i].r_sel;
*g_sel = comp_sel_map[i].g_sel;
*b_sel = comp_sel_map[i].b_sel;
return 0;
}
}
return -1;
}
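/*
 * Configure the RTRAM line buffers: 3-buffer mode is always used, while
 * 4-line buffering is disabled for the 2-plane YUV formats, which also
 * changes the active-rows and FIFO threshold settings below.
 */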
static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format)
{
u32 val, mask;
switch (pix_format) {
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV12_10LE40:
ch->rtram_3buf_en = true;
ch->rtram_4line_en = false;
break;
default:
ch->rtram_3buf_en = true;
ch->rtram_4line_en = true;
break;
}
val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0);
val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0);
mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN;
ch->mode_ctrl &= ~mask;
ch->mode_ctrl |= (val & mask);
val = (ch->rtram_4line_en ? 0 : NUM_ROWS_ACTIVE);
val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK;
val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK;
mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE;
ch->rtram_ctrl &= ~mask;
ch->rtram_ctrl |= (val & mask);
}
static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch,
const struct drm_format_info *format)
{
int a_sel, r_sel, g_sel, b_sel;
bool uv_swap, y_uv_swap;
switch (format->format) {
case DRM_FORMAT_YVYU:
uv_swap = true;
y_uv_swap = true;
break;
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV21:
uv_swap = true;
y_uv_swap = false;
break;
case DRM_FORMAT_YUYV:
uv_swap = false;
y_uv_swap = true;
break;
default:
uv_swap = false;
y_uv_swap = false;
break;
}
dcss_dpr_uv_swap(ch, uv_swap);
dcss_dpr_y_uv_swap(ch, y_uv_swap);
if (!format->is_yuv) {
if (!to_comp_sel(format->format, &a_sel, &r_sel,
&g_sel, &b_sel)) {
dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel);
} else {
dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0);
}
} else {
dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0);
}
}
static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier)
{
switch (ch->ch_num) {
case 0:
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
ch->tile = TILE_LINEAR;
break;
case DRM_FORMAT_MOD_VIVANTE_TILED:
ch->tile = TILE_GPU_STANDARD;
break;
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED_FC:
ch->tile = TILE_GPU_SUPER;
break;
default:
WARN_ON(1);
break;
}
break;
case 1:
case 2:
ch->tile = TILE_LINEAR;
break;
default:
WARN_ON(1);
return;
}
ch->mode_ctrl &= ~TILE_TYPE_MASK;
ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK);
}
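/*
 * Cache the new pixel format and program the YUV, component-select, 2-plane,
 * RTRAM and tiling settings for the channel. DTRC is used for the overlay
 * channels whenever the buffer is not linear.
 */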
void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
const struct drm_format_info *format, u64 modifier)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
ch->format = *format;
ch->use_dtrc = ch_num && modifier != DRM_FORMAT_MOD_LINEAR;
dcss_dpr_yuv_en(ch, format->is_yuv);
dcss_dpr_pix_size_set(ch, format);
dcss_dpr_setup_components(ch, format);
dcss_dpr_2plane_en(ch, format->num_planes == 2);
dcss_dpr_rtram_set(ch, format->format);
dcss_dpr_tile_set(ch, modifier);
}
/* This function will be called from interrupt context. */
void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr)
{
int chnum;
for (chnum = 0; chnum < 3; chnum++) {
struct dcss_dpr_ch *ch = &dpr->ch[chnum];
if (ch->sys_ctrl_chgd) {
dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id,
ch->sys_ctrl,
ch->base_ofs +
DCSS_DPR_SYSTEM_CTRL0);
ch->sys_ctrl_chgd = false;
}
}
}
void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK);
ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0;
ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0;
if (rotation & DRM_MODE_ROTATE_90)
ch->frame_ctrl |= 1 << ROT_ENC_POS;
else if (rotation & DRM_MODE_ROTATE_180)
ch->frame_ctrl |= 2 << ROT_ENC_POS;
else if (rotation & DRM_MODE_ROTATE_270)
ch->frame_ctrl |= 3 << ROT_ENC_POS;
}

View File

@ -0,0 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/component.h>
#include <drm/drm_of.h>
#include <drm/drmP.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
struct dcss_drv {
struct dcss_dev *dcss;
struct dcss_kms_dev *kms;
bool is_componentized;
};
struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
return mdrv ? mdrv->dcss : NULL;
}
struct drm_device *dcss_drv_dev_to_drm(struct device *dev)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
return mdrv ? &mdrv->kms->base : NULL;
}
static int dcss_drv_init(struct device *dev, bool componentized)
{
struct dcss_drv *mdrv;
int err = 0;
mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
if (!mdrv)
return -ENOMEM;
mdrv->is_componentized = componentized;
mdrv->dcss = dcss_dev_create(dev, componentized);
if (IS_ERR(mdrv->dcss)) {
err = PTR_ERR(mdrv->dcss);
goto err;
}
dev_set_drvdata(dev, mdrv);
mdrv->kms = dcss_kms_attach(mdrv->dcss, componentized);
if (IS_ERR(mdrv->kms)) {
err = PTR_ERR(mdrv->kms);
goto dcss_shutoff;
}
return 0;
dcss_shutoff:
dcss_dev_destroy(mdrv->dcss);
dev_set_drvdata(dev, NULL);
err:
devm_kfree(dev, mdrv);
return err;
}
static void dcss_drv_deinit(struct device *dev, bool componentized)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
if (!mdrv)
return;
dcss_kms_detach(mdrv->kms, componentized);
dcss_dev_destroy(mdrv->dcss);
dev_set_drvdata(dev, NULL);
}
static int dcss_drv_bind(struct device *dev)
{
return dcss_drv_init(dev, true);
}
static void dcss_drv_unbind(struct device *dev)
{
return dcss_drv_deinit(dev, true);
}
static const struct component_master_ops dcss_master_ops = {
.bind = dcss_drv_bind,
.unbind = dcss_drv_unbind,
};
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
}
static int dcss_drv_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct component_match *match = NULL;
struct device_node *remote;
if (!dev->of_node)
return -ENODEV;
remote = of_graph_get_remote_node(dev->of_node, 0, 0);
if (!remote)
return -ENODEV;
if (of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi")) {
of_node_put(remote);
return dcss_drv_init(dev, false);
}
drm_of_component_match_add(dev, &match, compare_of, remote);
of_node_put(remote);
return component_master_add_with_match(dev, &dcss_master_ops, match);
}
static int dcss_drv_platform_remove(struct platform_device *pdev)
{
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
if (mdrv->is_componentized)
component_master_del(&pdev->dev, &dcss_master_ops);
else
dcss_drv_deinit(&pdev->dev, false);
return 0;
}
static struct dcss_type_data dcss_types[] = {
[DCSS_IMX8MQ] = {
.name = "DCSS_IMX8MQ",
.blkctl_ofs = 0x2F000,
.ctxld_ofs = 0x23000,
.dtg_ofs = 0x20000,
.rdsrc_ofs = 0x22000,
.wrscl_ofs = 0x21000,
.scaler_ofs = 0x1C000,
.ss_ofs = 0x1B000,
.dpr_ofs = 0x18000,
.dec400d_ofs = 0x15000,
.hdr10_ofs = 0x00000,
.dtrc_ofs = 0x16000,
},
};
static const struct of_device_id dcss_of_match[] = {
{ .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], },
{},
};
MODULE_DEVICE_TABLE(of, dcss_of_match);
static const struct dev_pm_ops dcss_dev_pm = {
SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend,
dcss_dev_runtime_resume, NULL)
};
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
.remove = dcss_drv_platform_remove,
.driver = {
.name = "imx-dcss",
.of_match_table = dcss_of_match,
.pm = &dcss_dev_pm,
},
};
module_platform_driver(dcss_platform_driver);
MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>");
MODULE_DESCRIPTION("DCSS driver for i.MX8MQ");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,454 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "dcss-dev.h"
#define DCSS_DTG_TC_CONTROL_STATUS 0x00
#define CH3_EN BIT(0)
#define CH2_EN BIT(1)
#define CH1_EN BIT(2)
#define OVL_DATA_MODE BIT(3)
#define BLENDER_VIDEO_ALPHA_SEL BIT(7)
#define DTG_START BIT(8)
#define DBY_MODE_EN BIT(9)
#define CH1_ALPHA_SEL BIT(10)
#define CSS_PIX_COMP_SWAP_POS 12
#define CSS_PIX_COMP_SWAP_MASK GENMASK(14, 12)
#define DEFAULT_FG_ALPHA_POS 24
#define DEFAULT_FG_ALPHA_MASK GENMASK(31, 24)
#define DCSS_DTG_TC_DTG 0x04
#define DCSS_DTG_TC_DISP_TOP 0x08
#define DCSS_DTG_TC_DISP_BOT 0x0C
#define DCSS_DTG_TC_CH1_TOP 0x10
#define DCSS_DTG_TC_CH1_BOT 0x14
#define DCSS_DTG_TC_CH2_TOP 0x18
#define DCSS_DTG_TC_CH2_BOT 0x1C
#define DCSS_DTG_TC_CH3_TOP 0x20
#define DCSS_DTG_TC_CH3_BOT 0x24
#define TC_X_POS 0
#define TC_X_MASK GENMASK(12, 0)
#define TC_Y_POS 16
#define TC_Y_MASK GENMASK(28, 16)
#define DCSS_DTG_TC_CTXLD 0x28
#define TC_CTXLD_DB_Y_POS 0
#define TC_CTXLD_DB_Y_MASK GENMASK(12, 0)
#define TC_CTXLD_SB_Y_POS 16
#define TC_CTXLD_SB_Y_MASK GENMASK(28, 16)
#define DCSS_DTG_TC_CH1_BKRND 0x2C
#define DCSS_DTG_TC_CH2_BKRND 0x30
#define BKRND_R_Y_COMP_POS 20
#define BKRND_R_Y_COMP_MASK GENMASK(29, 20)
#define BKRND_G_U_COMP_POS 10
#define BKRND_G_U_COMP_MASK GENMASK(19, 10)
#define BKRND_B_V_COMP_POS 0
#define BKRND_B_V_COMP_MASK GENMASK(9, 0)
#define DCSS_DTG_BLENDER_DBY_RANGEINV 0x38
#define DCSS_DTG_BLENDER_DBY_RANGEMIN 0x3C
#define DCSS_DTG_BLENDER_DBY_BDP 0x40
#define DCSS_DTG_BLENDER_BKRND_I 0x44
#define DCSS_DTG_BLENDER_BKRND_P 0x48
#define DCSS_DTG_BLENDER_BKRND_T 0x4C
#define DCSS_DTG_LINE0_INT 0x50
#define DCSS_DTG_LINE1_INT 0x54
#define DCSS_DTG_BG_ALPHA_DEFAULT 0x58
#define DCSS_DTG_INT_STATUS 0x5C
#define DCSS_DTG_INT_CONTROL 0x60
#define DCSS_DTG_TC_CH3_BKRND 0x64
#define DCSS_DTG_INT_MASK 0x68
#define LINE0_IRQ BIT(0)
#define LINE1_IRQ BIT(1)
#define LINE2_IRQ BIT(2)
#define LINE3_IRQ BIT(3)
#define DCSS_DTG_LINE2_INT 0x6C
#define DCSS_DTG_LINE3_INT 0x70
#define DCSS_DTG_DBY_OL 0x74
#define DCSS_DTG_DBY_BL 0x78
#define DCSS_DTG_DBY_EL 0x7C
struct dcss_dtg {
struct device *dev;
struct dcss_ctxld *ctxld;
void __iomem *base_reg;
u32 base_ofs;
u32 ctx_id;
bool in_use;
bool hdmi_output;
u32 dis_ulc_x;
u32 dis_ulc_y;
u32 control_status;
u32 alpha;
int ctxld_kick_irq;
bool ctxld_kick_irq_en;
struct clk *pix_clk;
struct clk *pll_src_clk;
struct clk *pll_phy_ref_clk;
/*
* This will be passed on by DRM CRTC so that we can signal when DTG has
* been successfully stopped. Otherwise, any modesetting while DTG is
* still ON may result in unpredictable behavior.
*/
struct completion *dis_completion;
};
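/*
 * While the DTG is stopped there are no CTXLD kick interrupts, so the value is
 * written directly to the register as well as into the context; once the DTG
 * is running, the context loader alone applies the update.
 */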
static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs)
{
if (!dtg->in_use)
dcss_writel(val, dtg->base_reg + ofs);
dcss_ctxld_write(dtg->ctxld, dtg->ctx_id, val, dtg->base_ofs + ofs);
}
static irqreturn_t dcss_dtg_irq_handler(int irq, void *data)
{
struct dcss_dtg *dtg = data;
u32 status;
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
if (!(status & LINE0_IRQ))
return IRQ_HANDLED;
dcss_ctxld_kick(dtg->ctxld);
dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
return IRQ_HANDLED;
}
static int dcss_dtg_irq_config(struct dcss_dtg *dtg,
struct platform_device *pdev)
{
int ret;
dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick");
if (dtg->ctxld_kick_irq < 0) {
dev_err(dtg->dev, "dtg: can't get line2 irq number\n");
return dtg->ctxld_kick_irq;
}
ret = devm_request_irq(dtg->dev, dtg->ctxld_kick_irq,
dcss_dtg_irq_handler,
IRQF_TRIGGER_HIGH,
"dcss_ctxld_kick", dtg);
if (ret) {
dev_err(dtg->dev, "dtg: irq request failed.\n");
return ret;
}
disable_irq(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = false;
dcss_update(LINE0_IRQ, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
return 0;
}
int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
{
int ret = 0;
struct dcss_dtg *dtg;
dtg = devm_kzalloc(dcss->dev, sizeof(*dtg), GFP_KERNEL);
if (!dtg)
return -ENOMEM;
dcss->dtg = dtg;
dtg->dev = dcss->dev;
dtg->ctxld = dcss->ctxld;
dtg->hdmi_output = dcss->hdmi_output;
dtg->base_reg = devm_ioremap(dcss->dev, dtg_base, SZ_4K);
if (!dtg->base_reg) {
dev_err(dcss->dev, "dtg: unable to remap dtg base\n");
ret = -ENOMEM;
goto err_ioremap;
}
dtg->base_ofs = dtg_base;
dtg->ctx_id = CTX_DB;
dtg->pix_clk = dcss->pix_clk;
dtg->pll_src_clk = dcss->pll_src_clk;
dtg->pll_phy_ref_clk = dcss->pll_phy_ref_clk;
dtg->alpha = 255;
dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);
ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev));
if (ret)
goto err_irq;
return 0;
err_irq:
devm_iounmap(dtg->dev, dtg->base_reg);
err_ioremap:
devm_kfree(dtg->dev, dtg);
return ret;
}
void dcss_dtg_exit(struct dcss_dtg *dtg)
{
/* stop DTG */
dcss_writel(DTG_START, dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);
devm_free_irq(dtg->dev, dtg->ctxld_kick_irq, dtg);
if (dtg->base_reg)
devm_iounmap(dtg->dev, dtg->base_reg);
devm_kfree(dtg->dev, dtg);
}
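/*
 * Program the DTG timing: the frame and active-display corners are derived
 * from the videomode sync/porch values, the pixel clock is re-rated (and, for
 * HDMI output, the PLL source clock is reparented), and the LINE0/LINE1
 * interrupts are set up as the CTXLD kick and vblank triggers respectively.
 */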
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
{
u16 dtg_lrc_x, dtg_lrc_y;
u16 dis_ulc_x, dis_ulc_y;
u16 dis_lrc_x, dis_lrc_y;
u32 sb_ctxld_trig, db_ctxld_trig;
u32 pixclock = vm->pixelclock;
u32 actual_clk;
dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
vm->vactive - 1;
dis_ulc_x = vm->hsync_len + vm->hback_porch - 1;
dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1;
dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
vm->vactive - 1;
clk_disable_unprepare(dtg->pix_clk);
if (dtg->hdmi_output) {
int err;
clk_disable_unprepare(dtg->pll_src_clk);
err = clk_set_parent(dtg->pll_src_clk, dtg->pll_phy_ref_clk);
if (err < 0)
dev_warn(dtg->dev, "clk_set_parent() returned %d", err);
clk_prepare_enable(dtg->pll_src_clk);
}
clk_set_rate(dtg->pix_clk, vm->pixelclock);
clk_prepare_enable(dtg->pix_clk);
actual_clk = clk_get_rate(dtg->pix_clk);
if (pixclock != actual_clk) {
dev_info(dtg->dev,
"Pixel clock set to %u kHz instead of %u kHz.\n",
(actual_clk / 1000), (pixclock / 1000));
}
msleep(50);
dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x),
DCSS_DTG_TC_DTG);
dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x),
DCSS_DTG_TC_DISP_TOP);
dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x),
DCSS_DTG_TC_DISP_BOT);
dtg->dis_ulc_x = dis_ulc_x;
dtg->dis_ulc_y = dis_ulc_y;
sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) &
TC_CTXLD_SB_Y_MASK;
db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) &
TC_CTXLD_DB_Y_MASK;
dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig, DCSS_DTG_TC_CTXLD);
/* vblank trigger */
dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT);
/* CTXLD trigger */
dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT);
}
void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
int px, int py, int pw, int ph)
{
u16 p_ulc_x, p_ulc_y;
u16 p_lrc_x, p_lrc_y;
p_ulc_x = dtg->dis_ulc_x + px;
p_ulc_y = dtg->dis_ulc_y + py;
p_lrc_x = p_ulc_x + pw;
p_lrc_y = p_ulc_y + ph;
if (!px && !py && !pw && !ph) {
dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
} else {
dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x),
DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x),
DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
}
}
bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha)
{
if (ch_num)
return false;
return alpha != dtg->alpha;
}
void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
const struct drm_format_info *format, int alpha)
{
u32 alpha_val;
/* global alpha is only relevant for channel 0 */
if (ch_num)
return;
alpha_val = (alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK;
/*
* Use global alpha if pixel format does not have alpha channel or the
* user explicitly chose to use global alpha (i.e. alpha is not OPAQUE).
*/
if (!format->has_alpha || alpha != 255) {
dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
dtg->control_status |= alpha_val;
} else { /* use per-pixel alpha otherwise */
dtg->control_status |= CH1_ALPHA_SEL;
}
dtg->alpha = alpha;
}
void dcss_dtg_css_set(struct dcss_dtg *dtg, bool out_is_yuv)
{
dtg->control_status &= ~CSS_PIX_COMP_SWAP_MASK;
if (out_is_yuv)
return;
dtg->control_status |=
(0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK;
}
static void dcss_dtg_disable_callback(void *data)
{
struct dcss_dtg *dtg = data;
dtg->control_status &= ~DTG_START;
dcss_writel(dtg->control_status,
dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);
dtg->in_use = false;
complete(dtg->dis_completion);
}
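/*
 * Enabling starts the DTG through the context loader. Disabling is deferred:
 * the callback above is registered with CTXLD and clears DTG_START only after
 * the final context load completes, then signals the completion passed in by
 * the CRTC.
 */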
void dcss_dtg_enable(struct dcss_dtg *dtg, bool en,
struct completion *dis_completion)
{
if (!en) {
dtg->dis_completion = dis_completion;
dcss_ctxld_register_dtg_disable_cb(dtg->ctxld,
dcss_dtg_disable_callback,
dtg);
return;
}
dtg->dis_completion = NULL;
dtg->control_status |= DTG_START;
dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS);
dtg->in_use = true;
}
bool dcss_dtg_is_enabled(struct dcss_dtg *dtg)
{
return dtg->in_use;
}
void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en)
{
u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN};
u32 control_status;
control_status = dtg->control_status & ~ch_en_map[ch_num];
control_status |= en ? ch_en_map[ch_num] : 0;
if (dtg->control_status != control_status)
dcss_dtg_write(dtg, control_status, DCSS_DTG_TC_CONTROL_STATUS);
dtg->control_status = control_status;
}
void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en)
{
u32 status;
dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
if (en) {
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
dcss_writel(status & LINE1_IRQ,
dtg->base_reg + DCSS_DTG_INT_CONTROL);
}
}
void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en)
{
u32 status;
if (en) {
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
if (!dtg->ctxld_kick_irq_en) {
dcss_writel(status & LINE0_IRQ,
dtg->base_reg + DCSS_DTG_INT_CONTROL);
enable_irq(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = true;
return;
}
return;
}
if (!dtg->ctxld_kick_irq_en)
return;
disable_irq_nosync(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = false;
}
void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg)
{
dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
}
bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg)
{
return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ);
}

View File

@ -0,0 +1,514 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "dcss-dev.h"
#define DTRC_F0_OFS 0x00
#define DTRC_F1_OFS 0x60
#define DCSS_DTRC_DYDSADDR 0x00
#define DCSS_DTRC_DCDSADDR 0x04
#define DCSS_DTRC_DYTSADDR 0x08
#define DCSS_DTRC_DCTSADDR 0x0C
#define DCSS_DTRC_SIZE 0x10
#define FRAME_WIDTH_POS 0
#define FRAME_WIDTH_MASK GENMASK(9, 0)
#define FRAME_HEIGHT_POS 16
#define FRAME_HEIGHT_MASK GENMASK(25, 16)
#define DCSS_DTRC_SYSSA 0x14
#define DCSS_DTRC_SYSEA 0x18
#define DCSS_DTRC_SUVSSA 0x1C
#define DCSS_DTRC_SUVSEA 0x20
#define DCSS_DTRC_CROPORIG 0x24
#define DCSS_DTRC_CROPSIZE 0x28
#define CROP_HEIGHT_POS 16
#define CROP_HEIGHT_MASK GENMASK(28, 16)
#define CROP_WIDTH_POS 0
#define CROP_WIDTH_MASK GENMASK(12, 0)
#define DCSS_DTRC_DCTL 0x2C
#define CROPPING_EN BIT(18)
#define COMPRESSION_DIS BIT(17)
#define PIX_DEPTH_8BIT_EN BIT(1)
#define CONFIG_READY BIT(0)
#define DCSS_DTRC_DYDSADDR_EXT 0x30
#define DCSS_DTRC_DCDSADDR_EXT 0x34
#define DCSS_DTRC_DYTSADDR_EXT 0x38
#define DCSS_DTRC_DCTSADDR_EXT 0x3C
#define DCSS_DTRC_SYSSA_EXT 0x40
#define DCSS_DTRC_SYSEA_EXT 0x44
#define DCSS_DTRC_SUVSSA_EXT 0x48
#define DCSS_DTRC_SUVSEA_EXT 0x4C
#define DCSS_DTRC_INTEN 0xC0
#define DCSS_DTRC_FDINTR 0xC4
#define DCSS_DTRC_DTCTRL 0xC8
#define CURRENT_FRAME BIT(31)
#define ADDRESS_ID_ENABLE BIT(30)
#define ENDIANNESS_10BIT BIT(29)
#define MERGE_ARID_ENABLE BIT(28)
#define NON_G1_2_SWAP_MODE_POS 24
#define NON_G1_2_SWAP_MODE_MASK GENMASK(27, 24)
#define TABLE_DATA_SWAP_POS 20
#define TABLE_DATA_SWAP_MASK GENMASK(23, 20)
#define TILED_SWAP_POS 16
#define TILED_SWAP_MASK GENMASK(19, 16)
#define RASTER_SWAP_POS 12
#define RASTER_SWAP_MASK GENMASK(15, 12)
#define BURST_LENGTH_POS 4
#define BURST_LENGTH_MASK GENMASK(11, 4)
#define G1_TILED_DATA_EN BIT(3)
#define HOT_RESET BIT(2)
#define ARIDR_MODE_DETILE 0
#define ARIDR_MODE_BYPASS 2
#define DCSS_DTRC_ARIDR 0xCC
#define DCSS_DTRC_DTID2DDR 0xD0
#define DCSS_DTRC_CONFIG 0xD4
#define DCSS_DTRC_VER 0xD8
#define DCSS_DTRC_PFCTRL 0xF0
#define DCSS_DTRC_PFCR 0xF4
#define DCSS_DTRC_TOCR 0xF8
struct dcss_dtrc_ch {
struct dcss_dtrc *dtrc;
void __iomem *base_reg;
u32 base_ofs;
u32 xres;
u32 yres;
u32 pix_format;
u64 format_modifier;
u32 y_dec_ofs;
u32 uv_dec_ofs;
int curr_frame;
u32 dctl;
bool bypass;
bool running;
int irq;
int ch_num;
};
struct dcss_dtrc {
struct device *dev;
struct dcss_dtrc_ch ch[2];
u32 ctx_id;
struct dcss_ctxld *ctxld;
};
static irqreturn_t dcss_dtrc_irq_handler(int irq, void *data)
{
struct dcss_dtrc_ch *ch = data;
u32 b0, b1, curr_bank;
b0 = dcss_readl(ch->base_reg + DCSS_DTRC_DCTL) & 0x1;
b1 = dcss_readl(ch->base_reg + DTRC_F1_OFS + DCSS_DTRC_DCTL) & 0x1;
curr_bank = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
dcss_update(1, 1, ch->base_reg + DCSS_DTRC_FDINTR);
return IRQ_HANDLED;
}
static int dcss_dtrc_irq_config(struct dcss_dtrc *dtrc, int ch_num)
{
struct platform_device *pdev = to_platform_device(dtrc->dev);
struct dcss_dtrc_ch *ch = &dtrc->ch[ch_num];
char irq_name[20];
int ret;
sprintf(irq_name, "dtrc_ch%d", ch_num + 1);
irq_name[8] = 0;
ch->irq = platform_get_irq_byname(pdev, irq_name);
if (ch->irq < 0) {
dev_err(dtrc->dev, "dtrc: can't get DTRC irq\n");
return ch->irq;
}
ret = devm_request_irq(dtrc->dev, ch->irq,
dcss_dtrc_irq_handler,
IRQF_TRIGGER_HIGH,
"dcss-dtrc", ch);
if (ret) {
dev_err(dtrc->dev, "dtrc: irq request failed.\n");
return ret;
}
dcss_writel(1, ch->base_reg + DCSS_DTRC_INTEN);
return 0;
}
static int dcss_dtrc_ch_init_all(struct dcss_dtrc *dtrc, u32 dtrc_base)
{
struct dcss_dtrc_ch *ch;
int i, ret;
for (i = 0; i < 2; i++) {
ch = &dtrc->ch[i];
ch->base_ofs = dtrc_base + i * 0x1000;
ch->base_reg = devm_ioremap(dtrc->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(dtrc->dev, "dtrc: unable to remap ch base\n");
return -ENOMEM;
}
ch->ch_num = i;
ch->dtrc = dtrc;
ret = dcss_dtrc_irq_config(dtrc, i);
if (ret)
return ret;
}
return 0;
}
static void dcss_dtrc_write(struct dcss_dtrc_ch *ch, u32 val, u32 ofs)
{
dcss_ctxld_write(ch->dtrc->ctxld, ch->dtrc->ctx_id,
val, ch->base_ofs + ofs);
}
static void dcss_dtrc_write_irqsafe(struct dcss_dtrc_ch *ch, u32 val, u32 ofs)
{
dcss_ctxld_write_irqsafe(ch->dtrc->ctxld, ch->dtrc->ctx_id,
val, ch->base_ofs + ofs);
}
int dcss_dtrc_init(struct dcss_dev *dcss, unsigned long dtrc_base)
{
struct dcss_dtrc *dtrc;
dtrc = devm_kzalloc(dcss->dev, sizeof(*dtrc), GFP_KERNEL);
if (!dtrc)
return -ENOMEM;
dcss->dtrc = dtrc;
dtrc->dev = dcss->dev;
dtrc->ctxld = dcss->ctxld;
dtrc->ctx_id = CTX_SB_HP;
if (dcss_dtrc_ch_init_all(dtrc, dtrc_base)) {
struct dcss_dtrc_ch *ch;
int i;
for (i = 0; i < 2; i++) {
ch = &dtrc->ch[i];
if (ch->irq)
devm_free_irq(dtrc->dev, ch->irq, ch);
if (ch->base_reg)
devm_iounmap(dtrc->dev, ch->base_reg);
}
devm_kfree(dtrc->dev, dtrc);
return -ENOMEM;
}
return 0;
}
void dcss_dtrc_exit(struct dcss_dtrc *dtrc)
{
int ch_no;
for (ch_no = 0; ch_no < 2; ch_no++) {
struct dcss_dtrc_ch *ch = &dtrc->ch[ch_no];
if (ch->base_reg) {
/* reset the module to default */
dcss_writel(HOT_RESET,
ch->base_reg + DCSS_DTRC_DTCTRL);
devm_iounmap(dtrc->dev, ch->base_reg);
}
}
devm_kfree(dtrc->dev, dtrc);
}
void dcss_dtrc_bypass(struct dcss_dtrc *dtrc, int ch_num)
{
struct dcss_dtrc_ch *ch;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
if (ch->bypass)
return;
dcss_dtrc_write(ch, ARIDR_MODE_BYPASS, DCSS_DTRC_DTCTRL);
dcss_dtrc_write(ch, 0, DCSS_DTRC_DYTSADDR);
dcss_dtrc_write(ch, 0, DCSS_DTRC_DCTSADDR);
dcss_dtrc_write(ch, 0x0f0e0100, DCSS_DTRC_ARIDR);
dcss_dtrc_write(ch, 0x0f0e, DCSS_DTRC_DTID2DDR);
ch->bypass = true;
}
void dcss_dtrc_addr_set(struct dcss_dtrc *dtrc, int ch_num,
u32 p1_ba, u32 p2_ba, uint64_t dec_table_ofs)
{
struct dcss_dtrc_ch *ch;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
dcss_dtrc_write(ch, p1_ba, DCSS_DTRC_DYDSADDR);
dcss_dtrc_write(ch, p2_ba, DCSS_DTRC_DCDSADDR);
dcss_dtrc_write(ch, p1_ba, DTRC_F1_OFS + DCSS_DTRC_DYDSADDR);
dcss_dtrc_write(ch, p2_ba, DTRC_F1_OFS + DCSS_DTRC_DCDSADDR);
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED) {
ch->y_dec_ofs = dec_table_ofs & 0xFFFFFFFF;
ch->uv_dec_ofs = dec_table_ofs >> 32;
dcss_dtrc_write(ch, p1_ba + ch->y_dec_ofs,
DCSS_DTRC_DYTSADDR);
dcss_dtrc_write(ch, p1_ba + ch->uv_dec_ofs,
DCSS_DTRC_DCTSADDR);
dcss_dtrc_write(ch, p1_ba + ch->y_dec_ofs,
DTRC_F1_OFS + DCSS_DTRC_DYTSADDR);
dcss_dtrc_write(ch, p1_ba + ch->uv_dec_ofs,
DTRC_F1_OFS + DCSS_DTRC_DCTSADDR);
}
ch->bypass = false;
}
void dcss_dtrc_set_res(struct dcss_dtrc *dtrc, int ch_num,
struct drm_plane_state *state, u32 *dtrc_w, u32 *dtrc_h)
{
struct drm_framebuffer *fb = state->fb;
u32 pixel_format = fb->format->format;
struct dcss_dtrc_ch *ch;
u32 frame_height, frame_width;
u32 crop_w, crop_h, crop_orig_w, crop_orig_h;
int bank;
u32 old_xres, old_yres, xres, yres;
u32 x1, y1, x2, y2;
u32 pix_depth;
u16 width_align = 0;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
bank = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
ch->pix_format = pixel_format;
ch->format_modifier = fb->modifier;
pix_depth = ch->pix_format == DRM_FORMAT_NV12_10LE40 ? 10 : 8;
old_xres = state->src_w >> 16;
old_yres = state->src_h >> 16;
x1 = (state->src.x1 >> 16) & ~1;
y1 = (state->src.y1 >> 16) & ~1;
x2 = state->src.x2 >> 16;
y2 = state->src.y2 >> 16;
xres = x2 - x1;
yres = y2 - y1;
frame_height = ((old_yres >> 3) << FRAME_HEIGHT_POS) & FRAME_HEIGHT_MASK;
frame_width = ((old_xres >> 3) << FRAME_WIDTH_POS) & FRAME_WIDTH_MASK;
dcss_dtrc_write(ch, frame_height | frame_width,
DTRC_F1_OFS * bank + DCSS_DTRC_SIZE);
dcss_dtrc_write(ch, frame_height | frame_width,
DTRC_F1_OFS * (bank ^ 1) + DCSS_DTRC_SIZE);
/*
* Image original size is aligned:
* - 128 pixels for width (8-bit) or 256 (10-bit);
* - 8 lines for height;
*/
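/*
 * Illustrative example (numbers assumed, not from a datasheet): for an
 * 8-bit 1920x1080 source, width_align below is 0x7f, so xres stays at 1920
 * (already a multiple of 128), while yres is rounded up to 1088 by the 0xf
 * mask used further down.
 */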
width_align = ch->pix_format == DRM_FORMAT_NV12_10LE40 ? 0xff : 0x7f;
if (xres == old_xres && !(xres & width_align) &&
yres == old_yres && !(yres & 0xf)) {
ch->dctl &= ~CROPPING_EN;
goto exit;
}
/* align the image size: down align for compressed formats */
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED && x1)
xres = xres & ~width_align;
else
xres = (xres + width_align) & ~width_align;
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED && y1)
yres = yres & ~0xf;
else
yres = (yres + 0xf) & ~0xf;
crop_orig_w = (x1 << CROP_WIDTH_POS) & CROP_WIDTH_MASK;
crop_orig_h = (y1 << CROP_HEIGHT_POS) & CROP_HEIGHT_MASK;
dcss_dtrc_write(ch, crop_orig_w | crop_orig_h,
DCSS_DTRC_CROPORIG);
dcss_dtrc_write(ch, crop_orig_w | crop_orig_h,
DTRC_F1_OFS + DCSS_DTRC_CROPORIG);
crop_w = (xres << CROP_WIDTH_POS) & CROP_WIDTH_MASK;
crop_h = (yres << CROP_HEIGHT_POS) & CROP_HEIGHT_MASK;
dcss_dtrc_write(ch, crop_w | crop_h,
DTRC_F1_OFS * bank + DCSS_DTRC_CROPSIZE);
dcss_dtrc_write(ch, crop_w | crop_h,
DTRC_F1_OFS * (bank ^ 1) + DCSS_DTRC_CROPSIZE);
ch->dctl |= CROPPING_EN;
exit:
dcss_dtrc_write(ch, xres * yres * pix_depth / 8,
DCSS_DTRC_SYSEA);
dcss_dtrc_write(ch, xres * yres * pix_depth / 8,
DTRC_F1_OFS + DCSS_DTRC_SYSEA);
dcss_dtrc_write(ch, 0x10000000 + xres * yres * pix_depth / 8 / 2,
DCSS_DTRC_SUVSEA);
dcss_dtrc_write(ch, 0x10000000 + xres * yres * pix_depth / 8 / 2,
DTRC_F1_OFS + DCSS_DTRC_SUVSEA);
*dtrc_w = xres;
*dtrc_h = yres;
if (ch->running)
return;
dcss_dtrc_write(ch, 0x0, DCSS_DTRC_SYSSA);
dcss_dtrc_write(ch, 0x0, DTRC_F1_OFS + DCSS_DTRC_SYSSA);
dcss_dtrc_write(ch, 0x10000000, DCSS_DTRC_SUVSSA);
dcss_dtrc_write(ch, 0x10000000, DTRC_F1_OFS + DCSS_DTRC_SUVSSA);
}
void dcss_dtrc_enable(struct dcss_dtrc *dtrc, int ch_num, bool enable)
{
struct dcss_dtrc_ch *ch;
int curr_frame;
u32 fdctl, dtctrl;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
if (ch->bypass)
return;
if (!enable) {
ch->running = false;
return;
}
if (ch->running)
return;
dcss_update(HOT_RESET, HOT_RESET, ch->base_reg + DCSS_DTRC_DTCTRL);
while (dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) & HOT_RESET)
usleep_range(100, 200);
dcss_dtrc_write(ch, 0x0f0e0100,
DCSS_DTRC_ARIDR);
dcss_dtrc_write(ch, 0x0f0e,
DCSS_DTRC_DTID2DDR);
dtctrl = ADDRESS_ID_ENABLE | MERGE_ARID_ENABLE |
((0xF << TABLE_DATA_SWAP_POS) & TABLE_DATA_SWAP_MASK) |
((0x10 << BURST_LENGTH_POS) & BURST_LENGTH_MASK);
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G1_TILED)
dtctrl |= G1_TILED_DATA_EN;
dcss_dtrc_write(ch, dtctrl, DCSS_DTRC_DTCTRL);
curr_frame = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
fdctl = ch->dctl & ~(PIX_DEPTH_8BIT_EN | COMPRESSION_DIS);
fdctl |= ch->pix_format == DRM_FORMAT_NV12_10LE40 ? 0 : PIX_DEPTH_8BIT_EN;
if (ch->format_modifier != DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED)
fdctl |= COMPRESSION_DIS;
dcss_dtrc_write(ch, fdctl,
(curr_frame ^ 1) * DTRC_F1_OFS + DCSS_DTRC_DCTL);
dcss_dtrc_write(ch, fdctl | CONFIG_READY,
curr_frame * DTRC_F1_OFS + DCSS_DTRC_DCTL);
ch->curr_frame = curr_frame;
ch->dctl = fdctl;
ch->running = true;
}
bool dcss_dtrc_ch_running(struct dcss_dtrc *dtrc, int ch_num)
{
struct dcss_dtrc_ch *ch;
if (ch_num == 0)
return false;
ch = &dtrc->ch[ch_num - 1];
return ch->running;
}
bool dcss_dtrc_is_running(struct dcss_dtrc *dtrc)
{
return dtrc->ch[0].running || dtrc->ch[1].running;
}
static void dcss_dtrc_ch_switch_banks(struct dcss_dtrc *dtrc, int dtrc_ch)
{
struct dcss_dtrc_ch *ch = &dtrc->ch[dtrc_ch];
u32 b0, b1;
if (!ch->running)
return;
b0 = dcss_readl(ch->base_reg + DCSS_DTRC_DCTL) & 0x1;
b1 = dcss_readl(ch->base_reg + DTRC_F1_OFS + DCSS_DTRC_DCTL) & 0x1;
ch->curr_frame = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
dcss_dtrc_write_irqsafe(ch, ch->dctl | CONFIG_READY,
(ch->curr_frame ^ 1) * DTRC_F1_OFS + DCSS_DTRC_DCTL);
}
void dcss_dtrc_switch_banks(struct dcss_dtrc *dtrc)
{
dcss_dtrc_ch_switch_banks(dtrc, 0);
dcss_dtrc_ch_switch_banks(dtrc, 1);
}

File diff suppressed because it is too large.

View File

@ -0,0 +1,585 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <drm/drm_fourcc.h>
#include "dcss-dev.h"
#define USE_TBL_HEADER
#ifdef USE_TBL_HEADER
#include "dcss-hdr10-tables.h"
#endif
#define USE_CTXLD
#define DCSS_HDR10_A0_LUT 0x0000
#define DCSS_HDR10_A1_LUT 0x1000
#define DCSS_HDR10_A2_LUT 0x2000
/* one CSCA and CSCB for each channel(pipe) */
#define DCSS_HDR10_CSCA_BASE 0x3000
#define DCSS_HDR10_CSCB_BASE 0x3800
/* one CSCO for all channels(pipes) */
#define DCSS_HDR10_CSCO_BASE 0x3000
#define DCSS_HDR10_LUT_CONTROL (DCSS_HDR10_CSCA_BASE + 0x80)
#define LUT_ENABLE BIT(0)
#define LUT_EN_FOR_ALL_PELS BIT(1)
#define LUT_BYPASS BIT(15)
#define DCSS_HDR10_FL2FX (DCSS_HDR10_CSCB_BASE + 0x74)
#define DCSS_HDR10_LTNL (DCSS_HDR10_CSCO_BASE + 0x74)
#define LTNL_PASS_THRU BIT(0)
#define FIX2FLT_DISABLE BIT(1)
#define LTNL_EN_FOR_ALL_PELS BIT(2)
#define FIX2FLT_EN_FOR_ALL_PELS BIT(3)
/* following offsets are relative to CSC(A|B|O)_BASE */
#define DCSS_HDR10_CSC_CONTROL 0x00
#define CSC_EN BIT(0)
#define CSC_ALL_PIX_EN BIT(1)
#define CSC_BYPASS BIT(15)
#define DCSS_HDR10_CSC_H00 0x04
#define DCSS_HDR10_CSC_H10 0x08
#define DCSS_HDR10_CSC_H20 0x0C
#define DCSS_HDR10_CSC_H01 0x10
#define DCSS_HDR10_CSC_H11 0x14
#define DCSS_HDR10_CSC_H21 0x18
#define DCSS_HDR10_CSC_H02 0x1C
#define DCSS_HDR10_CSC_H12 0x20
#define DCSS_HDR10_CSC_H22 0x24
#define H_COEF_MASK GENMASK(15, 0)
#define DCSS_HDR10_CSC_IO0 0x28
#define DCSS_HDR10_CSC_IO1 0x2C
#define DCSS_HDR10_CSC_IO2 0x30
#define PRE_OFFSET_MASK GENMASK(9, 0)
#define DCSS_HDR10_CSC_IO_MIN0 0x34
#define DCSS_HDR10_CSC_IO_MIN1 0x38
#define DCSS_HDR10_CSC_IO_MIN2 0x3C
#define DCSS_HDR10_CSC_IO_MAX0 0x40
#define DCSS_HDR10_CSC_IO_MAX1 0x44
#define DCSS_HDR10_CSC_IO_MAX2 0x48
#define IO_CLIP_MASK GENMASK(9, 0)
#define DCSS_HDR10_CSC_NORM 0x4C
#define NORM_MASK GENMASK(4, 0)
#define DCSS_HDR10_CSC_OO0 0x50
#define DCSS_HDR10_CSC_OO1 0x54
#define DCSS_HDR10_CSC_OO2 0x58
#define POST_OFFSET_MASK GENMASK(27, 0)
#define DCSS_HDR10_CSC_OMIN0 0x5C
#define DCSS_HDR10_CSC_OMIN1 0x60
#define DCSS_HDR10_CSC_OMIN2 0x64
#define DCSS_HDR10_CSC_OMAX0 0x68
#define DCSS_HDR10_CSC_OMAX1 0x6C
#define DCSS_HDR10_CSC_OMAX2 0x70
#define POST_CLIP_MASK GENMASK(9, 0)
#define HDR10_IPIPE_LUT_MAX_ENTRIES 1024
#define HDR10_OPIPE_LUT_MAX_ENTRIES 1023
#define HDR10_CSC_MAX_REGS 29
#define OPIPE_CH_NO 3
/* Pipe config descriptor */
/* bits per component */
#define HDR10_BPC_POS 0
#define HDR10_BPC_MASK GENMASK(1, 0)
/* colorspace */
#define HDR10_CS_POS 2
#define HDR10_CS_MASK GENMASK(3, 2)
/* nonlinearity type */
#define HDR10_NL_POS 4
#define HDR10_NL_MASK GENMASK(8, 4)
/* pixel range */
#define HDR10_PR_POS 9
#define HDR10_PR_MASK GENMASK(10, 9)
/* gamut type */
#define HDR10_G_POS 11
#define HDR10_G_MASK GENMASK(15, 11)
/* FW Table Type Descriptor */
#define HDR10_TT_LUT BIT(0)
#define HDR10_TT_CSCA BIT(1)
#define HDR10_TT_CSCB BIT(2)
/* Pipe type */
#define HDR10_PT_OUTPUT BIT(3)
/* Input pipe config descriptor */
#define HDR10_IPIPE_DESC_POS 4
#define HDR10_IPIPE_DESC_MASK GENMASK(19, 4)
/* Output pipe config descriptor */
#define HDR10_OPIPE_DESC_POS 20
#define HDR10_OPIPE_DESC_MASK GENMASK(35, 20)
/* config invalid */
#define HDR10_DESC_INVALID BIT(63)
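/*
 * Summary of the 64-bit FW table descriptor laid out above:
 *   bits  0..2  - table type (LUT / CSCA / CSCB);
 *   bit   3     - pipe type (output pipe when set);
 *   bits  4..19 - input pipe config descriptor;
 *   bits 20..35 - output pipe config descriptor;
 *   bit  63     - descriptor invalid.
 */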
enum dcss_hdr10_csc {
HDR10_CSCA,
HDR10_CSCB,
};
struct dcss_hdr10_tbl_node {
struct list_head node;
u64 tbl_descriptor;
u32 *tbl_data;
};
struct dcss_hdr10_opipe_tbls {
struct list_head lut;
struct list_head csc;
};
struct dcss_hdr10_ipipe_tbls {
struct list_head lut;
struct list_head csca;
struct list_head cscb;
};
struct dcss_hdr10_ch {
struct dcss_hdr10 *hdr10;
void __iomem *base_reg;
u32 base_ofs;
u64 old_cfg_desc;
u32 id;
};
struct dcss_hdr10 {
struct device *dev;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_hdr10_ch ch[4]; /* 4th channel is, actually, OPIPE */
struct dcss_hdr10_ipipe_tbls *ipipe_tbls;
struct dcss_hdr10_opipe_tbls *opipe_tbls;
u8 *fw_data;
u32 fw_size;
};
static void dcss_hdr10_write(struct dcss_hdr10_ch *ch, u32 val, u32 ofs)
{
struct dcss_hdr10 *hdr10 = ch->hdr10;
dcss_ctxld_write(hdr10->ctxld, hdr10->ctx_id, val, ch->base_ofs + ofs);
}
static void dcss_hdr10_csc_fill(struct dcss_hdr10_ch *ch,
enum dcss_hdr10_csc csc_to_use,
u32 *map)
{
int i;
u32 csc_base_ofs[] = {
DCSS_HDR10_CSCA_BASE + DCSS_HDR10_CSC_CONTROL,
DCSS_HDR10_CSCB_BASE + DCSS_HDR10_CSC_CONTROL,
};
for (i = 0; i < HDR10_CSC_MAX_REGS; i++) {
u32 reg_ofs = csc_base_ofs[csc_to_use] + i * sizeof(u32);
dcss_hdr10_write(ch, map[i], reg_ofs);
}
}
static void dcss_hdr10_lut_fill(struct dcss_hdr10_ch *ch, u32 *map)
{
int i, comp;
u32 lut_base_ofs, ctrl_ofs, lut_entries;
if (ch->id == OPIPE_CH_NO) {
ctrl_ofs = DCSS_HDR10_LTNL;
lut_entries = HDR10_OPIPE_LUT_MAX_ENTRIES;
} else {
ctrl_ofs = DCSS_HDR10_LUT_CONTROL;
lut_entries = HDR10_IPIPE_LUT_MAX_ENTRIES;
}
if (ch->id != OPIPE_CH_NO)
dcss_hdr10_write(ch, *map++, ctrl_ofs);
for (comp = 0; comp < 3; comp++) {
lut_base_ofs = DCSS_HDR10_A0_LUT + comp * 0x1000;
if (ch->id == OPIPE_CH_NO) {
dcss_hdr10_write(ch, map[0], lut_base_ofs);
lut_base_ofs += 4;
}
for (i = 0; i < lut_entries; i++) {
u32 reg_ofs = lut_base_ofs + i * sizeof(u32);
dcss_hdr10_write(ch, map[i], reg_ofs);
}
}
map += lut_entries;
if (ch->id != OPIPE_CH_NO)
dcss_hdr10_write(ch, *map, DCSS_HDR10_FL2FX);
else
dcss_hdr10_write(ch, *map, ctrl_ofs);
}
static int dcss_hdr10_ch_init_all(struct dcss_hdr10 *hdr10,
unsigned long hdr10_base)
{
struct dcss_hdr10_ch *ch;
int i;
for (i = 0; i < 4; i++) {
ch = &hdr10->ch[i];
ch->base_ofs = hdr10_base + i * 0x4000;
ch->base_reg = devm_ioremap(hdr10->dev, ch->base_ofs, SZ_16K);
if (!ch->base_reg) {
dev_err(hdr10->dev, "hdr10: unable to remap ch base\n");
return -ENOMEM;
}
ch->old_cfg_desc = HDR10_DESC_INVALID;
ch->id = i;
ch->hdr10 = hdr10;
}
return 0;
}
static u32 *dcss_hdr10_find_tbl(u64 desc, struct list_head *head)
{
struct list_head *node;
struct dcss_hdr10_tbl_node *tbl_node;
list_for_each(node, head) {
tbl_node = container_of(node, struct dcss_hdr10_tbl_node, node);
if ((tbl_node->tbl_descriptor & desc) == desc)
return tbl_node->tbl_data;
}
return NULL;
}
static int dcss_hdr10_get_tbls(struct dcss_hdr10 *hdr10, bool input,
u64 desc, u32 **lut, u32 **csca, u32 **cscb)
{
struct list_head *lut_list, *csca_list, *cscb_list;
lut_list = input ? &hdr10->ipipe_tbls->lut : &hdr10->opipe_tbls->lut;
csca_list = input ? &hdr10->ipipe_tbls->csca : &hdr10->opipe_tbls->csc;
cscb_list = input ? &hdr10->ipipe_tbls->cscb : NULL;
*lut = dcss_hdr10_find_tbl(desc, lut_list);
*csca = dcss_hdr10_find_tbl(desc, csca_list);
*cscb = NULL;
if (cscb_list)
*cscb = dcss_hdr10_find_tbl(desc, cscb_list);
return 0;
}
static void dcss_hdr10_write_pipe_tbls(struct dcss_hdr10_ch *ch,
u32 *lut, u32 *csca, u32 *cscb)
{
if (csca)
dcss_hdr10_csc_fill(ch, HDR10_CSCA, csca);
if (ch->id != OPIPE_CH_NO && cscb)
dcss_hdr10_csc_fill(ch, HDR10_CSCB, cscb);
if (lut)
dcss_hdr10_lut_fill(ch, lut);
}
static int dcss_hdr10_tbl_add(struct dcss_hdr10 *hdr10, u64 desc, u32 sz,
u32 *data)
{
struct device *dev = hdr10->dev;
struct dcss_hdr10_tbl_node *node;
node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
/* we don't need to store the table type and pipe type */
node->tbl_descriptor = desc >> 4;
node->tbl_data = data;
if (!(desc & HDR10_PT_OUTPUT)) {
if (desc & HDR10_TT_LUT)
list_add(&node->node, &hdr10->ipipe_tbls->lut);
else if (desc & HDR10_TT_CSCA)
list_add(&node->node, &hdr10->ipipe_tbls->csca);
else if (desc & HDR10_TT_CSCB)
list_add(&node->node, &hdr10->ipipe_tbls->cscb);
return 0;
}
if (desc & HDR10_TT_LUT)
list_add(&node->node, &hdr10->opipe_tbls->lut);
else if (desc & HDR10_TT_CSCA)
list_add(&node->node, &hdr10->opipe_tbls->csc);
return 0;
}
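/*
 * Layout of the table blob, as implied by the parser below (a sketch, not a
 * formal format description): a sequence of records, each made of a 64-bit
 * table descriptor, a 32-bit size in 32-bit words, and 'size' words of
 * table data.
 */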
static int dcss_hdr10_parse_fw_data(struct dcss_hdr10 *hdr10)
{
u32 *data = (u32 *)hdr10->fw_data;
u32 remaining = hdr10->fw_size / sizeof(u32);
u64 tbl_desc;
u32 tbl_size;
int ret;
while (remaining) {
tbl_desc = *((u64 *)data);
data += 2;
tbl_size = *data++;
ret = dcss_hdr10_tbl_add(hdr10, tbl_desc, tbl_size, data);
if (ret)
return ret;
data += tbl_size;
remaining -= tbl_size + 3;
}
return 0;
}
static void dcss_hdr10_cleanup_tbls(struct dcss_hdr10 *hdr10)
{
int i;
struct dcss_hdr10_tbl_node *tbl_node, *next;
struct list_head *tbls[] = {
&hdr10->ipipe_tbls->lut,
&hdr10->ipipe_tbls->csca,
&hdr10->ipipe_tbls->cscb,
&hdr10->opipe_tbls->lut,
&hdr10->opipe_tbls->csc,
};
for (i = 0; i < 5; i++) {
list_for_each_entry_safe(tbl_node, next, tbls[i], node) {
list_del(&tbl_node->node);
devm_kfree(hdr10->dev, tbl_node);
}
}
devm_kfree(hdr10->dev, hdr10->opipe_tbls);
devm_kfree(hdr10->dev, hdr10->ipipe_tbls);
}
#ifndef USE_TBL_HEADER
static void dcss_hdr10_fw_handler(const struct firmware *fw, void *context)
{
struct dcss_hdr10 *hdr10 = context;
int i;
if (!fw) {
dev_err(hdr10->dev, "hdr10: DCSS FW load failed.\n");
return;
}
/* we need to keep the tables for the entire life of the driver */
hdr10->fw_data = devm_kzalloc(hdr10->dev, fw->size, GFP_KERNEL);
if (!hdr10->fw_data)
return;
memcpy(hdr10->fw_data, fw->data, fw->size);
hdr10->fw_size = fw->size;
release_firmware(fw);
if (dcss_hdr10_parse_fw_data(hdr10)) {
dcss_hdr10_cleanup_tbls(hdr10);
return;
}
for (i = 0; i < 4; i++) {
u32 *lut, *csca, *cscb;
struct dcss_hdr10_ch *ch = &hdr10->ch[i];
bool is_input_pipe = i != OPIPE_CH_NO ? true : false;
if (ch->old_cfg_desc != HDR10_DESC_INVALID) {
dcss_hdr10_get_tbls(hdr10, is_input_pipe,
ch->old_cfg_desc, &lut,
&csca, &cscb);
dcss_hdr10_write_pipe_tbls(ch, lut, csca, cscb);
}
}
dev_info(hdr10->dev, "hdr10: DCSS FW loaded successfully\n");
}
#endif
static int dcss_hdr10_tbls_init(struct dcss_hdr10 *hdr10)
{
struct device *dev = hdr10->dev;
hdr10->ipipe_tbls = devm_kzalloc(dev, sizeof(*hdr10->ipipe_tbls),
GFP_KERNEL);
if (!hdr10->ipipe_tbls)
return -ENOMEM;
INIT_LIST_HEAD(&hdr10->ipipe_tbls->lut);
INIT_LIST_HEAD(&hdr10->ipipe_tbls->csca);
INIT_LIST_HEAD(&hdr10->ipipe_tbls->cscb);
hdr10->opipe_tbls = devm_kzalloc(dev, sizeof(*hdr10->opipe_tbls),
GFP_KERNEL);
if (!hdr10->opipe_tbls) {
devm_kfree(dev, hdr10->ipipe_tbls);
return -ENOMEM;
}
INIT_LIST_HEAD(&hdr10->opipe_tbls->lut);
INIT_LIST_HEAD(&hdr10->opipe_tbls->csc);
return 0;
}
int dcss_hdr10_init(struct dcss_dev *dcss, unsigned long hdr10_base)
{
int ret;
struct dcss_hdr10 *hdr10;
hdr10 = devm_kzalloc(dcss->dev, sizeof(*hdr10), GFP_KERNEL);
if (!hdr10)
return -ENOMEM;
dcss->hdr10 = hdr10;
hdr10->dev = dcss->dev;
hdr10->ctx_id = CTX_SB_HP;
hdr10->ctxld = dcss->ctxld;
ret = dcss_hdr10_tbls_init(hdr10);
if (ret < 0) {
dev_err(dcss->dev, "hdr10: Cannot init table lists.\n");
goto cleanup;
}
#ifndef USE_TBL_HEADER
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, "dcss.fw",
dcss->dev, GFP_KERNEL, hdr10,
dcss_hdr10_fw_handler);
if (ret < 0) {
dev_err(dcss->dev, "hdr10: Cannot async load DCSS FW.\n");
goto cleanup_tbls;
}
#else
hdr10->fw_data = (u8 *)dcss_hdr10_tables;
hdr10->fw_size = sizeof(dcss_hdr10_tables);
ret = dcss_hdr10_parse_fw_data(hdr10);
if (ret)
goto cleanup_tbls;
#endif
ret = dcss_hdr10_ch_init_all(hdr10, hdr10_base);
if (ret) {
int i;
for (i = 0; i < 4; i++) {
if (hdr10->ch[i].base_reg)
devm_iounmap(hdr10->dev, hdr10->ch[i].base_reg);
}
goto cleanup_tbls;
}
return 0;
cleanup_tbls:
dcss_hdr10_cleanup_tbls(hdr10);
cleanup:
devm_kfree(hdr10->dev, hdr10);
return ret;
}
void dcss_hdr10_exit(struct dcss_hdr10 *hdr10)
{
int i;
for (i = 0; i < 4; i++) {
if (hdr10->ch[i].base_reg)
devm_iounmap(hdr10->dev, hdr10->ch[i].base_reg);
}
dcss_hdr10_cleanup_tbls(hdr10);
devm_kfree(hdr10->dev, hdr10);
}
static u32 dcss_hdr10_pipe_desc(struct dcss_hdr10_pipe_cfg *pipe_cfg)
{
u32 desc;
desc = 2 << HDR10_BPC_POS;
desc |= pipe_cfg->is_yuv ? 2 << HDR10_CS_POS : 1 << HDR10_CS_POS;
desc |= ((1 << pipe_cfg->nl) << HDR10_NL_POS) & HDR10_NL_MASK;
desc |= ((1 << pipe_cfg->pr) << HDR10_PR_POS) & HDR10_PR_MASK;
desc |= ((1 << pipe_cfg->g) << HDR10_G_POS) & HDR10_G_MASK;
return desc;
}
static u64 dcss_hdr10_get_desc(struct dcss_hdr10_pipe_cfg *ipipe_cfg,
struct dcss_hdr10_pipe_cfg *opipe_cfg)
{
u32 ipipe_desc, opipe_desc;
ipipe_desc = dcss_hdr10_pipe_desc(ipipe_cfg);
opipe_desc = dcss_hdr10_pipe_desc(opipe_cfg);
return (ipipe_desc & 0xFFFF) | ((opipe_desc & 0xFFFF) << 16);
}
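/*
 * The combined runtime descriptor keeps the input pipe config in bits 0-15
 * and the output pipe config in bits 16-31; dcss_hdr10_setup() masks off
 * the low half when programming the output pipe.
 */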
static void dcss_hdr10_pipe_setup(struct dcss_hdr10_ch *ch, u64 desc)
{
bool pipe_cfg_chgd;
u32 *csca, *cscb, *lut;
pipe_cfg_chgd = ch->old_cfg_desc != desc;
if (!pipe_cfg_chgd)
return;
dcss_hdr10_get_tbls(ch->hdr10, ch->id != OPIPE_CH_NO,
desc, &lut, &csca, &cscb);
dcss_hdr10_write_pipe_tbls(ch, lut, csca, cscb);
ch->old_cfg_desc = desc;
}
void dcss_hdr10_setup(struct dcss_hdr10 *hdr10, int ch_num,
struct dcss_hdr10_pipe_cfg *ipipe_cfg,
struct dcss_hdr10_pipe_cfg *opipe_cfg)
{
u64 desc = dcss_hdr10_get_desc(ipipe_cfg, opipe_cfg);
dcss_hdr10_pipe_setup(&hdr10->ch[ch_num], desc);
/*
 * The input pipe configuration does not affect the output pipe setup,
 * so just mask off the input part of the descriptor.
 */
dcss_hdr10_pipe_setup(&hdr10->ch[OPIPE_CH_NO], desc & ~0xffff);
}

View File

@ -0,0 +1,429 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drmP.h>
#include <linux/component.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops);
struct dcss_drm_commit {
struct work_struct work;
struct drm_device *drm;
struct drm_atomic_state *state;
};
static void dcss_kms_setup_opipe_gamut(u32 colorspace,
const struct drm_display_mode *mode,
enum dcss_hdr10_gamut *g,
enum dcss_hdr10_nonlinearity *nl)
{
u8 vic;
switch (colorspace) {
case DRM_MODE_COLORIMETRY_BT709_YCC:
case DRM_MODE_COLORIMETRY_XVYCC_709:
*g = G_REC709;
*nl = NL_REC709;
return;
case DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
case DRM_MODE_COLORIMETRY_XVYCC_601:
case DRM_MODE_COLORIMETRY_SYCC_601:
case DRM_MODE_COLORIMETRY_OPYCC_601:
*g = G_REC601_NTSC;
*nl = NL_REC709;
return;
case DRM_MODE_COLORIMETRY_BT2020_CYCC:
case DRM_MODE_COLORIMETRY_BT2020_RGB:
case DRM_MODE_COLORIMETRY_BT2020_YCC:
*g = G_REC2020;
*nl = NL_REC2084;
return;
case DRM_MODE_COLORIMETRY_OPRGB:
*g = G_ADOBE_ARGB;
*nl = NL_SRGB;
return;
default:
break;
}
/*
 * If we reached this point, the default colorimetry is used: for non-CEA
 * modes, fall back to sRGB.
 */
vic = drm_match_cea_mode(mode);
if (vic == 0) {
*g = G_ADOBE_ARGB;
*nl = NL_SRGB;
return;
}
if (mode->vdisplay == 480 || mode->vdisplay == 576 ||
mode->vdisplay == 240 || mode->vdisplay == 288) {
*g = G_REC601_NTSC;
*nl = NL_REC709;
return;
}
/* 2160p, 1080p, 720p */
*g = G_REC709;
*nl = NL_REC709;
}
#define YUV_MODE BIT(0)
void dcss_kms_setup_opipe(struct drm_connector_state *conn_state)
{
struct drm_crtc *crtc = conn_state->crtc;
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
int mode_flags = crtc->state->adjusted_mode.private_flags;
enum hdmi_quantization_range qr;
qr = drm_default_rgb_quant_range(&crtc->state->adjusted_mode);
dcss_kms_setup_opipe_gamut(conn_state->colorspace,
&crtc->state->adjusted_mode,
&dcss_crtc->opipe_g,
&dcss_crtc->opipe_nl);
dcss_crtc->opipe_pr = qr == HDMI_QUANTIZATION_RANGE_FULL ? PR_FULL :
PR_LIMITED;
dcss_crtc->output_is_yuv = !!(mode_flags & YUV_MODE);
}
static void dcss_kms_setup_output_pipe(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_display_info *di;
int i;
for_each_new_connector_in_state(state, connector, conn_state, i) {
if (!conn_state->best_encoder)
continue;
if (!conn_state->crtc->state->active ||
!drm_atomic_crtc_needs_modeset(conn_state->crtc->state))
continue;
crtc = connector->state->crtc;
di = &connector->display_info;
dcss_kms_setup_opipe(conn_state);
}
}
static void dcss_drm_atomic_commit_tail(struct dcss_drm_commit *commit)
{
struct drm_atomic_state *state = commit->state;
struct drm_device *drm = commit->drm;
struct dcss_kms_dev *kms = container_of(drm, struct dcss_kms_dev, base);
drm_atomic_helper_wait_for_fences(drm, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_commit_modeset_disables(drm, state);
dcss_kms_setup_output_pipe(state);
drm_atomic_helper_commit_modeset_enables(drm, state);
drm_atomic_helper_commit_planes(drm, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
spin_lock(&kms->commit.wait.lock);
kms->commit.pending = false;
wake_up_all_locked(&kms->commit.wait);
spin_unlock(&kms->commit.wait.lock);
kfree(commit);
}
static void dcss_commit_work(struct work_struct *work)
{
struct dcss_drm_commit *commit = container_of(work,
struct dcss_drm_commit,
work);
dcss_drm_atomic_commit_tail(commit);
}
static int dcss_drm_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state,
bool nonblock)
{
int ret;
struct dcss_kms_dev *kms = container_of(drm, struct dcss_kms_dev, base);
struct dcss_drm_commit *commit;
if (state->async_update) {
ret = drm_atomic_helper_prepare_planes(drm, state);
if (ret)
return ret;
drm_atomic_helper_async_commit(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
return 0;
}
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit)
return -ENOMEM;
commit->drm = drm;
commit->state = state;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
goto err_free;
INIT_WORK(&commit->work, dcss_commit_work);
ret = drm_atomic_helper_prepare_planes(drm, state);
if (ret)
goto err_free;
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(drm, state, true);
if (ret)
goto err;
}
spin_lock(&kms->commit.wait.lock);
ret = wait_event_interruptible_locked(kms->commit.wait,
!kms->commit.pending);
if (ret == 0)
kms->commit.pending = true;
spin_unlock(&kms->commit.wait.lock);
if (ret)
goto err;
ret = drm_atomic_helper_swap_state(state, true);
if (ret)
goto err;
drm_atomic_state_get(state);
if (nonblock)
queue_work(kms->commit_wq, &commit->work);
else
dcss_drm_atomic_commit_tail(commit);
return 0;
err:
drm_atomic_helper_cleanup_planes(drm, state);
err_free:
kfree(commit);
return ret;
}
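/*
 * Commit flow summary (a description of the code above, not new policy):
 * async plane updates take the drm_atomic_helper_async_commit() fast path;
 * all other commits are serialized through kms->commit.wait so that only
 * one commit is pending at a time, and then run either directly (blocking)
 * or from kms->commit_wq (nonblocking) via dcss_drm_atomic_commit_tail().
 */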
const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = dcss_drm_atomic_commit,
};
static struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
.date = "20190917",
.major = 1,
.minor = 0,
.patchlevel = 0,
};
static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms)
{
struct drm_mode_config *config = &kms->base.mode_config;
drm_mode_config_init(&kms->base);
config->min_width = 1;
config->min_height = 1;
config->max_width = 4096;
config->max_height = 4096;
config->allow_fb_modifiers = true;
config->normalize_zpos = true;
config->funcs = &dcss_drm_mode_config_funcs;
config->helper_private = &dcss_mode_config_helpers;
}
static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int dcss_kms_setup_encoder(struct dcss_kms_dev *kms)
{
struct drm_device *ddev = &kms->base;
struct drm_encoder *encoder = &kms->encoder;
struct drm_crtc *crtc = (struct drm_crtc *)&kms->crtc;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0,
&panel, &bridge);
if (ret)
return ret;
if (!bridge) {
dev_err(ddev->dev, "No bridge found %d.\n", ret);
return -ENODEV;
}
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_encoder_init(&kms->base, encoder,
&dcss_kms_simple_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret) {
dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret);
return ret;
}
return drm_bridge_attach(encoder, bridge, NULL);
}
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss, bool componentized)
{
struct dcss_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
struct drm_device *drm;
struct dcss_crtc *crtc;
int ret;
if (!kms)
return ERR_PTR(-ENOMEM);
drm = &kms->base;
crtc = &kms->crtc;
ret = drm_dev_init(drm, &dcss_kms_driver, dcss->dev);
if (ret)
goto free_kms;
drm->dev_private = dcss;
dcss_kms_mode_config_init(kms);
ret = drm_vblank_init(drm, 1);
if (ret)
goto cleanup_mode_config;
drm->irq_enabled = true;
ret = dcss_crtc_init(crtc, drm);
if (ret)
goto cleanup_mode_config;
kms->commit_wq = alloc_ordered_workqueue("dcss_nonblock_commit_wq", 0);
if (!kms->commit_wq) {
ret = -ENOMEM;
goto cleanup_crtc;
}
init_waitqueue_head(&kms->commit.wait);
if (componentized)
ret = component_bind_all(dcss->dev, kms);
else
ret = dcss_kms_setup_encoder(kms);
if (ret)
goto cleanup_wq;
drm_mode_config_reset(drm);
dcss_crtc_attach_color_mgmt_properties(crtc);
drm_kms_helper_poll_init(drm);
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_wq;
drm_fbdev_generic_setup(drm, 32);
return kms;
cleanup_wq:
drm_kms_helper_poll_fini(drm);
destroy_workqueue(kms->commit_wq);
cleanup_crtc:
dcss_crtc_deinit(crtc, drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
free_kms:
kfree(kms);
return ERR_PTR(ret);
}
void dcss_kms_detach(struct dcss_kms_dev *kms, bool componentized)
{
struct drm_device *drm = &kms->base;
struct dcss_dev *dcss = drm->dev_private;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_crtc_vblank_off(&kms->crtc.base);
drm->irq_enabled = false;
drm_mode_config_cleanup(drm);
destroy_workqueue(kms->commit_wq);
dcss_crtc_deinit(&kms->crtc, drm);
if (componentized)
component_unbind_all(dcss->dev, drm);
drm->dev_private = NULL;
drm_dev_put(drm);
}

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 NXP.
*/
#ifndef _DCSS_KMS_H_
#define _DCSS_KMS_H_
#include <drm/drm_encoder.h>
struct dcss_plane {
struct drm_plane base;
uint64_t dtrc_table_ofs_val;
struct drm_property *dtrc_table_ofs_prop;
int ch_num;
enum drm_plane_type type;
bool use_dtrc;
};
struct dcss_crtc {
struct drm_crtc base;
struct drm_crtc_state *state;
struct dcss_plane *plane[3];
int irq;
bool irq_enabled;
struct completion en_completion;
struct completion dis_completion;
bool output_is_yuv;
enum dcss_hdr10_nonlinearity opipe_nl;
enum dcss_hdr10_gamut opipe_g;
enum dcss_hdr10_pixel_range opipe_pr;
};
struct commit {
wait_queue_head_t wait;
bool pending;
};
struct dcss_kms_dev {
struct drm_device base;
struct dcss_crtc crtc;
struct drm_encoder encoder;
struct workqueue_struct *commit_wq;
struct commit commit;
};
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss, bool componentized);
void dcss_kms_detach(struct dcss_kms_dev *kms, bool componentized);
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm);
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm);
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos);
void dcss_crtc_attach_color_mgmt_properties(struct dcss_crtc *crtc);
#endif

View File

@ -0,0 +1,673 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <linux/dma-buf.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
static const u32 dcss_graphics_formats[] = {
/* RGB */
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_RGBX1010102,
DRM_FORMAT_BGRX1010102,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_RGBA1010102,
DRM_FORMAT_BGRA1010102,
};
static const u32 dcss_video_formats[] = {
/* YUV444 */
DRM_FORMAT_AYUV,
/* YUV422 */
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
/* YUV420 */
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV12_10LE40,
};
static const u64 dcss_video_format_modifiers[] = {
DRM_FORMAT_MOD_VSI_G1_TILED,
DRM_FORMAT_MOD_VSI_G2_TILED,
DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static const u64 dcss_graphics_format_modifiers[] = {
DRM_FORMAT_MOD_VIVANTE_TILED,
DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
DRM_FORMAT_MOD_VIVANTE_SUPER_TILED_FC,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p)
{
return container_of(p, struct dcss_plane, base);
}
static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb)
{
return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) ||
((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 &&
fb->modifier == DRM_FORMAT_MOD_LINEAR);
}
static void dcss_plane_destroy(struct drm_plane *plane)
{
struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane,
base);
drm_plane_cleanup(plane);
kfree(dcss_plane);
}
static int dcss_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
if (property == dcss_plane->dtrc_table_ofs_prop)
dcss_plane->dtrc_table_ofs_val = val;
else
return -EINVAL;
return 0;
}
static int dcss_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
if (property == dcss_plane->dtrc_table_ofs_prop)
*val = dcss_plane->dtrc_table_ofs_val;
else
return -EINVAL;
return 0;
}
static bool dcss_plane_format_mod_supported(struct drm_plane *plane,
u32 format,
u64 modifier)
{
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
switch (format) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB2101010:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED ||
modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED_FC;
default:
return modifier == DRM_FORMAT_MOD_LINEAR;
}
break;
case DRM_PLANE_TYPE_OVERLAY:
switch (format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV12_10LE40:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_VSI_G1_TILED ||
modifier == DRM_FORMAT_MOD_VSI_G2_TILED ||
modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED;
default:
return modifier == DRM_FORMAT_MOD_LINEAR;
}
break;
default:
return false;
}
return false;
}
static const struct drm_plane_funcs dcss_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = dcss_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = dcss_plane_atomic_set_property,
.atomic_get_property = dcss_plane_atomic_get_property,
.format_mod_supported = dcss_plane_format_mod_supported,
};
static bool dcss_plane_can_rotate(const struct drm_format_info *format,
bool mod_present, u64 modifier,
unsigned int rotation)
{
bool linear_format = !mod_present ||
(mod_present && modifier == DRM_FORMAT_MOD_LINEAR);
u32 supported_rotation = DRM_MODE_ROTATE_0;
if (!format->is_yuv && linear_format)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (!format->is_yuv &&
modifier == DRM_FORMAT_MOD_VIVANTE_TILED)
supported_rotation = DRM_MODE_ROTATE_MASK |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
(format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21))
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
format->format == DRM_FORMAT_NV12_10LE40)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y;
return !!(rotation & supported_rotation);
}
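/*
 * Quick summary of the checks above: linear RGB and linear NV12/NV21 allow
 * 0/180 degree rotation plus reflection, Vivante-tiled RGB allows any
 * rotation/reflection, linear NV12_10LE40 allows 0 degrees plus vertical
 * reflection, and everything else is limited to 0 degrees.
 */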
static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt)
{
if (src_w < 64 &&
(pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21 ||
pix_fmt == DRM_FORMAT_NV12_10LE40))
return false;
else if (src_w < 32 &&
(pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY ||
pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU))
return false;
return src_w >= 16 && src_h >= 8;
}
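/*
 * In other words: planar YUV sources (NV12/NV21/NV12_10LE40) must be at
 * least 64 pixels wide, packed 4:2:2 sources at least 32 pixels wide, and
 * any source must be at least 16x8.
 */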
static inline bool dcss_plane_use_dtrc(struct drm_framebuffer *fb,
enum drm_plane_type type)
{
u64 pix_format = fb->format->format;
return !dcss_plane_fb_is_linear(fb) &&
type == DRM_PLANE_TYPE_OVERLAY &&
(pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40);
}
static int dcss_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
struct drm_gem_cma_object *cma_obj;
struct drm_crtc_state *crtc_state;
int hdisplay, vdisplay;
int min, max;
int ret;
if (!fb || !state->crtc)
return 0;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
WARN_ON(!cma_obj);
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
hdisplay = crtc_state->adjusted_mode.hdisplay;
vdisplay = crtc_state->adjusted_mode.vdisplay;
if (!dcss_plane_is_source_size_allowed(state->src_w >> 16,
state->src_h >> 16,
fb->format->format)) {
DRM_DEBUG_KMS("Source plane size is not allowed!\n");
return -EINVAL;
}
dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num,
&min, &max);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
min, max, !is_primary_plane,
false);
if (ret)
return ret;
if (!state->visible)
return 0;
if (!dcss_plane_can_rotate(fb->format,
!!(fb->flags & DRM_MODE_FB_MODIFIERS),
fb->modifier,
state->rotation)) {
DRM_DEBUG_KMS("requested rotation is not allowed!\n");
return -EINVAL;
}
if ((fb->flags & DRM_MODE_FB_MODIFIERS) &&
!plane->funcs->format_mod_supported(plane,
fb->format->format,
fb->modifier)) {
DRM_DEBUG_KMS("Invalid modifier: %llx", fb->modifier);
return -EINVAL;
}
dcss_plane->use_dtrc = dcss_plane_use_dtrc(fb, plane->type);
return 0;
}
static struct drm_gem_object *dcss_plane_gem_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct drm_gem_object *obj;
if (IS_ERR(dma_buf))
return ERR_CAST(dma_buf);
mutex_lock(&dev->object_name_lock);
obj = dev->driver->gem_prime_import(dev, dma_buf);
mutex_unlock(&dev->object_name_lock);
return obj;
}
static void dcss_plane_set_primary_base(struct dcss_plane *dcss_plane,
u32 baddr)
{
struct drm_plane *plane = &dcss_plane->base;
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct dma_buf *dma_buf = cma_obj->base.dma_buf;
struct drm_gem_object *gem_obj;
dma_addr_t caddr;
bool compressed = true;
u32 compressed_format = _VIV_CFMT_ARGB8;
_VIV_VIDMEM_METADATA *mdata;
if (dcss_plane_fb_is_linear(fb) ||
((fb->flags & DRM_MODE_FB_MODIFIERS) &&
(fb->modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
fb->modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED))) {
dcss_dec400d_bypass(dcss->dec400d);
return;
}
if (!dma_buf) {
caddr = cma_obj->paddr + ALIGN(fb->height, 64) * fb->pitches[0];
} else {
mdata = dma_buf->priv;
if (!mdata || mdata->magic != VIV_VIDMEM_METADATA_MAGIC)
return;
gem_obj = dcss_plane_gem_import(plane->dev, mdata->ts_dma_buf);
if (IS_ERR(gem_obj))
return;
caddr = to_drm_gem_cma_obj(gem_obj)->paddr;
/* release gem_obj */
drm_gem_object_put_unlocked(gem_obj);
dcss_dec400d_fast_clear_config(dcss->dec400d, mdata->fc_value,
mdata->fc_enabled);
compressed = !!mdata->compressed;
compressed_format = mdata->compress_format;
}
dcss_dec400d_read_config(dcss->dec400d, 0, compressed,
compressed_format);
dcss_dec400d_addr_set(dcss->dec400d, baddr, caddr);
}
static void dcss_plane_set_dtrc_base(struct dcss_plane *dcss_plane,
u32 p1_ba, u32 p2_ba)
{
struct drm_plane *plane = &dcss_plane->base;
struct dcss_dev *dcss = plane->dev->dev_private;
if (!dcss_plane->use_dtrc) {
dcss_dtrc_bypass(dcss->dtrc, dcss_plane->ch_num);
return;
}
dcss_dtrc_addr_set(dcss->dtrc, dcss_plane->ch_num,
p1_ba, p2_ba, dcss_plane->dtrc_table_ofs_val);
}
static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
{
struct drm_plane *plane = &dcss_plane->base;
struct drm_plane_state *state = plane->state;
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
unsigned long p1_ba = 0, p2_ba = 0;
u16 x1, y1;
x1 = state->src.x1 >> 16;
y1 = state->src.y1 >> 16;
if (!format->is_yuv ||
format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y1 +
format->char_per_block[0] * x1;
else if (format->format == DRM_FORMAT_NV12_10LE40)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y1 +
format->char_per_block[0] * (x1 >> 2);
else if (format->format == DRM_FORMAT_UYVY ||
format->format == DRM_FORMAT_VYUY ||
format->format == DRM_FORMAT_YUYV ||
format->format == DRM_FORMAT_YVYU)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y1 +
2 * format->char_per_block[0] * (x1 >> 1);
if (format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
p2_ba = cma_obj->paddr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (y1 >> 1) +
(x1 >> 1)) << 1);
else if (format->format == DRM_FORMAT_NV12_10LE40)
p2_ba = cma_obj->paddr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (y1 >> 1)) << 1) +
format->char_per_block[1] * (x1 >> 2);
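/*
 * Worked example with assumed numbers (not taken from a real setup): for
 * NV12 with pitches[0] = pitches[1] = 1920 and a crop origin of
 * (x1, y1) = (64, 32), the luma plane starts at
 * offsets[0] + 1920 * 32 + 64 and the interleaved CbCr plane at
 * offsets[1] + ((960 * 16 + 32) << 1) bytes into the buffer.
 */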
dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba,
fb->pitches[0]);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
dcss_plane_set_primary_base(dcss_plane, p1_ba);
else
dcss_plane_set_dtrc_base(dcss_plane,
cma_obj->paddr + fb->offsets[0],
cma_obj->paddr + fb->offsets[1]);
}
static bool dcss_plane_needs_setup(struct drm_plane_state *state,
struct drm_plane_state *old_state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
return state->crtc_x != old_state->crtc_x ||
state->crtc_y != old_state->crtc_y ||
state->crtc_w != old_state->crtc_w ||
state->crtc_h != old_state->crtc_h ||
state->src_x != old_state->src_x ||
state->src_y != old_state->src_y ||
state->src_w != old_state->src_w ||
state->src_h != old_state->src_h ||
fb->format->format != old_fb->format->format ||
fb->modifier != old_fb->modifier ||
state->rotation != old_state->rotation;
}
static void dcss_plane_setup_hdr10_pipes(struct drm_plane *plane)
{
struct dcss_dev *dcss = plane->dev->dev_private;
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct drm_plane_state *state = plane->state;
struct drm_crtc *crtc = state->crtc;
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct drm_framebuffer *fb = state->fb;
struct dcss_hdr10_pipe_cfg ipipe_cfg, opipe_cfg;
opipe_cfg.is_yuv = dcss_crtc->output_is_yuv;
opipe_cfg.g = dcss_crtc->opipe_g;
opipe_cfg.nl = dcss_crtc->opipe_nl;
opipe_cfg.pr = dcss_crtc->opipe_pr;
ipipe_cfg.is_yuv = fb->format->is_yuv;
if (!fb->format->is_yuv) {
ipipe_cfg.nl = NL_SRGB;
ipipe_cfg.pr = PR_FULL;
ipipe_cfg.g = G_ADOBE_ARGB;
goto setup;
}
switch (state->color_encoding) {
case DRM_COLOR_YCBCR_BT709:
ipipe_cfg.nl = NL_REC709;
ipipe_cfg.g = G_REC709;
break;
case DRM_COLOR_YCBCR_BT2020:
ipipe_cfg.nl = NL_REC2084;
ipipe_cfg.g = G_REC2020;
break;
default:
ipipe_cfg.nl = NL_REC709;
ipipe_cfg.g = G_REC601_PAL;
break;
}
ipipe_cfg.pr = state->color_range;
setup:
dcss_hdr10_setup(dcss->hdr10, dcss_plane->ch_num,
&ipipe_cfg, &opipe_cfg);
}
static void dcss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
u32 pixel_format;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
struct drm_rect src, dst;
bool enable = true;
if (!fb || !state->crtc || !state->visible)
return;
pixel_format = fb->format->format;
crtc_state = state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) &&
!dcss_plane_needs_setup(state, old_state) &&
!dcss_dtg_global_alpha_changed(dcss->dtg, dcss_plane->ch_num,
state->alpha >> 8)) {
dcss_plane_atomic_set_base(dcss_plane);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
dcss_dec400d_shadow_trig(dcss->dec400d);
return;
}
src = plane->state->src;
dst = plane->state->dst;
/*
* The width and height after clipping.
*/
src_w = drm_rect_width(&src) >> 16;
src_h = drm_rect_height(&src) >> 16;
dst_w = drm_rect_width(&dst);
dst_h = drm_rect_height(&dst);
dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format,
modifiers_present ? fb->modifier :
DRM_FORMAT_MOD_LINEAR);
if (dcss_plane->use_dtrc) {
u32 dtrc_w, dtrc_h;
dcss_dtrc_set_res(dcss->dtrc, dcss_plane->ch_num, state,
&dtrc_w, &dtrc_h);
dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, dtrc_w, dtrc_h);
} else {
dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h);
}
dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num,
state->rotation);
dcss_plane_atomic_set_base(dcss_plane);
dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
state->fb->format, src_w, src_h,
dst_w, dst_h,
drm_mode_vrefresh(&crtc_state->mode));
dcss_plane_setup_hdr10_pipes(plane);
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
dst.x1, dst.y1, dst_w, dst_h);
dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num,
fb->format, state->alpha >> 8);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
dcss_dec400d_enable(dcss->dec400d);
else if (dcss_plane->use_dtrc)
dcss_dtrc_enable(dcss->dtrc, dcss_plane->ch_num, true);
if (!dcss_plane->ch_num && (state->alpha >> 8) == 0)
enable = false;
dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable);
dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable);
if (!enable)
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
0, 0, 0, 0);
dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable);
}
static void dcss_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
if (dcss_plane->use_dtrc)
dcss_dtrc_enable(dcss->dtrc, dcss_plane->ch_num, false);
dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false);
dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false);
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0);
dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false);
}
static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = dcss_plane_atomic_check,
.atomic_update = dcss_plane_atomic_update,
.atomic_disable = dcss_plane_atomic_disable,
};
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos)
{
struct dcss_plane *dcss_plane;
const u64 *format_modifiers = dcss_video_format_modifiers;
const u32 *formats = dcss_video_formats;
u32 formats_size = ARRAY_SIZE(dcss_video_formats);
struct drm_property *prop;
int ret;
if (zpos > 2)
return ERR_PTR(-EINVAL);
dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL);
if (!dcss_plane) {
DRM_ERROR("failed to allocate plane\n");
return ERR_PTR(-ENOMEM);
}
if (type == DRM_PLANE_TYPE_PRIMARY) {
formats = dcss_graphics_formats;
formats_size = ARRAY_SIZE(dcss_graphics_formats);
format_modifiers = dcss_graphics_format_modifiers;
}
ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs,
&dcss_plane_funcs, formats,
formats_size,
format_modifiers, type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(dcss_plane);
return ERR_PTR(ret);
}
drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs);
ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos);
if (ret)
return ERR_PTR(ret);
drm_plane_create_rotation_property(&dcss_plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
dcss_plane->ch_num = 2 - zpos;
dcss_plane->type = type;
if (type == DRM_PLANE_TYPE_PRIMARY)
return dcss_plane;
prop = drm_property_create_range(drm, 0, "dtrc_table_ofs",
0, ULLONG_MAX);
if (!prop) {
DRM_ERROR("cannot create dtrc_table_ofs property\n");
return ERR_PTR(-ENOMEM);
}
dcss_plane->dtrc_table_ofs_prop = prop;
drm_object_attach_property(&dcss_plane->base.base, prop, 0);
return dcss_plane;
}

View File

@ -0,0 +1,119 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include "dcss-dev.h"
#define DCSS_RDSRC_CTRL_STATUS 0x00
#define RDSRC_RD_ERR BIT(31)
#define RDSRC_FRAME_COMP BIT(30)
#define RDSRC_FIFO_SIZE_POS 16
#define RDSRC_FIFO_SIZE_MASK GENMASK(22, 16)
#define RDSRC_RD_ERR_EN BIT(15)
#define RDSRC_FRAME_COMP_EN BIT(14)
#define RDSRC_P_SIZE_POS 7
#define RDSRC_P_SIZE_MASK GENMASK(9, 7)
#define RDSRC_T_SIZE_POS 5
#define RDSRC_T_SIZE_MASK GENMASK(6, 5)
#define RDSRC_BPP_POS 2
#define RDSRC_BPP_MASK GENMASK(4, 2)
#define RDSRC_ENABLE BIT(0)
#define DCSS_RDSRC_BASE_ADDR 0x10
#define DCSS_RDSRC_PITCH 0x14
#define DCSS_RDSRC_WIDTH 0x18
#define DCSS_RDSRC_HEIGHT 0x1C
struct dcss_rdsrc {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
u32 buf_addr;
u32 ctrl_status;
};
static void dcss_rdsrc_write(struct dcss_rdsrc *rdsrc, u32 val, u32 ofs)
{
dcss_ctxld_write(rdsrc->ctxld, rdsrc->ctx_id, val,
rdsrc->base_ofs + ofs);
}
int dcss_rdsrc_init(struct dcss_dev *dcss, unsigned long rdsrc_base)
{
struct dcss_rdsrc *rdsrc;
rdsrc = devm_kzalloc(dcss->dev, sizeof(*rdsrc), GFP_KERNEL);
if (!rdsrc)
return -ENOMEM;
rdsrc->base_reg = devm_ioremap(dcss->dev, rdsrc_base, SZ_4K);
if (!rdsrc->base_reg) {
dev_err(dcss->dev, "rdsrc: unable to remap base\n");
devm_kfree(dcss->dev, rdsrc);
return -ENOMEM;
}
dcss->rdsrc = rdsrc;
rdsrc->dev = dcss->dev;
rdsrc->base_ofs = rdsrc_base;
rdsrc->ctxld = dcss->ctxld;
rdsrc->ctx_id = CTX_SB_HP;
return 0;
}
void dcss_rdsrc_exit(struct dcss_rdsrc *rdsrc)
{
devm_iounmap(rdsrc->dev, rdsrc->base_reg);
devm_kfree(rdsrc->dev, rdsrc);
}
void dcss_rdsrc_setup(struct dcss_rdsrc *rdsrc, u32 pix_format, u32 dst_xres,
u32 dst_yres, u32 base_addr)
{
u32 buf_size, pitch, bpp;
/* since the scaler output is YUV444, the RDSRC output has to match */
bpp = 4;
rdsrc->ctrl_status = FIFO_512 << RDSRC_FIFO_SIZE_POS;
rdsrc->ctrl_status |= PSIZE_256 << RDSRC_P_SIZE_POS;
rdsrc->ctrl_status |= TSIZE_256 << RDSRC_T_SIZE_POS;
rdsrc->ctrl_status |= BPP_32_10BIT_OUTPUT << RDSRC_BPP_POS;
buf_size = dst_xres * dst_yres * bpp;
pitch = dst_xres * bpp;
rdsrc->buf_addr = base_addr;
dcss_rdsrc_write(rdsrc, rdsrc->buf_addr, DCSS_RDSRC_BASE_ADDR);
dcss_rdsrc_write(rdsrc, pitch, DCSS_RDSRC_PITCH);
dcss_rdsrc_write(rdsrc, dst_xres, DCSS_RDSRC_WIDTH);
dcss_rdsrc_write(rdsrc, dst_yres, DCSS_RDSRC_HEIGHT);
}
void dcss_rdsrc_enable(struct dcss_rdsrc *rdsrc)
{
dcss_rdsrc_write(rdsrc, rdsrc->ctrl_status, DCSS_RDSRC_CTRL_STATUS);
}
void dcss_rdsrc_disable(struct dcss_rdsrc *rdsrc)
{
/* RDSRC is turned off by setting the width and height to 0 */
dcss_rdsrc_write(rdsrc, 0, DCSS_RDSRC_WIDTH);
dcss_rdsrc_write(rdsrc, 0, DCSS_RDSRC_HEIGHT);
dcss_rdsrc_write(rdsrc, rdsrc->ctrl_status, DCSS_RDSRC_CTRL_STATUS);
}

View File

@ -0,0 +1,911 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*
* Scaling algorithms were contributed by Dzung Hoang <dzung.hoang@nxp.com>
*/
#include <linux/device.h>
#include "dcss-dev.h"
#define DCSS_SCALER_CTRL 0x00
#define SCALER_EN BIT(0)
#define REPEAT_EN BIT(4)
#define SCALE2MEM_EN BIT(8)
#define MEM2OFIFO_EN BIT(12)
#define DCSS_SCALER_OFIFO_CTRL 0x04
#define OFIFO_LOW_THRES_POS 0
#define OFIFO_LOW_THRES_MASK GENMASK(9, 0)
#define OFIFO_HIGH_THRES_POS 16
#define OFIFO_HIGH_THRES_MASK GENMASK(25, 16)
#define UNDERRUN_DETECT_CLR BIT(26)
#define LOW_THRES_DETECT_CLR BIT(27)
#define HIGH_THRES_DETECT_CLR BIT(28)
#define UNDERRUN_DETECT_EN BIT(29)
#define LOW_THRES_DETECT_EN BIT(30)
#define HIGH_THRES_DETECT_EN BIT(31)
#define DCSS_SCALER_SDATA_CTRL 0x08
#define YUV_EN BIT(0)
#define RTRAM_8LINES BIT(1)
#define Y_UV_BYTE_SWAP BIT(4)
#define A2R10G10B10_FORMAT_POS 8
#define A2R10G10B10_FORMAT_MASK GENMASK(11, 8)
#define DCSS_SCALER_BIT_DEPTH 0x0C
#define LUM_BIT_DEPTH_POS 0
#define LUM_BIT_DEPTH_MASK GENMASK(1, 0)
#define CHR_BIT_DEPTH_POS 4
#define CHR_BIT_DEPTH_MASK GENMASK(5, 4)
#define DCSS_SCALER_SRC_FORMAT 0x10
#define DCSS_SCALER_DST_FORMAT 0x14
#define FORMAT_MASK GENMASK(1, 0)
#define DCSS_SCALER_SRC_LUM_RES 0x18
#define DCSS_SCALER_SRC_CHR_RES 0x1C
#define DCSS_SCALER_DST_LUM_RES 0x20
#define DCSS_SCALER_DST_CHR_RES 0x24
#define WIDTH_POS 0
#define WIDTH_MASK GENMASK(11, 0)
#define HEIGHT_POS 16
#define HEIGHT_MASK GENMASK(27, 16)
#define DCSS_SCALER_V_LUM_START 0x48
#define V_START_MASK GENMASK(15, 0)
#define DCSS_SCALER_V_LUM_INC 0x4C
#define V_INC_MASK GENMASK(15, 0)
#define DCSS_SCALER_H_LUM_START 0x50
#define H_START_MASK GENMASK(18, 0)
#define DCSS_SCALER_H_LUM_INC 0x54
#define H_INC_MASK GENMASK(15, 0)
#define DCSS_SCALER_V_CHR_START 0x58
#define DCSS_SCALER_V_CHR_INC 0x5C
#define DCSS_SCALER_H_CHR_START 0x60
#define DCSS_SCALER_H_CHR_INC 0x64
#define DCSS_SCALER_COEF_VLUM 0x80
#define DCSS_SCALER_COEF_HLUM 0x140
#define DCSS_SCALER_COEF_VCHR 0x200
#define DCSS_SCALER_COEF_HCHR 0x300
struct dcss_scaler_ch {
void __iomem *base_reg;
u32 base_ofs;
struct dcss_scaler *scl;
u32 sdata_ctrl;
u32 scaler_ctrl;
bool scaler_ctrl_chgd;
u32 c_vstart;
u32 c_hstart;
int ch_num;
};
struct dcss_scaler {
struct device *dev;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_scaler_ch ch[3];
struct dcss_wrscl *wrscl;
struct dcss_rdsrc *rdsrc;
int ch_using_wrscl;
};
/* scaler coefficients generator */
#define PSC_FRAC_BITS 30
#define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS)
#define PSC_BITS_FOR_PHASE 4
#define PSC_NUM_PHASES 16
#define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1)
#define PSC_NUM_TAPS 7
#define PSC_NUM_TAPS_RGBA 5
#define PSC_COEFF_PRECISION 10
#define PSC_PHASE_FRACTION_BITS 13
#define PSC_PHASE_MASK (PSC_NUM_PHASES - 1)
#define PSC_Q_FRACTION 19
#define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1))
/**
* mult_q() - Performs fixed-point multiplication.
* @A: multiplier
* @B: multiplicand
*/
static int mult_q(int A, int B)
{
int result;
s64 temp;
temp = (int64_t)A * (int64_t)B;
temp += PSC_Q_ROUND_OFFSET;
result = (int)(temp >> PSC_Q_FRACTION);
return result;
}
/**
* div_q() - Performs fixed-point division.
* @A: dividend
* @B: divisor
*/
static int div_q(int A, int B)
{
int result;
s64 temp;
temp = (int64_t)A << PSC_Q_FRACTION;
if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0))
temp += B / 2;
else
temp -= B / 2;
result = (int)(temp / B);
return result;
}
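/*
 * Worked example for the Q19 helpers above (1.0 == 1 << 19 == 524288):
 *
 *   div_q(3, 4)            = 393216   -> 0.75 in Q19
 *   mult_q(393216, 262144) = 196608   -> 0.75 * 0.5 = 0.375 in Q19
 *
 * Both helpers round to nearest before dropping back to Q19.
 */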
/**
* exp_approx_q() - Compute approximation to exp(x) function using Taylor
* series.
* @x: fixed-point argument of exp function
*/
static int exp_approx_q(int x)
{
int sum = 1 << PSC_Q_FRACTION;
int term = 1 << PSC_Q_FRACTION;
term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION));
sum += term;
return sum;
}
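/*
 * exp_approx_q() evaluates the truncated Taylor series
 * 1 + x + x^2/2 + x^3/6 + x^4/24 in Q19. The gaussian generator below only
 * calls it with small negative arguments, where the four-term expansion is
 * accurate enough.
 */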
/**
* dcss_scaler_gaussian_filter() - Generate gaussian prototype filter.
* @fc_q: fixed-point cutoff frequency normalized to range [0, 1]
* @use_5_taps: indicates whether to use 5 taps or 7 taps
 * @phase0_identity: replace the phase 0 filter with an identity filter
 * @coef: output filter coefficients
*/
static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
bool phase0_identity,
int coef[][PSC_NUM_TAPS])
{
int sigma_q, g0_q, g1_q, g2_q;
int tap_cnt1, tap_cnt2, tap_idx, phase_cnt;
int mid;
int phase;
int i;
int taps;
if (use_5_taps)
for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
coef[phase][0] = 0;
coef[phase][PSC_NUM_TAPS - 1] = 0;
}
/* seed coefficient scanner */
taps = use_5_taps ? PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS;
mid = (PSC_NUM_PHASES * taps) / 2 - 1;
phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2;
tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
/* seed gaussian filter generator */
sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q);
g0_q = 1 << PSC_Q_FRACTION;
g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET,
mult_q(sigma_q, sigma_q)));
g2_q = mult_q(g1_q, g1_q);
coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q;
for (i = 0; i < mid; i++) {
phase_cnt++;
tap_cnt1--;
tap_cnt2++;
g0_q = mult_q(g0_q, g1_q);
g1_q = mult_q(g1_q, g2_q);
if ((phase_cnt & PSC_PHASE_MASK) <= 8) {
tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE;
coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q;
}
if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) {
tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE;
coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q;
}
}
phase_cnt++;
tap_cnt1--;
coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0;
/* override phase 0 with identity filter if specified */
if (phase0_identity)
for (i = 0; i < PSC_NUM_TAPS; i++)
coef[0][i] = i == (PSC_NUM_TAPS >> 1) ?
(1 << PSC_COEFF_PRECISION) : 0;
/* normalize coef */
for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
int sum = 0;
s64 ll_temp;
for (i = 0; i < PSC_NUM_TAPS; i++)
sum += coef[phase][i];
for (i = 0; i < PSC_NUM_TAPS; i++) {
ll_temp = coef[phase][i];
ll_temp <<= PSC_COEFF_PRECISION;
ll_temp += sum >> 1;
ll_temp /= sum;
coef[phase][i] = (int)ll_temp;
}
}
}
/**
* dcss_scaler_filter_design() - Compute filter coefficients using
* Gaussian filter.
* @src_length: length of input
* @dst_length: length of output
* @use_5_taps: 0 for 7 taps per phase, 1 for 5 taps
 * @phase0_identity: replace the phase 0 filter with an identity filter
 * @coef: output coefficients
*/
static void dcss_scaler_filter_design(int src_length, int dst_length,
bool use_5_taps, bool phase0_identity,
int coef[][PSC_NUM_TAPS])
{
int fc_q;
/* compute cutoff frequency */
if (dst_length >= src_length)
fc_q = div_q(1, PSC_NUM_PHASES);
else
fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
/* compute gaussian filter coefficients */
dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
}
static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
{
struct dcss_scaler *scl = ch->scl;
dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs);
}
static int dcss_scaler_ch_init_all(struct dcss_scaler *scl,
unsigned long scaler_base)
{
struct dcss_scaler_ch *ch;
int i;
for (i = 0; i < 3; i++) {
ch = &scl->ch[i];
ch->base_ofs = scaler_base + i * 0x400;
ch->base_reg = devm_ioremap(scl->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(scl->dev, "scaler: unable to remap ch base\n");
return -ENOMEM;
}
ch->scl = scl;
ch->ch_num = i;
}
return 0;
}
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
{
struct dcss_scaler *scaler;
scaler = devm_kzalloc(dcss->dev, sizeof(*scaler), GFP_KERNEL);
if (!scaler)
return -ENOMEM;
dcss->scaler = scaler;
scaler->dev = dcss->dev;
scaler->ctxld = dcss->ctxld;
scaler->ctx_id = CTX_SB_HP;
scaler->wrscl = dcss->wrscl;
scaler->rdsrc = dcss->rdsrc;
scaler->ch_using_wrscl = -1;
if (dcss_scaler_ch_init_all(scaler, scaler_base)) {
int i;
for (i = 0; i < 3; i++) {
if (scaler->ch[i].base_reg)
devm_iounmap(scaler->dev,
scaler->ch[i].base_reg);
}
devm_kfree(scaler->dev, scaler);
return -ENOMEM;
}
return 0;
}
void dcss_scaler_exit(struct dcss_scaler *scl)
{
int ch_no;
for (ch_no = 0; ch_no < 3; ch_no++) {
struct dcss_scaler_ch *ch = &scl->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL);
if (ch->base_reg)
devm_iounmap(scl->dev, ch->base_reg);
}
devm_kfree(scl->dev, scl);
}
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en)
{
struct dcss_scaler_ch *ch = &scl->ch[ch_num];
u32 scaler_ctrl;
if (scl->ch_using_wrscl == ch_num) {
if (en) {
scaler_ctrl = SCALE2MEM_EN | MEM2OFIFO_EN | REPEAT_EN;
} else {
dcss_wrscl_disable(scl->wrscl);
dcss_rdsrc_disable(scl->rdsrc);
scl->ch_using_wrscl = -1;
scaler_ctrl = 0;
}
} else {
scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0;
}
if (en)
dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL);
if (ch->scaler_ctrl != scaler_ctrl)
ch->scaler_ctrl_chgd = true;
ch->scaler_ctrl = scaler_ctrl;
}
static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en)
{
ch->sdata_ctrl &= ~YUV_EN;
ch->sdata_ctrl |= en ? YUV_EN : 0;
}
static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en)
{
ch->sdata_ctrl &= ~RTRAM_8LINES;
ch->sdata_ctrl |= en ? RTRAM_8LINES : 0;
}
static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth)
{
u32 val;
val = depth == 30 ? 2 : 0;
dcss_scaler_write(ch,
((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) |
((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK),
DCSS_SCALER_BIT_DEPTH);
}
enum buffer_format {
BUF_FMT_YUV420,
BUF_FMT_YUV422,
BUF_FMT_ARGB8888_YUV444,
};
enum chroma_location {
PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0,
PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1,
PSC_LOC_HORZ_0_VERT_0 = 2,
PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3,
PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4,
PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5
};
static void dcss_scaler_format_set(struct dcss_scaler_ch *ch,
enum buffer_format src_fmt,
enum buffer_format dst_fmt)
{
dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT);
dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT);
}
static void dcss_scaler_res_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres,
int dst_xres, int dst_yres,
u32 pix_format, enum buffer_format dst_format)
{
u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres;
u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres;
bool src_is_444 = true;
lsrc_xres = src_xres;
csrc_xres = src_xres;
lsrc_yres = src_yres;
csrc_yres = src_yres;
ldst_xres = dst_xres;
cdst_xres = dst_xres;
ldst_yres = dst_yres;
cdst_yres = dst_yres;
if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY ||
pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) {
csrc_xres >>= 1;
src_is_444 = false;
} else if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40) {
csrc_xres >>= 1;
csrc_yres >>= 1;
src_is_444 = false;
}
if (dst_format == BUF_FMT_YUV422)
cdst_xres >>= 1;
/* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */
if (src_is_444 && dst_format == BUF_FMT_YUV422) {
lsrc_yres--;
csrc_yres--;
}
dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_SRC_LUM_RES);
dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_SRC_CHR_RES);
dcss_scaler_write(ch, (((ldst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_DST_LUM_RES);
dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_DST_CHR_RES);
}
#define downscale_fp(factor, fp_pos) ((factor) << (fp_pos))
#define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor))
struct dcss_scaler_factors {
int downscale;
int upscale;
};
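/*
 * Per-channel scaling limits, expressed as source/destination ratios. The
 * first table gives what a channel can do on its own (e.g. channel 0: up to
 * 3x downscale and 8x upscale); the second gives the wider limits reported
 * when the WRSCL/RDSRC write-back path is available to assist that channel.
 */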
static const struct dcss_scaler_factors dcss_scaler_factors[] = {
{3, 8}, {5, 8}, {5, 8},
};
static const struct dcss_scaler_factors dcss_scaler_wrscl_factors[] = {
{5, 8}, {7, 8}, {7, 8},
};
static bool dcss_scaler_fractions_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres,
int dst_xres, int dst_yres,
u32 src_format, u32 dst_format,
enum chroma_location src_chroma_loc)
{
int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres;
u32 l_vinc, l_hinc, c_vinc, c_hinc;
u32 c_vstart, c_hstart;
u8 upscale_factor, downscale_factor;
src_c_xres = src_xres;
src_c_yres = src_yres;
dst_c_xres = dst_xres;
dst_c_yres = dst_yres;
c_vstart = 0;
c_hstart = 0;
/* adjustments for source chroma location */
if (src_format == BUF_FMT_YUV420) {
/* vertical input chroma position adjustment */
switch (src_chroma_loc) {
case PSC_LOC_HORZ_0_VERT_1_OVER_4:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
/*
* move chroma up to first luma line
* (1/4 chroma input line spacing)
*/
c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
break;
case PSC_LOC_HORZ_0_VERT_1_OVER_2:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
/*
* move chroma up to first luma line
* (1/2 chroma input line spacing)
*/
c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1));
break;
default:
break;
}
/* horizontal input chroma position adjustment */
switch (src_chroma_loc) {
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
case PSC_LOC_HORZ_1_OVER_4_VERT_0:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
/* move chroma left 1/4 chroma input sample spacing */
c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
break;
default:
break;
}
}
/* adjustments to chroma resolution */
if (src_format == BUF_FMT_YUV420) {
src_c_xres >>= 1;
src_c_yres >>= 1;
} else if (src_format == BUF_FMT_YUV422) {
src_c_xres >>= 1;
}
if (dst_format == BUF_FMT_YUV422)
dst_c_xres >>= 1;
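	/*
	 * Scaling increments are source/destination ratios in 3.13 fixed
	 * point, rounded to nearest. For example, scaling 1920 source pixels
	 * down to 1280: ((1920 << 13) + 640) / 1280 = 12288, i.e. 1.5 source
	 * pixels advanced per output pixel.
	 */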
l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres;
c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres;
l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres;
c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres;
/* save chroma start phase */
ch->c_vstart = c_vstart;
ch->c_hstart = c_hstart;
dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START);
dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC);
dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START);
dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC);
dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START);
dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC);
dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START);
dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC);
downscale_factor = dcss_scaler_factors[ch->ch_num].downscale;
upscale_factor = dcss_scaler_factors[ch->ch_num].upscale;
	/* return whether the WRSCL/RDSRC path is needed for this scaling ratio */
return l_vinc > downscale_fp(downscale_factor, 13) ||
l_vinc < upscale_fp(upscale_factor, 13) ||
l_hinc > downscale_fp(downscale_factor, 13) ||
l_hinc < upscale_fp(upscale_factor, 13);
}
int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
int *min, int *max)
{
const struct dcss_scaler_factors *factors_map = dcss_scaler_factors;
if (scl->ch_using_wrscl == -1 || scl->ch_using_wrscl == ch_num)
factors_map = dcss_scaler_wrscl_factors;
*min = upscale_fp(factors_map[ch_num].upscale, 16);
*max = downscale_fp(factors_map[ch_num].downscale, 16);
return 0;
}
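/*
 * Coefficient RAM layout: three banks of 16 words at +0x00, +0x40 and +0x80
 * from the given base address, one word per phase, each word packing up to
 * three 12-bit tap values. Only the first PSC_STORED_PHASES phases are
 * computed; the remaining phases are written back in mirrored order with the
 * tap order reversed.
 */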
static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch,
int base_addr,
int coef[][PSC_NUM_TAPS])
{
int i, phase;
for (i = 0; i < PSC_STORED_PHASES; i++) {
dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 |
(coef[i][2] & 0xfff) << 4 |
(coef[i][3] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 |
(coef[i][4] & 0xfff) << 8 |
(coef[i][5] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24),
base_addr + 0x80 + i * sizeof(u32));
}
/* reverse both phase and tap orderings */
for (phase = (PSC_NUM_PHASES >> 1) - 1;
i < PSC_NUM_PHASES; i++, phase--) {
dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 |
(coef[phase][4] & 0xfff) << 4 |
(coef[phase][3] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 |
(coef[phase][2] & 0xfff) << 8 |
(coef[phase][1] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24),
base_addr + 0x80 + i * sizeof(u32));
}
}
static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch,
int base_addr,
int coef[][PSC_NUM_TAPS])
{
int i, phase;
for (i = 0; i < PSC_STORED_PHASES; i++) {
dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 |
(coef[i][1] & 0xfff) << 4 |
(coef[i][2] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 |
(coef[i][3] & 0xfff) << 8 |
(coef[i][4] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][4] & 0x00f) << 24 |
(coef[i][5] & 0xfff) << 12 |
(coef[i][6] & 0xfff)),
base_addr + 0x80 + i * sizeof(u32));
}
/* reverse both phase and tap orderings */
for (phase = (PSC_NUM_PHASES >> 1) - 1;
i < PSC_NUM_PHASES; i++, phase--) {
dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 |
(coef[phase][5] & 0xfff) << 4 |
(coef[phase][4] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 |
(coef[phase][3] & 0xfff) << 8 |
(coef[phase][2] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 |
(coef[phase][1] & 0xfff) << 12 |
(coef[phase][0] & 0xfff)),
base_addr + 0x80 + i * sizeof(u32));
}
}
static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
enum buffer_format src_format,
enum buffer_format dst_format,
bool use_5_taps,
int src_xres, int src_yres, int dst_xres,
int dst_yres)
{
int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
bool program_5_taps = use_5_taps ||
(dst_format == BUF_FMT_YUV422 &&
src_format == BUF_FMT_ARGB8888_YUV444);
/* horizontal luma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical luma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
src_yres == dst_yres, coef);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
else
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
/* adjust chroma resolution */
if (src_format != BUF_FMT_ARGB8888_YUV444)
src_xres >>= 1;
if (src_format == BUF_FMT_YUV420)
src_yres >>= 1;
if (dst_format != BUF_FMT_ARGB8888_YUV444)
dst_xres >>= 1;
if (dst_format == BUF_FMT_YUV420) /* should not happen */
dst_yres >>= 1;
/* horizontal chroma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
(src_xres == dst_xres) && (ch->c_hstart == 0),
coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
/* vertical chroma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
(src_yres == dst_yres) && (ch->c_vstart == 0),
coef);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
else
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
}
static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres, int dst_xres,
int dst_yres)
{
int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
/* horizontal RGB */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical RGB */
dcss_scaler_filter_design(src_yres, dst_yres, false,
src_yres == dst_yres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
}
static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
const struct drm_format_info *format)
{
u32 a2r10g10b10_format;
if (format->is_yuv)
return;
ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK;
if (format->depth != 30)
return;
switch (format->format) {
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XRGB2101010:
a2r10g10b10_format = 0;
break;
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XBGR2101010:
a2r10g10b10_format = 5;
break;
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_RGBX1010102:
a2r10g10b10_format = 6;
break;
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_BGRX1010102:
a2r10g10b10_format = 11;
break;
default:
a2r10g10b10_format = 0;
break;
}
ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
}
static void dcss_scaler_setup_path(struct dcss_scaler_ch *ch,
u32 pix_format, int dst_xres,
int dst_yres, u32 vrefresh_hz,
bool wrscl_needed)
{
struct dcss_scaler *scl = ch->scl;
u32 base_addr;
	/* nothing to do if the WRSCL path is needed but another channel already uses it */
if (wrscl_needed && scl->ch_using_wrscl != -1 &&
scl->ch_using_wrscl != ch->ch_num)
return;
if (!wrscl_needed) {
/* Channel has finished using WRSCL. Release WRSCL/RDSRC. */
if (scl->ch_using_wrscl == ch->ch_num) {
dcss_wrscl_disable(scl->wrscl);
dcss_rdsrc_disable(scl->rdsrc);
scl->ch_using_wrscl = -1;
}
return;
}
base_addr = dcss_wrscl_setup(scl->wrscl, pix_format, vrefresh_hz,
dst_xres, dst_yres);
dcss_rdsrc_setup(scl->rdsrc, pix_format, dst_xres, dst_yres,
base_addr);
dcss_wrscl_enable(scl->wrscl);
dcss_rdsrc_enable(scl->rdsrc);
scl->ch_using_wrscl = ch->ch_num;
}
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
u32 vrefresh_hz)
{
struct dcss_scaler_ch *ch = &scl->ch[ch_num];
unsigned int pixel_depth = 0;
bool rtr_8line_en = false;
bool use_5_taps = false;
enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444;
enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444;
u32 pix_format = format->format;
bool use_wrscl;
if (format->is_yuv) {
dcss_scaler_yuv_enable(ch, true);
if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40) {
rtr_8line_en = true;
src_format = BUF_FMT_YUV420;
} else if (pix_format == DRM_FORMAT_UYVY ||
pix_format == DRM_FORMAT_VYUY ||
pix_format == DRM_FORMAT_YUYV ||
pix_format == DRM_FORMAT_YVYU) {
src_format = BUF_FMT_YUV422;
}
use_5_taps = !rtr_8line_en;
if (pix_format == DRM_FORMAT_NV12_10LE40)
pixel_depth = 30;
} else {
dcss_scaler_yuv_enable(ch, false);
pixel_depth = format->depth;
}
use_wrscl = dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres,
dst_yres, src_format, dst_format,
PSC_LOC_HORZ_0_VERT_1_OVER_4);
if (format->is_yuv)
dcss_scaler_yuv_coef_set(ch, src_format, dst_format,
use_5_taps, src_xres, src_yres,
dst_xres, dst_yres);
else
dcss_scaler_rgb_coef_set(ch, src_xres, src_yres,
dst_xres, dst_yres);
dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en);
dcss_scaler_bit_depth_set(ch, pixel_depth);
dcss_scaler_set_rgb10_order(ch, format);
dcss_scaler_format_set(ch, src_format, dst_format);
dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, dst_yres,
pix_format, dst_format);
dcss_scaler_setup_path(ch, pix_format, dst_xres,
dst_yres, vrefresh_hz, use_wrscl);
}
/* This function will be called from interrupt context. */
void dcss_scaler_write_sclctrl(struct dcss_scaler *scl)
{
int chnum;
for (chnum = 0; chnum < 3; chnum++) {
struct dcss_scaler_ch *ch = &scl->ch[chnum];
if (ch->scaler_ctrl_chgd) {
dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id,
ch->scaler_ctrl,
ch->base_ofs +
DCSS_SCALER_CTRL);
ch->scaler_ctrl_chgd = false;
}
}
}

View File

@ -0,0 +1,179 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include "dcss-dev.h"
#define DCSS_SS_SYS_CTRL 0x00
#define RUN_EN BIT(0)
#define DCSS_SS_DISPLAY 0x10
#define LRC_X_POS 0
#define LRC_X_MASK GENMASK(12, 0)
#define LRC_Y_POS 16
#define LRC_Y_MASK GENMASK(28, 16)
#define DCSS_SS_HSYNC 0x20
#define DCSS_SS_VSYNC 0x30
#define SYNC_START_POS 0
#define SYNC_START_MASK GENMASK(12, 0)
#define SYNC_END_POS 16
#define SYNC_END_MASK GENMASK(28, 16)
#define SYNC_POL BIT(31)
#define DCSS_SS_DE_ULC 0x40
#define ULC_X_POS 0
#define ULC_X_MASK GENMASK(12, 0)
#define ULC_Y_POS 16
#define ULC_Y_MASK GENMASK(28, 16)
#define ULC_POL BIT(31)
#define DCSS_SS_DE_LRC 0x50
#define DCSS_SS_MODE 0x60
#define PIPE_MODE_POS 0
#define PIPE_MODE_MASK GENMASK(1, 0)
#define DCSS_SS_COEFF 0x70
#define HORIZ_A_POS 0
#define HORIZ_A_MASK GENMASK(3, 0)
#define HORIZ_B_POS 4
#define HORIZ_B_MASK GENMASK(7, 4)
#define HORIZ_C_POS 8
#define HORIZ_C_MASK GENMASK(11, 8)
#define HORIZ_H_NORM_POS 12
#define HORIZ_H_NORM_MASK GENMASK(14, 12)
#define VERT_A_POS 16
#define VERT_A_MASK GENMASK(19, 16)
#define VERT_B_POS 20
#define VERT_B_MASK GENMASK(23, 20)
#define VERT_C_POS 24
#define VERT_C_MASK GENMASK(27, 24)
#define VERT_H_NORM_POS 28
#define VERT_H_NORM_MASK GENMASK(30, 28)
#define DCSS_SS_CLIP_CB 0x80
#define DCSS_SS_CLIP_CR 0x90
#define CLIP_MIN_POS 0
#define CLIP_MIN_MASK GENMASK(9, 0)
#define CLIP_MAX_POS 0
#define CLIP_MAX_MASK GENMASK(23, 16)
#define DCSS_SS_INTER_MODE 0xA0
#define INT_EN BIT(0)
#define VSYNC_SHIFT BIT(1)
struct dcss_ss {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
bool in_use;
};
static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs)
{
if (!ss->in_use)
dcss_writel(val, ss->base_reg + ofs);
dcss_ctxld_write(ss->ctxld, ss->ctx_id, val,
ss->base_ofs + ofs);
}
int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
{
struct dcss_ss *ss;
ss = devm_kzalloc(dcss->dev, sizeof(*ss), GFP_KERNEL);
if (!ss)
return -ENOMEM;
dcss->ss = ss;
ss->dev = dcss->dev;
ss->ctxld = dcss->ctxld;
ss->base_reg = devm_ioremap(dcss->dev, ss_base, SZ_4K);
if (!ss->base_reg) {
dev_err(dcss->dev, "ss: unable to remap ss base\n");
devm_kfree(ss->dev, ss);
return -ENOMEM;
}
ss->base_ofs = ss_base;
ss->ctx_id = CTX_SB_HP;
return 0;
}
void dcss_ss_exit(struct dcss_ss *ss)
{
/* stop SS */
dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
if (ss->base_reg)
devm_iounmap(ss->dev, ss->base_reg);
devm_kfree(ss->dev, ss);
}
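/*
 * DCSS_SS_COEFF packs per-axis a/b/c tap weights plus a normalization shift
 * (see the HORIZ_ and VERT_ field definitions above); the clip registers are
 * programmed to pass the full 10-bit 0..0x3ff range.
 */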
void dcss_ss_subsam_set(struct dcss_ss *ss, bool out_is_yuv)
{
dcss_ss_write(ss, out_is_yuv ? 0x21612161 : 0x41614161, DCSS_SS_COEFF);
dcss_ss_write(ss, out_is_yuv ? 2 : 0, DCSS_SS_MODE);
dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB);
dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR);
}
void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
bool phsync, bool pvsync)
{
u16 lrc_x, lrc_y;
u16 hsync_start, hsync_end;
u16 vsync_start, vsync_end;
u16 de_ulc_x, de_ulc_y;
u16 de_lrc_x, de_lrc_y;
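	/*
	 * Sync and DE positions are zero-based pixel/line counts, hence the
	 * "- 1" adjustments below; DCSS_SS_DISPLAY holds the coordinates of
	 * the last pixel/line of the full frame, blanking included.
	 */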
lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
vm->vactive - 1;
dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY);
hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
hsync_end = vm->hsync_len - 1;
dcss_ss_write(ss, (phsync ? SYNC_POL : 0) |
((u32)hsync_end << SYNC_END_POS) | hsync_start,
DCSS_SS_HSYNC);
vsync_start = vm->vfront_porch - 1;
vsync_end = vm->vfront_porch + vm->vsync_len - 1;
dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) |
((u32)vsync_end << SYNC_END_POS) | vsync_start,
DCSS_SS_VSYNC);
de_ulc_x = vm->hsync_len + vm->hback_porch - 1;
de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch;
dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x,
DCSS_SS_DE_ULC);
de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
vm->vactive - 1;
dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC);
}
void dcss_ss_enable(struct dcss_ss *ss)
{
dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL);
ss->in_use = true;
}
void dcss_ss_disable(struct dcss_ss *ss)
{
dcss_ss_write(ss, 0, DCSS_SS_SYS_CTRL);
ss->in_use = false;
}

View File

@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include "dcss-dev.h"
#define DCSS_WRSCL_CTRL_STATUS 0x00
#define WRSCL_ERR BIT(31)
#define WRSCL_ERR_EN BIT(30)
#define WRSCL_FRAME_COMP BIT(29)
#define WRSCL_FRAME_COMP_EN BIT(28)
#define WRSCL_FIFO_SIZE_POS 18
#define WRSCL_FIFO_SIZE_MASK GENMASK(24, 18)
#define WRSCL_P_FREQ_POS 10
#define WRSCL_P_FREQ_MASK GENMASK(17, 10)
#define WRSCL_P_SIZE_POS 7
#define WRSCL_P_SIZE_MASK GENMASK(9, 7)
#define WRSCL_T_SIZE_POS 5
#define WRSCL_T_SIZE_MASK GENMASK(6, 5)
#define WRSCL_BPP_POS 2
#define WRSCL_BPP_MASK GENMASK(4, 2)
#define WRSCL_REPEAT BIT(1)
#define WRSCL_ENABLE BIT(0)
#define DCSS_WRSCL_BASE_ADDR 0x10
#define DCSS_WRSCL_PITCH 0x14
struct dcss_wrscl {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
u32 buf_size;
u32 buf_addr;
void *buf_vaddr;
struct clk *bclk;
u32 ctrl_status;
};
static void dcss_wrscl_write(struct dcss_wrscl *wrscl, u32 val, u32 ofs)
{
dcss_ctxld_write(wrscl->ctxld, wrscl->ctx_id,
val, wrscl->base_ofs + ofs);
}
int dcss_wrscl_init(struct dcss_dev *dcss, unsigned long wrscl_base)
{
struct dcss_wrscl *wrscl;
wrscl = devm_kzalloc(dcss->dev, sizeof(*wrscl), GFP_KERNEL);
if (!wrscl)
return -ENOMEM;
wrscl->base_reg = devm_ioremap(dcss->dev, wrscl_base, SZ_4K);
if (!wrscl->base_reg) {
dev_err(dcss->dev, "wrscl: unable to remap base\n");
devm_kfree(dcss->dev, wrscl);
return -ENOMEM;
}
dcss->wrscl = wrscl;
wrscl->dev = dcss->dev;
wrscl->base_ofs = wrscl_base;
wrscl->ctxld = dcss->ctxld;
wrscl->ctx_id = CTX_SB_HP;
wrscl->bclk = dcss->axi_clk;
return 0;
}
void dcss_wrscl_exit(struct dcss_wrscl *wrscl)
{
devm_iounmap(wrscl->dev, wrscl->base_reg);
devm_kfree(wrscl->dev, wrscl);
}
static const u16 dcss_wrscl_psize_map[] = {64, 128, 256, 512, 1024, 2048, 4096};
u32 dcss_wrscl_setup(struct dcss_wrscl *wrscl, u32 pix_format, u32 vrefresh_hz,
u32 dst_xres, u32 dst_yres)
{
u32 pitch, p_size, p_freq, bpp;
dma_addr_t dma_handle;
u32 bclk_rate = clk_get_rate(wrscl->bclk);
	/* release any previously allocated buffer first */
if (wrscl->buf_addr)
dmam_free_coherent(wrscl->dev, wrscl->buf_size,
wrscl->buf_vaddr, wrscl->buf_addr);
p_size = PSIZE_256;
/* scaler output is YUV444 */
bpp = 4;
/* spread the load over the entire frame */
p_freq = ((u64)bclk_rate * dcss_wrscl_psize_map[p_size]) /
((u64)dst_xres * dst_yres * vrefresh_hz * bpp * 8);
/* choose a slightly smaller p_freq */
p_freq = p_freq - 3 > 255 ? 255 : p_freq - 3;
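	/*
	 * Example: with an 800 MHz bus clock, a 1920x1080@60 destination and
	 * 256-byte bursts, the formula above gives 800e6 * 256 /
	 * (1920 * 1080 * 60 * 4 * 8) = 51, which the "- 3" then lowers to 48.
	 */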
wrscl->ctrl_status = FIFO_512 << WRSCL_FIFO_SIZE_POS;
wrscl->ctrl_status |= p_size << WRSCL_P_SIZE_POS;
wrscl->ctrl_status |= TSIZE_256 << WRSCL_T_SIZE_POS;
wrscl->ctrl_status |= BPP_32_10BIT_OUTPUT << WRSCL_BPP_POS;
wrscl->ctrl_status |= p_freq << WRSCL_P_FREQ_POS;
wrscl->buf_size = dst_xres * dst_yres * bpp;
pitch = dst_xres * bpp;
wrscl->buf_vaddr = dmam_alloc_coherent(wrscl->dev, wrscl->buf_size,
&dma_handle, GFP_KERNEL);
if (!wrscl->buf_vaddr) {
dev_err(wrscl->dev, "wrscl: cannot alloc buf mem\n");
return 0;
}
wrscl->buf_addr = dma_handle;
dcss_wrscl_write(wrscl, wrscl->buf_addr, DCSS_WRSCL_BASE_ADDR);
dcss_wrscl_write(wrscl, pitch, DCSS_WRSCL_PITCH);
return wrscl->buf_addr;
}
void dcss_wrscl_enable(struct dcss_wrscl *wrscl)
{
wrscl->ctrl_status |= WRSCL_ENABLE | WRSCL_REPEAT;
dcss_wrscl_write(wrscl, wrscl->ctrl_status, DCSS_WRSCL_CTRL_STATUS);
}
void dcss_wrscl_disable(struct dcss_wrscl *wrscl)
{
wrscl->ctrl_status &= ~(WRSCL_ENABLE | WRSCL_REPEAT);
dcss_wrscl_write(wrscl, wrscl->ctrl_status, DCSS_WRSCL_CTRL_STATUS);
if (wrscl->buf_addr) {
dmam_free_coherent(wrscl->dev, wrscl->buf_size,
wrscl->buf_vaddr, wrscl->buf_addr);
wrscl->buf_addr = 0;
}
}

View File

@ -0,0 +1,6 @@
config DRM_IMX_DPU
tristate
depends on DRM_IMX
depends on IMX_DPU_CORE
default y if DRM_IMX=y
default m if DRM_IMX=m

View File

@ -0,0 +1,7 @@
ccflags-y += -I $(srctree)/$(src)/../
imx-dpu-crtc-objs := dpu-crtc.o dpu-kms.o dpu-plane.o
obj-$(CONFIG_DRM_IMX_DPU) += imx-dpu-crtc.o
imx-dpu-render-objs := dpu-blit.o
obj-$(CONFIG_DRM_IMX_DPU) += imx-dpu-render.o

View File

@ -0,0 +1,332 @@
/*
* Copyright 2017 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drmP.h>
#include <drm/imx_drm.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <video/dpu.h>
#include "imx-drm.h"
struct imx_drm_dpu_bliteng {
struct dpu_bliteng *dpu_be;
struct list_head list;
};
static DEFINE_MUTEX(imx_drm_dpu_bliteng_lock);
static LIST_HEAD(imx_drm_dpu_bliteng_list);
static int imx_dpu_num;
int dpu_be_get(struct dpu_bliteng *dpu_be);
void dpu_be_put(struct dpu_bliteng *dpu_be);
s32 dpu_bliteng_get_id(struct dpu_bliteng *dpu_be);
void dpu_be_configure_prefetch(struct dpu_bliteng *dpu_be,
u32 width, u32 height,
u32 x_offset, u32 y_offset,
u32 stride, u32 format, u64 modifier,
u64 baddr, u64 uv_addr);
u32 *dpu_bliteng_get_cmd_list(struct dpu_bliteng *dpu_be);
void dpu_be_wait(struct dpu_bliteng *dpu_be);
int dpu_bliteng_get_empty_instance(struct dpu_bliteng **dpu_be,
struct device *dev);
void dpu_bliteng_set_id(struct dpu_bliteng *dpu_be, int id);
void dpu_bliteng_set_dev(struct dpu_bliteng *dpu_be, struct device *dev);
int dpu_bliteng_init(struct dpu_bliteng *dpu_bliteng);
void dpu_bliteng_fini(struct dpu_bliteng *dpu_bliteng);
int dpu_be_blit(struct dpu_bliteng *dpu_be,
u32 *cmdlist, u32 cmdnum);
static struct imx_drm_dpu_bliteng *imx_drm_dpu_bliteng_find_by_id(s32 id)
{
struct imx_drm_dpu_bliteng *bliteng;
mutex_lock(&imx_drm_dpu_bliteng_lock);
list_for_each_entry(bliteng, &imx_drm_dpu_bliteng_list, list) {
if (id == dpu_bliteng_get_id(bliteng->dpu_be)) {
mutex_unlock(&imx_drm_dpu_bliteng_lock);
return bliteng;
}
}
mutex_unlock(&imx_drm_dpu_bliteng_lock);
return NULL;
}
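/*
 * The SET_CMDLIST ioctl expects req->user_data to point at an s32 blit
 * engine id followed immediately by a struct drm_imx_dpu_frame_info; the
 * command words themselves are copied separately from req->cmd.
 */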
static int imx_drm_dpu_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_imx_dpu_set_cmdlist *req;
struct imx_drm_dpu_bliteng *bliteng;
struct dpu_bliteng *dpu_be;
u32 cmd_nr, *cmd, *cmd_list;
void *user_data;
s32 id = 0;
struct drm_imx_dpu_frame_info frame_info;
int ret;
req = data;
user_data = (void *)(unsigned long)req->user_data;
if (copy_from_user(&id, (void __user *)user_data,
sizeof(id))) {
return -EFAULT;
}
if (id != 0 && id != 1)
return -EINVAL;
user_data += sizeof(id);
if (copy_from_user(&frame_info, (void __user *)user_data,
sizeof(frame_info))) {
return -EFAULT;
}
bliteng = imx_drm_dpu_bliteng_find_by_id(id);
if (!bliteng) {
DRM_ERROR("Failed to get dpu_bliteng\n");
return -ENODEV;
}
dpu_be = bliteng->dpu_be;
ret = dpu_be_get(dpu_be);
cmd_nr = req->cmd_nr;
cmd = (u32 *)(unsigned long)req->cmd;
cmd_list = dpu_bliteng_get_cmd_list(dpu_be);
if (copy_from_user(cmd_list, (void __user *)cmd,
sizeof(*cmd) * cmd_nr)) {
ret = -EFAULT;
goto err;
}
dpu_be_configure_prefetch(dpu_be, frame_info.width, frame_info.height,
frame_info.x_offset, frame_info.y_offset,
frame_info.stride, frame_info.format,
frame_info.modifier, frame_info.baddr,
frame_info.uv_addr);
ret = dpu_be_blit(dpu_be, cmd_list, cmd_nr);
err:
dpu_be_put(dpu_be);
return ret;
}
static int imx_drm_dpu_wait_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_imx_dpu_wait *wait;
struct imx_drm_dpu_bliteng *bliteng;
struct dpu_bliteng *dpu_be;
void *user_data;
s32 id = 0;
int ret;
wait = data;
user_data = (void *)(unsigned long)wait->user_data;
if (copy_from_user(&id, (void __user *)user_data,
sizeof(id))) {
return -EFAULT;
}
if (id != 0 && id != 1)
return -EINVAL;
bliteng = imx_drm_dpu_bliteng_find_by_id(id);
if (!bliteng) {
DRM_ERROR("Failed to get dpu_bliteng\n");
return -ENODEV;
}
dpu_be = bliteng->dpu_be;
ret = dpu_be_get(dpu_be);
dpu_be_wait(dpu_be);
dpu_be_put(dpu_be);
return ret;
}
static int imx_drm_dpu_get_param_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
enum drm_imx_dpu_param *param = data;
int ret;
switch (*param) {
case (DRM_IMX_MAX_DPUS):
ret = imx_dpu_num;
break;
default:
ret = -EINVAL;
DRM_ERROR("Unknown param![%d]\n", *param);
break;
}
return ret;
}
static struct drm_ioctl_desc imx_drm_dpu_ioctls[] = {
DRM_IOCTL_DEF_DRV(IMX_DPU_SET_CMDLIST, imx_drm_dpu_set_cmdlist_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(IMX_DPU_WAIT, imx_drm_dpu_wait_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(IMX_DPU_GET_PARAM, imx_drm_dpu_get_param_ioctl,
DRM_RENDER_ALLOW),
};
static int dpu_bliteng_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = (struct drm_device *)data;
struct imx_drm_dpu_bliteng *bliteng;
struct dpu_bliteng *dpu_bliteng = NULL;
int ret;
bliteng = devm_kzalloc(dev, sizeof(*bliteng), GFP_KERNEL);
if (!bliteng)
return -ENOMEM;
INIT_LIST_HEAD(&bliteng->list);
ret = dpu_bliteng_get_empty_instance(&dpu_bliteng, dev);
if (ret)
return ret;
dpu_bliteng_set_id(dpu_bliteng, imx_dpu_num);
dpu_bliteng_set_dev(dpu_bliteng, dev);
ret = dpu_bliteng_init(dpu_bliteng);
if (ret)
return ret;
mutex_lock(&imx_drm_dpu_bliteng_lock);
bliteng->dpu_be = dpu_bliteng;
list_add_tail(&bliteng->list, &imx_drm_dpu_bliteng_list);
mutex_unlock(&imx_drm_dpu_bliteng_lock);
dev_set_drvdata(dev, dpu_bliteng);
imx_dpu_num++;
if (drm->driver->num_ioctls == 0) {
drm->driver->ioctls = imx_drm_dpu_ioctls;
drm->driver->num_ioctls = ARRAY_SIZE(imx_drm_dpu_ioctls);
}
return 0;
}
static void dpu_bliteng_unbind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = (struct drm_device *)data;
struct imx_drm_dpu_bliteng *bliteng;
struct dpu_bliteng *dpu_bliteng = dev_get_drvdata(dev);
s32 id = dpu_bliteng_get_id(dpu_bliteng);
bliteng = imx_drm_dpu_bliteng_find_by_id(id);
list_del(&bliteng->list);
dpu_bliteng_fini(dpu_bliteng);
dev_set_drvdata(dev, NULL);
imx_dpu_num--;
if (drm->driver->num_ioctls != 0) {
drm->driver->ioctls = NULL;
drm->driver->num_ioctls = 0;
}
}
static const struct component_ops dpu_bliteng_ops = {
.bind = dpu_bliteng_bind,
.unbind = dpu_bliteng_unbind,
};
static int dpu_bliteng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (!dev->platform_data)
return -EINVAL;
return component_add(dev, &dpu_bliteng_ops);
}
static int dpu_bliteng_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &dpu_bliteng_ops);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int dpu_bliteng_suspend(struct device *dev)
{
struct dpu_bliteng *dpu_bliteng = dev_get_drvdata(dev);
int ret;
if (dpu_bliteng == NULL)
return 0;
ret = dpu_be_get(dpu_bliteng);
dpu_be_wait(dpu_bliteng);
dpu_be_put(dpu_bliteng);
dpu_bliteng_fini(dpu_bliteng);
return 0;
}
static int dpu_bliteng_resume(struct device *dev)
{
struct dpu_bliteng *dpu_bliteng = dev_get_drvdata(dev);
if (dpu_bliteng != NULL)
dpu_bliteng_init(dpu_bliteng);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(dpu_bliteng_pm_ops,
dpu_bliteng_suspend, dpu_bliteng_resume);
struct platform_driver dpu_bliteng_driver = {
.driver = {
.name = "imx-drm-dpu-bliteng",
.pm = &dpu_bliteng_pm_ops,
},
.probe = dpu_bliteng_probe,
.remove = dpu_bliteng_remove,
};
module_platform_driver(dpu_bliteng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NXP Semiconductor");
MODULE_DESCRIPTION("i.MX DRM DPU BLITENG");

File diff suppressed because it is too large

View File

@ -0,0 +1,94 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _DPU_CRTC_H_
#define _DPU_CRTC_H_
#include <drm/drm_vblank.h>
#include <video/dpu.h>
#include "dpu-plane.h"
#include "../imx-drm.h"
struct dpu_crtc {
struct device *dev;
struct drm_crtc base;
struct imx_drm_crtc *imx_crtc;
struct dpu_constframe *pa_cf;
struct dpu_constframe *sa_cf;
struct dpu_disengcfg *dec;
struct dpu_extdst *ed;
struct dpu_framegen *fg;
struct dpu_tcon *tcon;
struct dpu_store *st;
struct dpu_constframe *aux_pa_cf;
struct dpu_constframe *aux_sa_cf;
struct dpu_disengcfg *aux_dec;
struct dpu_extdst *aux_ed;
struct dpu_framegen *aux_fg;
struct dpu_tcon *aux_tcon;
/* master */
struct dpu_constframe *m_pa_cf;
struct dpu_constframe *m_sa_cf;
struct dpu_disengcfg *m_dec;
struct dpu_extdst *m_ed;
struct dpu_framegen *m_fg;
struct dpu_tcon *m_tcon;
/* slave */
struct dpu_constframe *s_pa_cf;
struct dpu_constframe *s_sa_cf;
struct dpu_disengcfg *s_dec;
struct dpu_extdst *s_ed;
struct dpu_framegen *s_fg;
struct dpu_tcon *s_tcon;
struct dpu_plane **plane;
unsigned int hw_plane_num;
unsigned int stream_id;
unsigned int crtc_grp_id;
unsigned int syncmode_min_prate;
unsigned int singlemode_max_width;
unsigned int master_stream_id;
int vbl_irq;
int safety_shdld_irq;
int content_shdld_irq;
int dec_shdld_irq;
bool aux_is_master;
struct completion safety_shdld_done;
struct completion content_shdld_done;
struct completion dec_shdld_done;
struct drm_pending_vblank_event *event;
};
struct dpu_crtc_state {
struct imx_crtc_state imx_crtc_state;
struct dpu_plane_state **dpu_plane_states;
bool use_pc;
};
static inline struct dpu_crtc_state *to_dpu_crtc_state(struct imx_crtc_state *s)
{
return container_of(s, struct dpu_crtc_state, imx_crtc_state);
}
static inline struct dpu_crtc *to_dpu_crtc(struct drm_crtc *crtc)
{
return container_of(crtc, struct dpu_crtc, base);
}
struct dpu_plane_state **
crtc_state_get_dpu_plane_states(struct drm_crtc_state *state);
#endif

View File

@ -0,0 +1,726 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/sort.h>
#include <video/dpu.h>
#include "dpu-crtc.h"
#include "dpu-plane.h"
#include "../imx-drm.h"
static struct drm_plane_state **
dpu_atomic_alloc_tmp_planes_per_crtc(struct drm_device *dev)
{
int total_planes = dev->mode_config.num_total_plane;
struct drm_plane_state **states;
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return ERR_PTR(-ENOMEM);
return states;
}
static int zpos_cmp(const void *a, const void *b)
{
const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
return sa->normalized_zpos - sb->normalized_zpos;
}
static int dpu_atomic_sort_planes_per_crtc(struct drm_crtc_state *crtc_state,
struct drm_plane_state **states)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_device *dev = state->dev;
struct drm_plane *plane;
int n = 0;
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
states[n++] = plane_state;
}
sort(states, n, sizeof(*states), zpos_cmp, NULL);
return n;
}
static void
dpu_atomic_compute_plane_lrx_per_crtc(struct drm_crtc_state *crtc_state,
struct drm_plane_state **states, int n)
{
struct dpu_plane_state *dpstate;
struct drm_plane_state *plane_state;
int i;
int half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
bool lo, ro, bo;
/* compute left/right_crtc_x if pixel combiner is needed */
for (i = 0; i < n; i++) {
plane_state = states[i];
dpstate = to_dpu_plane_state(plane_state);
lo = dpstate->left_src_w && !dpstate->right_src_w;
ro = !dpstate->left_src_w && dpstate->right_src_w;
bo = dpstate->left_src_w && dpstate->right_src_w;
if (lo || bo) {
dpstate->left_crtc_x = plane_state->crtc_x;
dpstate->right_crtc_x = 0;
} else if (ro) {
dpstate->left_crtc_x = 0;
dpstate->right_crtc_x =
plane_state->crtc_x - half_hdisplay;
}
}
}
static void
dpu_atomic_set_top_plane_per_crtc(struct drm_plane_state **states, int n,
bool use_pc)
{
struct dpu_plane_state *dpstate;
bool found_l_top = false, found_r_top = false;
int i;
for (i = n - 1; i >= 0; i--) {
dpstate = to_dpu_plane_state(states[i]);
if (use_pc) {
if (dpstate->left_src_w && !found_l_top) {
dpstate->is_left_top = true;
found_l_top = true;
} else {
dpstate->is_left_top = false;
}
if (dpstate->right_src_w && !found_r_top) {
dpstate->is_right_top = true;
found_r_top = true;
} else {
dpstate->is_right_top = false;
}
} else {
dpstate->is_top = (i == (n - 1)) ? true : false;
}
}
}
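/*
 * Walk the zpos-sorted plane states bottom to top and, for each one, claim a
 * fetch unit from the group's src_a_mask that is either unused or already
 * bound to the right display stream and that has the FetchEco, H-scaler and
 * V-scaler capabilities the state needs; layerblend stages and blend units
 * are then handed out in z-order. Returns -EINVAL once the group runs out of
 * suitable resources.
 */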
static int
dpu_atomic_assign_plane_source_per_crtc(struct drm_plane_state **states,
int n, bool use_pc)
{
struct dpu_plane_state *dpstate;
struct dpu_plane *dplane;
struct dpu_plane_grp *grp;
struct drm_framebuffer *fb;
struct dpu_fetchunit *fu;
struct dpu_fetchunit *fe;
struct dpu_hscaler *hs;
struct dpu_vscaler *vs;
lb_prim_sel_t stage;
dpu_block_id_t blend;
unsigned int sid, src_sid;
unsigned int num_planes;
int bit;
int i, j, k = 0, m;
int total_asrc_num;
int s0_layer_cnt = 0, s1_layer_cnt = 0;
int s0_n = 0, s1_n = 0;
u32 src_a_mask, cap_mask, fe_mask, hs_mask, vs_mask;
bool need_fetcheco, need_hscaler, need_vscaler;
bool fmt_is_yuv;
bool alloc_aux_source;
if (use_pc) {
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
if (dpstate->left_src_w)
s0_n++;
if (dpstate->right_src_w)
s1_n++;
}
} else {
s0_n = n;
s1_n = n;
}
/* for active planes only */
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
dplane = to_dpu_plane(states[i]->plane);
fb = states[i]->fb;
num_planes = fb->format->num_planes;
fmt_is_yuv = drm_format_is_yuv(fb->format->format);
grp = dplane->grp;
alloc_aux_source = false;
if (use_pc)
sid = dpstate->left_src_w ? 0 : 1;
else
sid = dplane->stream_id;
again:
if (alloc_aux_source)
sid ^= 1;
need_fetcheco = (num_planes > 1);
need_hscaler = (states[i]->src_w >> 16 != states[i]->crtc_w);
need_vscaler = (states[i]->src_h >> 16 != states[i]->crtc_h);
total_asrc_num = 0;
src_a_mask = grp->src_a_mask;
fe_mask = 0;
hs_mask = 0;
vs_mask = 0;
for_each_set_bit(bit, (unsigned long *)&src_a_mask, 32)
total_asrc_num++;
/* assign source */
mutex_lock(&grp->mutex);
for (j = 0; j < total_asrc_num; j++) {
k = ffs(src_a_mask) - 1;
			if (k < 0) {
				mutex_unlock(&grp->mutex);
				return -EINVAL;
			}
			fu = source_to_fu(&grp->res, sources[k]);
			if (!fu) {
				mutex_unlock(&grp->mutex);
				return -EINVAL;
			}
/* avoid on-the-fly/hot migration */
src_sid = fu->ops->get_stream_id(fu);
if (src_sid && src_sid != BIT(sid))
goto next;
if (fetchunit_is_fetchdecode(fu)) {
cap_mask = fetchdecode_get_vproc_mask(fu);
if (need_fetcheco) {
fe = fetchdecode_get_fetcheco(fu);
/* avoid on-the-fly/hot migration */
src_sid = fu->ops->get_stream_id(fe);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the fetcheco cap? */
if (!dpu_vproc_has_fetcheco_cap(cap_mask))
goto next;
fe_mask =
dpu_vproc_get_fetcheco_cap(cap_mask);
/* fetcheco available? */
if (grp->src_use_vproc_mask & fe_mask)
goto next;
}
if (need_hscaler) {
hs = fetchdecode_get_hscaler(fu);
/* avoid on-the-fly/hot migration */
src_sid = hscaler_get_stream_id(hs);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the hscale cap */
if (!dpu_vproc_has_hscale_cap(cap_mask))
goto next;
hs_mask =
dpu_vproc_get_hscale_cap(cap_mask);
/* hscaler available? */
if (grp->src_use_vproc_mask & hs_mask)
goto next;
}
if (need_vscaler) {
vs = fetchdecode_get_vscaler(fu);
/* avoid on-the-fly/hot migration */
src_sid = vscaler_get_stream_id(vs);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the vscale cap? */
if (!dpu_vproc_has_vscale_cap(cap_mask))
goto next;
vs_mask =
dpu_vproc_get_vscale_cap(cap_mask);
/* vscaler available? */
if (grp->src_use_vproc_mask & vs_mask)
goto next;
}
} else {
if (fmt_is_yuv || need_fetcheco ||
need_hscaler || need_vscaler)
goto next;
}
grp->src_a_mask &= ~BIT(k);
grp->src_use_vproc_mask |= fe_mask | hs_mask | vs_mask;
break;
next:
src_a_mask &= ~BIT(k);
fe_mask = 0;
hs_mask = 0;
vs_mask = 0;
}
mutex_unlock(&grp->mutex);
if (j == total_asrc_num)
return -EINVAL;
if (alloc_aux_source)
dpstate->aux_source = sources[k];
else
dpstate->source = sources[k];
/* assign stage and blend */
if (sid) {
m = grp->hw_plane_num - (s1_n - s1_layer_cnt);
stage = s1_layer_cnt ? stages[m - 1] : cf_stages[sid];
blend = blends[m];
s1_layer_cnt++;
} else {
stage = s0_layer_cnt ?
stages[s0_layer_cnt - 1] : cf_stages[sid];
blend = blends[s0_layer_cnt];
s0_layer_cnt++;
}
if (alloc_aux_source) {
dpstate->aux_stage = stage;
dpstate->aux_blend = blend;
} else {
dpstate->stage = stage;
dpstate->blend = blend;
}
if (dpstate->need_aux_source && !alloc_aux_source) {
alloc_aux_source = true;
goto again;
}
}
return 0;
}
static void
dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(struct drm_crtc *crtc,
u32 crtc_mask,
struct drm_atomic_state *state,
bool *puts)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
bool found_pstate = false;
int i;
if ((crtc_mask & drm_crtc_mask(crtc)) == 0) {
for_each_new_plane_in_state(state, plane, plane_state, i) {
if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
found_pstate = true;
break;
}
}
if (!found_pstate)
puts[drm_crtc_index(crtc)] = true;
}
}
static void
dpu_atomic_put_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
int index = drm_plane_index(plane);
plane->funcs->atomic_destroy_state(plane, state->planes[index].state);
state->planes[index].ptr = NULL;
state->planes[index].state = NULL;
drm_modeset_unlock(&plane->mutex);
}
static void
dpu_atomic_put_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
int index = drm_crtc_index(crtc);
crtc->funcs->atomic_destroy_state(crtc, state->crtcs[index].state);
state->crtcs[index].ptr = NULL;
state->crtcs[index].state = NULL;
drm_modeset_unlock(&crtc->mutex);
}
static void
dpu_atomic_put_possible_states_per_crtc(struct drm_crtc_state *crtc_state)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_crtc_state *old_crtc_state = crtc->state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
struct dpu_plane_state **old_dpstates;
struct dpu_plane_state *old_dpstate, *new_dpstate;
u32 active_mask = 0;
int i;
old_dpstates = crtc_state_get_dpu_plane_states(old_crtc_state);
if (WARN_ON(!old_dpstates))
return;
for (i = 0; i < dplane->grp->hw_plane_num; i++) {
old_dpstate = old_dpstates[i];
if (!old_dpstate)
continue;
active_mask |= BIT(i);
drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
if (drm_plane_index(plane) !=
drm_plane_index(old_dpstate->base.plane))
continue;
plane_state =
drm_atomic_get_existing_plane_state(state,
plane);
if (WARN_ON(!plane_state))
return;
new_dpstate = to_dpu_plane_state(plane_state);
active_mask &= ~BIT(i);
/*
* Should be enough to check the below real HW plane
* resources only.
* Things like vproc resources should be fine.
*/
if (old_dpstate->stage != new_dpstate->stage ||
old_dpstate->source != new_dpstate->source ||
old_dpstate->blend != new_dpstate->blend ||
old_dpstate->aux_stage != new_dpstate->aux_stage ||
old_dpstate->aux_source != new_dpstate->aux_source ||
old_dpstate->aux_blend != new_dpstate->aux_blend)
return;
}
}
/* pure software check */
if (WARN_ON(active_mask))
return;
drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
dpu_atomic_put_plane_state(state, plane);
dpu_atomic_put_crtc_state(state, crtc);
}
static int dpu_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct dpu_plane *dpu_plane;
struct drm_plane_state *plane_state;
struct dpu_plane_state *dpstate;
struct drm_framebuffer *fb;
struct dpu_plane_grp *grp[MAX_DPU_PLANE_GRP];
int ret, i, grp_id;
int active_plane[MAX_DPU_PLANE_GRP];
int active_plane_fetcheco[MAX_DPU_PLANE_GRP];
int active_plane_hscale[MAX_DPU_PLANE_GRP];
int active_plane_vscale[MAX_DPU_PLANE_GRP];
int half_hdisplay = 0;
bool pipe_states_prone_to_put[MAX_CRTC];
bool use_pc[MAX_DPU_PLANE_GRP];
u32 crtc_mask_in_state = 0;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret) {
DRM_DEBUG_KMS("%s: failed to check modeset\n", __func__);
return ret;
}
for (i = 0; i < MAX_CRTC; i++)
pipe_states_prone_to_put[i] = false;
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
active_plane[i] = 0;
active_plane_fetcheco[i] = 0;
active_plane_hscale[i] = 0;
active_plane_vscale[i] = 0;
use_pc[i] = false;
grp[i] = NULL;
}
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
crtc_mask_in_state |= drm_crtc_mask(crtc);
drm_for_each_crtc(crtc, dev) {
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct imx_crtc_state *imx_crtc_state;
struct dpu_crtc_state *dcstate;
bool need_left, need_right, need_aux_source, use_pc_per_crtc;
use_pc_per_crtc = false;
dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(crtc,
crtc_mask_in_state, state,
pipe_states_prone_to_put);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
imx_crtc_state = to_imx_crtc_state(crtc_state);
dcstate = to_dpu_crtc_state(imx_crtc_state);
if (crtc_state->enable) {
if (use_pc[dpu_crtc->crtc_grp_id]) {
DRM_DEBUG_KMS("other crtc needs pixel combiner\n");
return -EINVAL;
}
if (crtc_state->adjusted_mode.clock >
dpu_crtc->syncmode_min_prate ||
crtc_state->adjusted_mode.hdisplay >
dpu_crtc->singlemode_max_width)
use_pc_per_crtc = true;
}
if (use_pc_per_crtc) {
use_pc[dpu_crtc->crtc_grp_id] = true;
half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
}
dcstate->use_pc = use_pc_per_crtc;
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
DRM_DEBUG_KMS("failed to get plane state\n");
return PTR_ERR(plane_state);
}
dpstate = to_dpu_plane_state(plane_state);
fb = plane_state->fb;
dpu_plane = to_dpu_plane(plane);
grp_id = dpu_plane->grp->id;
active_plane[grp_id]++;
need_left = false;
need_right = false;
need_aux_source = false;
if (use_pc_per_crtc) {
if (plane_state->crtc_x < half_hdisplay)
need_left = true;
if ((plane_state->crtc_w +
plane_state->crtc_x) > half_hdisplay)
need_right = true;
if (need_left && need_right) {
need_aux_source = true;
active_plane[grp_id]++;
}
}
if (need_left && need_right) {
dpstate->left_crtc_w = half_hdisplay;
dpstate->left_crtc_w -= plane_state->crtc_x;
dpstate->left_src_w = dpstate->left_crtc_w;
} else if (need_left) {
dpstate->left_crtc_w = plane_state->crtc_w;
dpstate->left_src_w = plane_state->src_w >> 16;
} else {
dpstate->left_crtc_w = 0;
dpstate->left_src_w = 0;
}
if (need_right && need_left) {
dpstate->right_crtc_w = plane_state->crtc_x +
plane_state->crtc_w;
dpstate->right_crtc_w -= half_hdisplay;
dpstate->right_src_w = dpstate->right_crtc_w;
} else if (need_right) {
dpstate->right_crtc_w = plane_state->crtc_w;
dpstate->right_src_w = plane_state->src_w >> 16;
} else {
dpstate->right_crtc_w = 0;
dpstate->right_src_w = 0;
}
if (fb->format->num_planes > 1) {
active_plane_fetcheco[grp_id]++;
if (need_aux_source)
active_plane_fetcheco[grp_id]++;
}
if (plane_state->src_w >> 16 != plane_state->crtc_w) {
if (use_pc_per_crtc)
return -EINVAL;
active_plane_hscale[grp_id]++;
}
if (plane_state->src_h >> 16 != plane_state->crtc_h) {
if (use_pc_per_crtc)
return -EINVAL;
active_plane_vscale[grp_id]++;
}
if (grp[grp_id] == NULL)
grp[grp_id] = dpu_plane->grp;
dpstate->need_aux_source = need_aux_source;
}
}
/* enough resources? */
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
if (!grp[i])
continue;
if (active_plane[i] > grp[i]->hw_plane_num) {
DRM_DEBUG_KMS("no enough fetch units\n");
return -EINVAL;
}
if (active_plane_fetcheco[i] > grp[i]->hw_plane_fetcheco_num) {
DRM_DEBUG_KMS("no enough FetchEcos\n");
return -EINVAL;
}
if (active_plane_hscale[i] > grp[i]->hw_plane_hscaler_num) {
DRM_DEBUG_KMS("no enough Hscalers\n");
return -EINVAL;
}
if (active_plane_vscale[i] > grp[i]->hw_plane_vscaler_num) {
DRM_DEBUG_KMS("no enough Vscalers\n");
return -EINVAL;
}
}
/* initialize resource mask */
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
if (!grp[i])
continue;
mutex_lock(&grp[i]->mutex);
grp[i]->src_a_mask = grp[i]->src_mask;
grp[i]->src_use_vproc_mask = 0;
mutex_unlock(&grp[i]->mutex);
}
ret = drm_atomic_normalize_zpos(dev, state);
if (ret)
return ret;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_plane_state **states;
int n;
states = dpu_atomic_alloc_tmp_planes_per_crtc(dev);
if (IS_ERR(states)) {
DRM_DEBUG_KMS(
"[CRTC:%d:%s] cannot alloc plane state ptrs\n",
crtc->base.id, crtc->name);
return PTR_ERR(states);
}
n = dpu_atomic_sort_planes_per_crtc(crtc_state, states);
if (n < 0) {
DRM_DEBUG_KMS("[CRTC:%d:%s] failed to sort planes\n",
crtc->base.id, crtc->name);
kfree(states);
return n;
}
/* no active planes? */
if (n == 0) {
kfree(states);
continue;
}
if (use_pc[dpu_crtc->crtc_grp_id])
dpu_atomic_compute_plane_lrx_per_crtc(crtc_state,
states, n);
dpu_atomic_set_top_plane_per_crtc(states, n,
use_pc[dpu_crtc->crtc_grp_id]);
ret = dpu_atomic_assign_plane_source_per_crtc(states, n,
use_pc[dpu_crtc->crtc_grp_id]);
if (ret) {
DRM_DEBUG_KMS("[CRTC:%d:%s] cannot assign plane rscs\n",
crtc->base.id, crtc->name);
kfree(states);
return ret;
}
kfree(states);
}
drm_for_each_crtc(crtc, dev) {
if (pipe_states_prone_to_put[drm_crtc_index(crtc)]) {
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
dpu_atomic_put_possible_states_per_crtc(crtc_state);
}
}
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
DRM_DEBUG_KMS("%s: failed to check planes\n", __func__);
return ret;
}
return ret;
}
const struct drm_mode_config_funcs dpu_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = dpu_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};

View File

@ -0,0 +1,20 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _DPU_KMS_H_
#define _DPU_KMS_H_
extern const struct drm_mode_config_funcs dpu_drm_mode_config_funcs;
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,195 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef __DPU_PLANE_H__
#define __DPU_PLANE_H__
#include <video/dpu.h>
#include "../imx-drm.h"
#define MAX_DPU_PLANE_GRP (MAX_CRTC / 2)
enum dpu_plane_src_type {
DPU_PLANE_SRC_FL,
DPU_PLANE_SRC_FW,
DPU_PLANE_SRC_FD,
};
struct dpu_plane {
struct drm_plane base;
struct dpu_plane_grp *grp;
struct list_head head;
unsigned int stream_id;
};
struct dpu_plane_state {
struct drm_plane_state base;
lb_prim_sel_t stage;
lb_sec_sel_t source;
dpu_block_id_t blend;
lb_prim_sel_t aux_stage;
lb_sec_sel_t aux_source;
dpu_block_id_t aux_blend;
bool is_top;
bool use_prefetch;
bool use_aux_prefetch;
bool need_aux_source;
/* used when pixel combiner is needed */
unsigned int left_src_w;
unsigned int left_crtc_w;
unsigned int left_crtc_x;
unsigned int right_src_w;
unsigned int right_crtc_w;
unsigned int right_crtc_x;
bool is_left_top;
bool is_right_top;
};
static const lb_prim_sel_t cf_stages[] = {LB_PRIM_SEL__CONSTFRAME0,
LB_PRIM_SEL__CONSTFRAME1};
static const lb_prim_sel_t stages[] = {LB_PRIM_SEL__LAYERBLEND0,
LB_PRIM_SEL__LAYERBLEND1,
LB_PRIM_SEL__LAYERBLEND2,
LB_PRIM_SEL__LAYERBLEND3};
/* TODO: Add source entries for subsidiary layers. */
static const lb_sec_sel_t sources[] = {LB_SEC_SEL__FETCHLAYER0,
LB_SEC_SEL__FETCHWARP2,
LB_SEC_SEL__FETCHDECODE0,
LB_SEC_SEL__FETCHDECODE1};
static const dpu_block_id_t blends[] = {ID_LAYERBLEND0, ID_LAYERBLEND1,
ID_LAYERBLEND2, ID_LAYERBLEND3};
static inline struct dpu_plane *to_dpu_plane(struct drm_plane *plane)
{
return container_of(plane, struct dpu_plane, base);
}
static inline struct dpu_plane_state *
to_dpu_plane_state(struct drm_plane_state *plane_state)
{
return container_of(plane_state, struct dpu_plane_state, base);
}
static inline int source_to_type(lb_sec_sel_t source)
{
switch (source) {
case LB_SEC_SEL__FETCHLAYER0:
return DPU_PLANE_SRC_FL;
case LB_SEC_SEL__FETCHWARP2:
return DPU_PLANE_SRC_FW;
case LB_SEC_SEL__FETCHDECODE0:
case LB_SEC_SEL__FETCHDECODE1:
return DPU_PLANE_SRC_FD;
default:
break;
}
WARN_ON(1);
return -EINVAL;
}
static inline int source_to_id(lb_sec_sel_t source)
{
int i, offset = 0;
int type = source_to_type(source);
for (i = 0; i < ARRAY_SIZE(sources); i++) {
if (source != sources[i])
continue;
/* FetchLayer */
if (type == DPU_PLANE_SRC_FL)
return i;
/* FetchWarp or FetchDecode */
while (offset < ARRAY_SIZE(sources)) {
if (source_to_type(sources[offset]) == type)
break;
offset++;
}
return i - offset;
}
WARN_ON(1);
return -EINVAL;
}
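/*
 * Worked example of the mapping above: sources[] holds one FetchLayer, one
 * FetchWarp and two FetchDecodes, so source_to_id() returns a per-type index:
 * LB_SEC_SEL__FETCHLAYER0 -> 0 (FL), LB_SEC_SEL__FETCHWARP2 -> 0 (FW),
 * LB_SEC_SEL__FETCHDECODE0 -> 0 (FD), LB_SEC_SEL__FETCHDECODE1 -> 1 (FD).
 */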
static inline struct dpu_fetchunit *
source_to_fu(struct dpu_plane_res *res, lb_sec_sel_t source)
{
int fu_type = source_to_type(source);
int fu_id = source_to_id(source);
if (fu_type < 0 || fu_id < 0)
return NULL;
switch (fu_type) {
case DPU_PLANE_SRC_FD:
return res->fd[fu_id];
case DPU_PLANE_SRC_FL:
return res->fl[fu_id];
case DPU_PLANE_SRC_FW:
return res->fw[fu_id];
}
return NULL;
}
static inline struct dpu_fetchunit *
dpstate_to_fu(struct dpu_plane_state *dpstate)
{
struct drm_plane *plane = dpstate->base.plane;
struct dpu_plane *dplane = to_dpu_plane(plane);
struct dpu_plane_res *res = &dplane->grp->res;
return source_to_fu(res, dpstate->source);
}
static inline int blend_to_id(dpu_block_id_t blend)
{
int i;
for (i = 0; i < ARRAY_SIZE(blends); i++) {
if (blend == blends[i])
return i;
}
WARN_ON(1);
return -EINVAL;
}
static inline bool drm_format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return true;
default:
break;
}
return false;
}
struct dpu_plane *dpu_plane_create(struct drm_device *drm,
unsigned int possible_crtcs,
unsigned int stream_id,
struct dpu_plane_grp *grp,
enum drm_plane_type type);
#endif


@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <video/imx-ipu-v3.h>
#include <video/imx-lcdif.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@ -24,11 +25,9 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <video/dpu.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
#define MAX_CRTC 4
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
@ -48,81 +47,6 @@ void imx_drm_encoder_destroy(struct drm_encoder *encoder)
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
static int imx_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
/*
* Check modeset again in case crtc_state->mode_changed is
* updated in plane's ->atomic_check callback.
*/
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
/* Assign PRG/PRE channels and check if all constrains are satisfied. */
ret = ipu_planes_assign_pre(dev, state);
if (ret)
return ret;
return ret;
}
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = imx_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
bool plane_disabling = false;
int i;
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY |
DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
drm_atomic_helper_commit_modeset_enables(dev, state);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
if (drm_atomic_plane_disabling(old_plane_state, new_plane_state))
plane_disabling = true;
}
/*
* The flip done wait is only strictly required by imx-drm if a deferred
* plane disable is in-flight. As the core requires blocking commits
* to wait for the flip it is done here unconditionally. This keeps the
* workitem around a bit longer than required for the majority of
* non-blocking commits, but we accept that for the sake of simplicity.
*/
drm_atomic_helper_wait_for_flip_done(dev, state);
if (plane_disabling) {
for_each_old_plane_in_state(state, plane, old_plane_state, i)
ipu_plane_disable_deferred(plane);
}
drm_atomic_helper_commit_hw_done(state);
}
static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
.atomic_commit_tail = imx_drm_atomic_commit_tail,
};
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np)
{
@ -182,6 +106,29 @@ static int compare_of(struct device *dev, void *data)
if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
struct ipu_client_platformdata *pdata = dev->platform_data;
return pdata->of_node == np;
} else if (strcmp(dev->driver->name, "imx-dpu-crtc") == 0) {
struct dpu_client_platformdata *pdata = dev->platform_data;
return pdata->of_node == np;
} else if (strcmp(dev->driver->name, "imx-lcdif-crtc") == 0) {
struct lcdif_client_platformdata *pdata = dev->platform_data;
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
/* set legacyfb_depth to be 32 for lcdif, since
* default format of the connectors attached to
* lcdif is usually RGB888
*/
if (pdata->of_node == np)
legacyfb_depth = 32;
#endif
return pdata->of_node == np;
}
/* This is a special case for dpu bliteng. */
if (strcmp(dev->driver->name, "imx-drm-dpu-bliteng") == 0) {
struct dpu_client_platformdata *pdata = dev->platform_data;
return pdata->of_node == np;
}
@ -194,11 +141,104 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == np;
}
static const char *const imx_drm_dpu_comp_parents[] = {
"fsl,imx8qm-dpu",
"fsl,imx8qxp-dpu",
};
static bool imx_drm_parent_is_compatible(struct device *dev,
const char *const comp_parents[],
int comp_parents_size)
{
struct device_node *port, *parent;
bool ret = false;
int i;
port = of_parse_phandle(dev->of_node, "ports", 0);
if (!port)
return ret;
parent = of_get_parent(port);
for (i = 0; i < comp_parents_size; i++) {
if (of_device_is_compatible(parent, comp_parents[i])) {
ret = true;
break;
}
}
of_node_put(parent);
of_node_put(port);
return ret;
}
static inline bool has_dpu(struct device *dev)
{
return imx_drm_parent_is_compatible(dev, imx_drm_dpu_comp_parents,
ARRAY_SIZE(imx_drm_dpu_comp_parents));
}
static void add_dpu_bliteng_components(struct device *dev,
struct component_match **matchptr)
{
/*
* There may be two dpu bliteng devices, so the compare data needs
* something to distinguish them. Use the parent dpu's of_node as
* that data here.
*/
struct device_node *port, *parent;
/* assume max dpu number is 8 */
struct device_node *dpu[8];
int num_dpu = 0;
int i, j;
bool found = false;
for (i = 0; ; i++) {
port = of_parse_phandle(dev->of_node, "ports", i);
if (!port)
break;
parent = of_get_parent(port);
for (j = 0; j < num_dpu; j++) {
if (dpu[j] == parent) {
found = true;
break;
}
}
if (found) {
found = false;
} else {
if (num_dpu >= ARRAY_SIZE(dpu)) {
dev_err(dev, "The number of found dpu is greater than max [%ld].\n",
ARRAY_SIZE(dpu));
of_node_put(parent);
of_node_put(port);
break;
}
dpu[num_dpu] = parent;
num_dpu++;
component_match_add(dev, matchptr, compare_of, parent);
}
of_node_put(parent);
of_node_put(port);
}
}
static int imx_drm_bind(struct device *dev)
{
struct drm_device *drm;
int ret;
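/*
 * Presumably the DPU blit engine serves render-only clients, so render
 * node support (DRIVER_RENDER) is advertised only when a DPU is present.
 */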
if (has_dpu(dev))
imx_drm_driver.driver_features |= DRIVER_RENDER;
drm = drm_dev_alloc(&imx_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
@ -223,9 +263,6 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.min_height = 1;
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
drm_mode_config_init(drm);
@ -234,8 +271,6 @@ static int imx_drm_bind(struct device *dev)
if (ret)
goto err_kms;
dev_set_drvdata(dev, drm);
/* Now try and bind all our sub-components */
ret = component_bind_all(dev, drm);
if (ret)
@ -261,6 +296,8 @@ static int imx_drm_bind(struct device *dev)
drm_fbdev_generic_setup(drm, legacyfb_depth);
dev_set_drvdata(dev, drm);
return 0;
err_poll_fini:
@ -277,6 +314,9 @@ static void imx_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
if (has_dpu(dev))
imx_drm_driver.driver_features &= ~DRIVER_RENDER;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
@ -296,7 +336,14 @@ static const struct component_master_ops imx_drm_ops = {
static int imx_drm_platform_probe(struct platform_device *pdev)
{
int ret = drm_of_component_probe(&pdev->dev, compare_of, &imx_drm_ops);
struct component_match *match = NULL;
int ret;
if (has_dpu(&pdev->dev))
add_dpu_bliteng_components(&pdev->dev, &match);
ret = drm_of_component_probe_with_match(&pdev->dev, match, compare_of,
&imx_drm_ops);
if (!ret)
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
@ -343,23 +390,7 @@ static struct platform_driver imx_drm_pdrv = {
.of_match_table = imx_drm_dt_ids,
},
};
static struct platform_driver * const drivers[] = {
&imx_drm_pdrv,
&ipu_drm_driver,
};
static int __init imx_drm_init(void)
{
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_drm_init);
static void __exit imx_drm_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(imx_drm_exit);
module_platform_driver(imx_drm_pdrv);
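/*
 * module_platform_driver() expands to the usual module_init()/module_exit()
 * boilerplate for this single platform driver; the IPUv3 CRTC driver now
 * registers itself (see ipuv3-crtc.c below), so the manual drivers[] array
 * is no longer needed.
 */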
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");


@ -2,6 +2,8 @@
#ifndef _IMX_DRM_H_
#define _IMX_DRM_H_
#define MAX_CRTC 4
struct device_node;
struct drm_crtc;
struct drm_connector;
@ -28,8 +30,6 @@ int imx_drm_init_drm(struct platform_device *pdev,
int preferred_bpp);
int imx_drm_exit_drm(void);
extern struct platform_driver ipu_drm_driver;
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
@ -40,7 +40,4 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
void imx_drm_connector_destroy(struct drm_connector *connector);
void imx_drm_encoder_destroy(struct drm_encoder *encoder);
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state);
#endif /* _IMX_DRM_H_ */

File diff suppressed because it is too large


@ -0,0 +1,6 @@
config DRM_IMX_IPUV3
tristate
depends on DRM_IMX
depends on IMX_IPUV3_CORE
default y if DRM_IMX=y
default m if DRM_IMX=m


@ -0,0 +1,4 @@
ccflags-y += -I $(srctree)/$(src)/../
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o ipuv3-kms.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o


@ -24,6 +24,7 @@
#include <drm/drm_vblank.h>
#include "imx-drm.h"
#include "ipuv3-kms.h"
#include "ipuv3-plane.h"
#define DRIVER_DESC "i.MX IPUv3 Graphics"
@ -105,7 +106,7 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
}
static void imx_drm_crtc_reset(struct drm_crtc *crtc)
static void ipu_drm_crtc_reset(struct drm_crtc *crtc)
{
struct imx_crtc_state *state;
@ -125,7 +126,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
state->base.crtc = crtc;
}
static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
static struct drm_crtc_state *ipu_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct imx_crtc_state *state;
@ -141,7 +142,7 @@ static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc
return &state->base;
}
static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
static void ipu_drm_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
@ -168,9 +169,9 @@ static const struct drm_crtc_funcs ipu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = imx_drm_crtc_reset,
.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
.atomic_destroy_state = imx_drm_crtc_destroy_state,
.reset = ipu_drm_crtc_reset,
.atomic_duplicate_state = ipu_drm_crtc_duplicate_state,
.atomic_destroy_state = ipu_drm_crtc_destroy_state,
.enable_vblank = ipu_enable_vblank,
.disable_vblank = ipu_disable_vblank,
};
@ -450,6 +451,10 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
drm->mode_config.funcs = &ipuv3_drm_mode_config_funcs;
drm->mode_config.helper_private = &ipuv3_drm_mode_config_helpers;
drm->mode_config.allow_fb_modifiers = true;
dev_set_drvdata(dev, ipu_crtc);
return 0;
@ -492,10 +497,16 @@ static int ipu_drm_remove(struct platform_device *pdev)
return 0;
}
struct platform_driver ipu_drm_driver = {
static struct platform_driver ipu_drm_driver = {
.driver = {
.name = "imx-ipuv3-crtc",
},
.probe = ipu_drm_probe,
.remove = ipu_drm_remove,
};
module_platform_driver(ipu_drm_driver);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-ipuv3-crtc");


@ -0,0 +1,94 @@
/*
* Copyright 2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
static int ipuv3_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
/*
* Check modeset again in case crtc_state->mode_changed is
* updated in plane's ->atomic_check callback.
*/
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
/* Assign PRG/PRE channels and check if all constraints are satisfied. */
ret = ipu_planes_assign_pre(dev, state);
if (ret)
return ret;
return ret;
}
const struct drm_mode_config_funcs ipuv3_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = ipuv3_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
static void ipuv3_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
bool plane_disabling = false;
int i;
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY |
DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
drm_atomic_helper_commit_modeset_enables(dev, state);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
if (drm_atomic_plane_disabling(old_plane_state, new_plane_state))
plane_disabling = true;
}
/*
* The flip done wait is only strictly required by imx-drm if a deferred
* plane disable is in-flight. As the core requires blocking commits
* to wait for the flip it is done here unconditionally. This keeps the
* workitem around a bit longer than required for the majority of
* non-blocking commits, but we accept that for the sake of simplicity.
*/
drm_atomic_helper_wait_for_flip_done(dev, state);
if (plane_disabling) {
for_each_old_plane_in_state(state, plane, old_plane_state, i)
ipu_plane_disable_deferred(plane);
}
drm_atomic_helper_commit_hw_done(state);
}
const struct drm_mode_config_helper_funcs ipuv3_drm_mode_config_helpers = {
.atomic_commit_tail = ipuv3_drm_atomic_commit_tail,
};


@ -0,0 +1,21 @@
/*
* Copyright 2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _IPUV3_KMS_H_
#define _IPUV3_KMS_H_
extern const struct drm_mode_config_funcs ipuv3_drm_mode_config_funcs;
extern const struct drm_mode_config_helper_funcs ipuv3_drm_mode_config_helpers;
#endif


@ -50,4 +50,7 @@ void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
void ipu_plane_disable_deferred(struct drm_plane *plane);
bool ipu_plane_atomic_update_pending(struct drm_plane *plane);
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state);
#endif


@ -0,0 +1,8 @@
config DRM_IMX_LCDIF
tristate "i.MX LCDIF controller DRM driver"
depends on DRM_IMX
depends on IMX_LCDIF_CORE
default y if DRM_IMX=y
default m if DRM_IMX=m
help
Enable the i.MX LCDIF controller DRM driver under DRM_IMX.


@ -0,0 +1,4 @@
ccflags-y += -I $(srctree)/$(src)/../
imx-lcdif-crtc-objs := lcdif-crtc.o lcdif-plane.o lcdif-kms.o
obj-$(CONFIG_DRM_IMX_LCDIF) += imx-lcdif-crtc.o


@ -0,0 +1,389 @@
/*
* Copyright 2018 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/component.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <video/imx-lcdif.h>
#include <video/videomode.h>
#include "imx-drm.h"
#include "lcdif-plane.h"
#include "lcdif-kms.h"
struct lcdif_crtc {
struct device *dev;
struct drm_crtc base;
struct lcdif_plane *plane[2];
int vbl_irq;
u32 pix_fmt; /* drm fourcc */
};
#define to_lcdif_crtc(crtc) container_of(crtc, struct lcdif_crtc, base)
static void lcdif_crtc_reset(struct drm_crtc *crtc)
{
struct imx_crtc_state *state;
if (crtc->state) {
__drm_atomic_helper_crtc_destroy_state(crtc->state);
state = to_imx_crtc_state(crtc->state);
kfree(state);
crtc->state = NULL;
}
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return;
crtc->state = &state->base;
crtc->state->crtc = crtc;
}
static struct drm_crtc_state *lcdif_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct imx_crtc_state *state, *orig_state;
if (WARN_ON(!crtc->state))
return NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
orig_state = to_imx_crtc_state(crtc->state);
state->bus_format = orig_state->bus_format;
state->bus_flags = orig_state->bus_flags;
state->di_hsync_pin = orig_state->di_hsync_pin;
state->di_vsync_pin = orig_state->di_vsync_pin;
return &state->base;
}
static void lcdif_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(state);
kfree(to_imx_crtc_state(state));
}
static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct lcdif_crtc *lcdif_crtc = to_lcdif_crtc(crtc);
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(state);
/* Don't check 'bus_format' when CRTC is
* going to be disabled.
*/
if (!state->enable)
return 0;
/* A commit in which the CRTC is active but has
 * no planes attached to it should be invalid.
 */
if (state->active && !state->plane_mask)
return -EINVAL;
/* Check whether the requested bus format is
 * supported by the LCDIF CRTC.
 */
switch (imx_crtc_state->bus_format) {
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB888_1X24:
break;
default:
dev_err(lcdif_crtc->dev,
"unsupported bus format: %#x\n",
imx_crtc_state->bus_format);
return -EINVAL;
}
return 0;
}
static void lcdif_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
drm_crtc_vblank_on(crtc);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc));
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
}
static void lcdif_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
/* LCDIF doesn't have a command buffer */
return;
}
static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct lcdif_crtc *lcdif_crtc = to_lcdif_crtc(crtc);
struct lcdif_soc *lcdif = dev_get_drvdata(lcdif_crtc->dev->parent);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
struct videomode vm;
drm_display_mode_to_videomode(mode, &vm);
if (imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_HIGH)
vm.flags |= DISPLAY_FLAGS_DE_HIGH;
else
vm.flags |= DISPLAY_FLAGS_DE_LOW;
if (imx_crtc_state->bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
else
vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
pm_runtime_get_sync(lcdif_crtc->dev->parent);
lcdif_set_mode(lcdif, &vm);
/* config LCDIF output bus format */
lcdif_set_bus_fmt(lcdif, imx_crtc_state->bus_format);
/* Defer enabling the lcdif controller to the plane update,
 * since only then is the lcdif configuration complete enough
 * to actually run the controller.
 */
}
static void lcdif_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct lcdif_crtc *lcdif_crtc = to_lcdif_crtc(crtc);
struct lcdif_soc *lcdif = dev_get_drvdata(lcdif_crtc->dev->parent);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_off(crtc);
lcdif_disable_controller(lcdif);
pm_runtime_put(lcdif_crtc->dev->parent);
}
static const struct drm_crtc_helper_funcs lcdif_helper_funcs = {
.atomic_check = lcdif_crtc_atomic_check,
.atomic_begin = lcdif_crtc_atomic_begin,
.atomic_flush = lcdif_crtc_atomic_flush,
.atomic_enable = lcdif_crtc_atomic_enable,
.atomic_disable = lcdif_crtc_atomic_disable,
};
static int lcdif_enable_vblank(struct drm_crtc *crtc)
{
struct lcdif_crtc *lcdif_crtc = to_lcdif_crtc(crtc);
struct lcdif_soc *lcdif = dev_get_drvdata(lcdif_crtc->dev->parent);
lcdif_vblank_irq_enable(lcdif);
enable_irq(lcdif_crtc->vbl_irq);
return 0;
}
static void lcdif_disable_vblank(struct drm_crtc *crtc)
{
struct lcdif_crtc *lcdif_crtc = to_lcdif_crtc(crtc);
struct lcdif_soc *lcdif = dev_get_drvdata(lcdif_crtc->dev->parent);
disable_irq_nosync(lcdif_crtc->vbl_irq);
lcdif_vblank_irq_disable(lcdif);
}
static const struct drm_crtc_funcs lcdif_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = lcdif_crtc_reset,
.atomic_duplicate_state = lcdif_crtc_duplicate_state,
.atomic_destroy_state = lcdif_crtc_destroy_state,
.enable_vblank = lcdif_enable_vblank,
.disable_vblank = lcdif_disable_vblank,
};
static irqreturn_t lcdif_crtc_vblank_irq_handler(int irq, void *dev_id)
{
struct lcdif_crtc *lcdif_crtc = dev_id;
struct lcdif_soc *lcdif = dev_get_drvdata(lcdif_crtc->dev->parent);
drm_crtc_handle_vblank(&lcdif_crtc->base);
lcdif_vblank_irq_clear(lcdif);
return IRQ_HANDLED;
}
static int lcdif_crtc_init(struct lcdif_crtc *lcdif_crtc,
struct lcdif_client_platformdata *pdata,
struct drm_device *drm)
{
int ret;
struct lcdif_plane *primary = lcdif_crtc->plane[0];
struct lcdif_soc *lcdif = dev_get_drvdata(lcdif_crtc->dev->parent);
/* Primary plane
 * The 'possible_crtcs' of the primary plane will be
 * recalculated later during the 'crtc' initialization.
 */
primary = lcdif_plane_init(drm, lcdif, 0, DRM_PLANE_TYPE_PRIMARY, 0);
if (IS_ERR(primary))
return PTR_ERR(primary);
lcdif_crtc->plane[0] = primary;
/* TODO: Overlay plane */
lcdif_crtc->base.port = pdata->of_node;
drm_crtc_helper_add(&lcdif_crtc->base, &lcdif_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &lcdif_crtc->base,
&lcdif_crtc->plane[0]->base, NULL,
&lcdif_crtc_funcs, NULL);
if (ret) {
dev_err(lcdif_crtc->dev, "failed to init crtc\n");
goto primary_plane_deinit;
}
lcdif_crtc->vbl_irq = lcdif_vblank_irq_get(lcdif);
WARN_ON(lcdif_crtc->vbl_irq < 0);
ret = devm_request_irq(lcdif_crtc->dev, lcdif_crtc->vbl_irq,
lcdif_crtc_vblank_irq_handler, 0,
dev_name(lcdif_crtc->dev), lcdif_crtc);
if (ret) {
dev_err(lcdif_crtc->dev,
"vblank irq request failed: %d\n", ret);
goto primary_plane_deinit;
}
disable_irq(lcdif_crtc->vbl_irq);
return 0;
primary_plane_deinit:
lcdif_plane_deinit(drm, primary);
return ret;
}
static int lcdif_crtc_bind(struct device *dev, struct device *master,
void *data)
{
int ret;
struct drm_device *drm = data;
struct lcdif_crtc *lcdif_crtc;
struct lcdif_client_platformdata *pdata = dev->platform_data;
dev_dbg(dev, "%s: lcdif crtc bind begin\n", __func__);
lcdif_crtc = devm_kzalloc(dev, sizeof(*lcdif_crtc), GFP_KERNEL);
if (!lcdif_crtc)
return -ENOMEM;
lcdif_crtc->dev = dev;
ret = lcdif_crtc_init(lcdif_crtc, pdata, drm);
if (ret)
return ret;
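/*
 * Install the lcdif mode_config hooks only if no other CRTC driver
 * (e.g. the IPUv3 one, which sets them unconditionally) got there first.
 */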
if (!drm->mode_config.funcs)
drm->mode_config.funcs = &lcdif_drm_mode_config_funcs;
if (!drm->mode_config.helper_private)
drm->mode_config.helper_private = &lcdif_drm_mode_config_helpers;
/* limit the max width and height */
drm->mode_config.max_width = 1920;
drm->mode_config.max_height = 1920;
dev_set_drvdata(dev, lcdif_crtc);
dev_dbg(dev, "%s: lcdif crtc bind end\n", __func__);
return 0;
}
static void lcdif_crtc_unbind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
struct lcdif_crtc *lcdif_crtc = dev_get_drvdata(dev);
lcdif_plane_deinit(drm, lcdif_crtc->plane[0]);
}
static const struct component_ops lcdif_crtc_ops = {
.bind = lcdif_crtc_bind,
.unbind = lcdif_crtc_unbind,
};
static int lcdif_crtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
dev_dbg(&pdev->dev, "%s: lcdif crtc probe begin\n", __func__);
if (!dev->platform_data) {
dev_err(dev, "no platform data\n");
return -EINVAL;
}
return component_add(dev, &lcdif_crtc_ops);
}
static int lcdif_crtc_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &lcdif_crtc_ops);
return 0;
}
static struct platform_driver lcdif_crtc_driver = {
.probe = lcdif_crtc_probe,
.remove = lcdif_crtc_remove,
.driver = {
.name = "imx-lcdif-crtc",
},
};
module_platform_driver(lcdif_crtc_driver);
MODULE_DESCRIPTION("NXP i.MX LCDIF DRM CRTC driver");
MODULE_AUTHOR("Fancy Fang <chen.fang@nxp.com>");
MODULE_LICENSE("GPL");


@ -0,0 +1,47 @@
/*
* Copyright 2018 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
static void lcdif_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
}
const struct drm_mode_config_funcs lcdif_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
struct drm_mode_config_helper_funcs lcdif_drm_mode_config_helpers = {
.atomic_commit_tail = lcdif_drm_atomic_commit_tail,
};


@ -0,0 +1,21 @@
/*
* Copyright 2018 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LCDIF_KMS_H
#define __LCDIF_KMS_H
extern const struct drm_mode_config_funcs lcdif_drm_mode_config_funcs;
extern struct drm_mode_config_helper_funcs lcdif_drm_mode_config_helpers;
#endif


@ -0,0 +1,240 @@
/*
* Copyright 2018 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <video/imx-lcdif.h>
#include "lcdif-plane.h"
static uint32_t lcdif_pixel_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_BGR565,
};
static int lcdif_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
int ret;
struct drm_plane_state *old_state = plane->state;
struct drm_framebuffer *fb = plane_state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
struct drm_crtc_state *crtc_state;
struct drm_display_mode *mode;
struct drm_rect clip = { 0 };
/* 'fb' should also be NULL; this has already been checked by
 * the core sanity check function 'drm_atomic_plane_check()'.
 */
if (!plane_state->crtc) {
WARN_ON(fb);
return 0;
}
/* lcdif crtc can only display from (0,0) for each plane */
if (plane_state->crtc_x || plane_state->crtc_y)
return -EINVAL;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
plane_state->crtc);
mode = &crtc_state->adjusted_mode;
clip.x2 = mode->hdisplay;
clip.y2 = mode->vdisplay;
ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
if (ret)
return ret;
if (!plane_state->visible)
return -EINVAL;
/* Force 'mode_changed' when the fb pitches or format change,
 * since the pitch and format related registers of LCDIF cannot
 * be reconfigured while LCDIF is running, and 'mode_changed'
 * means a full modeset is required.
 */
if (old_fb && likely(!crtc_state->mode_changed)) {
if (old_fb->pitches[0] != fb->pitches[0] ||
old_fb->format->format != fb->format->format)
crtc_state->mode_changed = true;
}
return 0;
}
static void lcdif_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct lcdif_plane *lcdif_plane = to_lcdif_plane(plane);
struct lcdif_soc *lcdif = lcdif_plane->lcdif;
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *gem_obj = NULL;
u32 fb_addr, src_off, src_w, fb_idx, cpp, stride;
bool crop;
/* the plane and crtc are being disabled */
if (!fb)
return;
/* TODO: for now we just update the next buf addr
* and the fb pixel format, since the mode set will
* be done in crtc's ->enable() helper func
*/
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
/* TODO: only support RGB */
gem_obj = drm_fb_cma_get_gem_obj(fb, 0);
src_off = (state->src_y >> 16) * fb->pitches[0] +
(state->src_x >> 16) * fb->format->cpp[0];
fb_addr = gem_obj->paddr + fb->offsets[0] + src_off;
fb_idx = 0;
break;
default:
/* TODO: add overlay later */
return;
}
lcdif_set_fb_addr(lcdif, fb_idx, fb_addr);
/* Configure the pixel format and horizontal cropping if the
 * CRTC needs a full modeset, which also requires enabling
 * LCDIF to run at the end.
 */
if (unlikely(drm_atomic_crtc_needs_modeset(state->crtc->state))) {
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
lcdif_set_pix_fmt(lcdif, fb->format->format);
cpp = fb->format->cpp[0];
stride = DIV_ROUND_UP(fb->pitches[0], cpp);
src_w = state->src_w >> 16;
WARN_ON(src_w > fb->width);
crop = src_w != stride;
lcdif_set_fb_hcrop(lcdif, src_w, stride, crop);
lcdif_enable_controller(lcdif);
}
}
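/*
 * Worked example for the hcrop math above: an XRGB8888 framebuffer that is
 * 1920 pixels wide but allocated with an 8192-byte pitch has cpp = 4, so
 * stride = DIV_ROUND_UP(8192, 4) = 2048 pixels; with src_w = 1920 the plane
 * needs horizontal cropping (crop = true). With a tightly packed pitch of
 * 7680 bytes, stride equals src_w and no cropping is programmed.
 */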
static void lcdif_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
WARN_ON(fb);
/* TODO: the CRTC disable has already been handled by the CRTC
 * helper function, so nothing more seems to be required here;
 * the only possible thing would be to set the next buf addr to
 * 0 in the CRTC.
 */
}
static const struct drm_plane_helper_funcs lcdif_plane_helper_funcs = {
.atomic_check = lcdif_plane_atomic_check,
.atomic_update = lcdif_plane_atomic_update,
.atomic_disable = lcdif_plane_atomic_disable,
};
static void lcdif_plane_destroy(struct drm_plane *plane)
{
struct lcdif_plane *lcdif_plane = to_lcdif_plane(plane);
drm_plane_cleanup(plane);
kfree(lcdif_plane);
}
static const struct drm_plane_funcs lcdif_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = lcdif_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
struct lcdif_plane *lcdif_plane_init(struct drm_device *dev,
struct lcdif_soc *lcdif,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos)
{
int ret;
struct lcdif_plane *lcdif_plane;
/* lcdif doesn't support fb modifiers */
if (zpos || dev->mode_config.allow_fb_modifiers)
return ERR_PTR(-EINVAL);
lcdif_plane = kzalloc(sizeof(*lcdif_plane), GFP_KERNEL);
if (!lcdif_plane)
return ERR_PTR(-ENOMEM);
lcdif_plane->lcdif = lcdif;
drm_plane_helper_add(&lcdif_plane->base, &lcdif_plane_helper_funcs);
ret = drm_universal_plane_init(dev, &lcdif_plane->base, possible_crtcs,
&lcdif_plane_funcs, lcdif_pixel_formats,
ARRAY_SIZE(lcdif_pixel_formats), NULL,
type, NULL);
if (ret) {
kfree(lcdif_plane);
return ERR_PTR(ret);
}
ret = drm_plane_create_zpos_immutable_property(&lcdif_plane->base, zpos);
if (ret) {
kfree(lcdif_plane);
return ERR_PTR(ret);
}
return lcdif_plane;
}
void lcdif_plane_deinit(struct drm_device *dev,
struct lcdif_plane *lcdif_plane)
{
struct drm_plane *plane = &lcdif_plane->base;
if (plane->zpos_property)
drm_property_destroy(dev, plane->zpos_property);
lcdif_plane_destroy(plane);
}


@ -0,0 +1,37 @@
/*
* Copyright 2018 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LCDIF_PLANE_H
#define __LCDIF_PLANE_H
#include <drm/drm_plane.h>
#include <video/imx-lcdif.h>
struct lcdif_plane {
struct drm_plane base;
struct lcdif_soc *lcdif;
};
#define to_lcdif_plane(plane) container_of(plane, struct lcdif_plane, base)
struct lcdif_plane *lcdif_plane_init(struct drm_device *drm,
struct lcdif_soc *lcdif,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos);
void lcdif_plane_deinit(struct drm_device *dev,
struct lcdif_plane *lcdif_plane);
#endif


@ -0,0 +1,227 @@
/*
* Copyright 2018 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SEC_DSIM_DPHY_LN14LPP_H__
#define __SEC_DSIM_DPHY_LN14LPP_H__
#include <drm/bridge/sec_mipi_dsim.h>
/* descending order based on 'bit_clk' value */
static const struct sec_mipi_dsim_dphy_timing dphy_timing_ln14lpp_v1p2[] = {
{ DSIM_DPHY_TIMING(2100, 19, 91, 22, 19, 20, 35, 22, 15, 26), },
{ DSIM_DPHY_TIMING(2090, 19, 91, 22, 19, 19, 35, 22, 15, 26), },
{ DSIM_DPHY_TIMING(2080, 19, 91, 21, 18, 19, 35, 22, 15, 26), },
{ DSIM_DPHY_TIMING(2070, 18, 90, 21, 18, 19, 35, 22, 15, 25), },
{ DSIM_DPHY_TIMING(2060, 18, 90, 21, 18, 19, 34, 22, 15, 25), },
{ DSIM_DPHY_TIMING(2050, 18, 89, 21, 18, 19, 34, 22, 15, 25), },
{ DSIM_DPHY_TIMING(2040, 18, 89, 21, 18, 19, 34, 21, 15, 25), },
{ DSIM_DPHY_TIMING(2030, 18, 88, 21, 18, 19, 34, 21, 15, 25), },
{ DSIM_DPHY_TIMING(2020, 18, 88, 21, 18, 19, 34, 21, 15, 25), },
{ DSIM_DPHY_TIMING(2010, 18, 87, 21, 18, 19, 34, 21, 15, 25), },
{ DSIM_DPHY_TIMING(2000, 18, 87, 21, 18, 19, 33, 21, 15, 25), },
{ DSIM_DPHY_TIMING(1990, 18, 87, 21, 18, 18, 33, 21, 14, 24), },
{ DSIM_DPHY_TIMING(1980, 18, 86, 21, 18, 18, 33, 21, 14, 24), },
{ DSIM_DPHY_TIMING(1970, 17, 86, 21, 17, 18, 33, 21, 14, 24), },
{ DSIM_DPHY_TIMING(1960, 17, 85, 21, 17, 18, 33, 21, 14, 24), },
{ DSIM_DPHY_TIMING(1950, 17, 85, 21, 17, 18, 32, 21, 14, 24), },
{ DSIM_DPHY_TIMING(1940, 17, 84, 20, 17, 18, 32, 21, 14, 24), },
{ DSIM_DPHY_TIMING(1930, 17, 84, 20, 17, 18, 32, 20, 14, 24), },
{ DSIM_DPHY_TIMING(1920, 17, 84, 20, 17, 18, 32, 20, 14, 24), },
{ DSIM_DPHY_TIMING(1910, 17, 83, 20, 17, 18, 32, 20, 14, 23), },
{ DSIM_DPHY_TIMING(1900, 17, 83, 20, 17, 18, 32, 20, 14, 23), },
{ DSIM_DPHY_TIMING(1890, 17, 82, 20, 17, 18, 31, 20, 14, 23), },
{ DSIM_DPHY_TIMING(1880, 17, 82, 20, 17, 17, 31, 20, 14, 23), },
{ DSIM_DPHY_TIMING(1870, 17, 81, 20, 17, 17, 31, 20, 14, 23), },
{ DSIM_DPHY_TIMING(1860, 16, 81, 20, 17, 17, 31, 20, 13, 23), },
{ DSIM_DPHY_TIMING(1850, 16, 80, 20, 16, 17, 31, 20, 13, 23), },
{ DSIM_DPHY_TIMING(1840, 16, 80, 20, 16, 17, 30, 20, 13, 23), },
{ DSIM_DPHY_TIMING(1830, 16, 80, 20, 16, 17, 30, 20, 13, 22), },
{ DSIM_DPHY_TIMING(1820, 16, 79, 20, 16, 17, 30, 19, 13, 22), },
{ DSIM_DPHY_TIMING(1810, 16, 79, 19, 16, 17, 30, 19, 13, 22), },
{ DSIM_DPHY_TIMING(1800, 16, 78, 19, 16, 17, 30, 19, 13, 22), },
{ DSIM_DPHY_TIMING(1790, 16, 78, 19, 16, 17, 30, 19, 13, 22), },
{ DSIM_DPHY_TIMING(1780, 16, 77, 19, 16, 16, 29, 19, 13, 22), },
{ DSIM_DPHY_TIMING(1770, 16, 77, 19, 16, 16, 29, 19, 13, 22), },
{ DSIM_DPHY_TIMING(1760, 16, 77, 19, 16, 16, 29, 19, 13, 22), },
{ DSIM_DPHY_TIMING(1750, 15, 76, 19, 16, 16, 29, 19, 13, 21), },
{ DSIM_DPHY_TIMING(1740, 15, 76, 19, 15, 16, 29, 19, 13, 21), },
{ DSIM_DPHY_TIMING(1730, 15, 75, 19, 15, 16, 28, 19, 12, 21), },
{ DSIM_DPHY_TIMING(1720, 15, 75, 19, 15, 16, 28, 19, 12, 21), },
{ DSIM_DPHY_TIMING(1710, 15, 74, 19, 15, 16, 28, 18, 12, 21), },
{ DSIM_DPHY_TIMING(1700, 15, 74, 19, 15, 16, 28, 18, 12, 21), },
{ DSIM_DPHY_TIMING(1690, 15, 73, 19, 15, 16, 28, 18, 12, 21), },
{ DSIM_DPHY_TIMING(1680, 15, 73, 18, 15, 16, 28, 18, 12, 21), },
{ DSIM_DPHY_TIMING(1670, 15, 73, 18, 15, 15, 27, 18, 12, 20), },
{ DSIM_DPHY_TIMING(1660, 15, 72, 18, 15, 15, 27, 18, 12, 20), },
{ DSIM_DPHY_TIMING(1650, 14, 72, 18, 15, 15, 27, 18, 12, 20), },
{ DSIM_DPHY_TIMING(1640, 14, 71, 18, 15, 15, 27, 18, 12, 20), },
{ DSIM_DPHY_TIMING(1630, 14, 71, 18, 15, 15, 27, 18, 12, 20), },
{ DSIM_DPHY_TIMING(1620, 14, 70, 18, 14, 15, 26, 18, 12, 20), },
{ DSIM_DPHY_TIMING(1610, 14, 70, 18, 14, 15, 26, 17, 12, 20), },
{ DSIM_DPHY_TIMING(1600, 14, 70, 18, 14, 15, 26, 17, 12, 20), },
{ DSIM_DPHY_TIMING(1590, 14, 69, 18, 14, 15, 26, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1580, 14, 69, 18, 14, 15, 26, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1570, 14, 68, 18, 14, 15, 26, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1560, 14, 68, 18, 14, 14, 25, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1550, 14, 67, 18, 14, 14, 25, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1540, 13, 67, 17, 14, 14, 25, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1530, 13, 66, 17, 14, 14, 25, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1520, 13, 66, 17, 14, 14, 25, 17, 11, 19), },
{ DSIM_DPHY_TIMING(1510, 13, 66, 17, 13, 14, 24, 17, 11, 18), },
{ DSIM_DPHY_TIMING(1500, 13, 65, 17, 13, 14, 24, 16, 11, 18), },
{ DSIM_DPHY_TIMING(1490, 13, 65, 17, 13, 14, 24, 16, 11, 18), },
{ DSIM_DPHY_TIMING(1480, 13, 64, 17, 13, 14, 24, 16, 11, 18), },
{ DSIM_DPHY_TIMING(1470, 13, 64, 17, 13, 14, 24, 16, 11, 18), },
{ DSIM_DPHY_TIMING(1460, 13, 63, 17, 13, 13, 24, 16, 10, 18), },
{ DSIM_DPHY_TIMING(1450, 13, 63, 17, 13, 13, 23, 16, 10, 18), },
{ DSIM_DPHY_TIMING(1440, 13, 63, 17, 13, 13, 23, 16, 10, 18), },
{ DSIM_DPHY_TIMING(1430, 12, 62, 17, 13, 13, 23, 16, 10, 17), },
{ DSIM_DPHY_TIMING(1420, 12, 62, 17, 13, 13, 23, 16, 10, 17), },
{ DSIM_DPHY_TIMING(1410, 12, 61, 16, 13, 13, 23, 16, 10, 17), },
{ DSIM_DPHY_TIMING(1400, 12, 61, 16, 13, 13, 23, 16, 10, 17), },
{ DSIM_DPHY_TIMING(1390, 12, 60, 16, 12, 13, 22, 15, 10, 17), },
{ DSIM_DPHY_TIMING(1380, 12, 60, 16, 12, 13, 22, 15, 10, 17), },
{ DSIM_DPHY_TIMING(1370, 12, 59, 16, 12, 13, 22, 15, 10, 17), },
{ DSIM_DPHY_TIMING(1360, 12, 59, 16, 12, 13, 22, 15, 10, 17), },
{ DSIM_DPHY_TIMING(1350, 12, 59, 16, 12, 12, 22, 15, 10, 16), },
{ DSIM_DPHY_TIMING(1340, 12, 58, 16, 12, 12, 21, 15, 10, 16), },
{ DSIM_DPHY_TIMING(1330, 11, 58, 16, 12, 12, 21, 15, 9, 16), },
{ DSIM_DPHY_TIMING(1320, 11, 57, 16, 12, 12, 21, 15, 9, 16), },
{ DSIM_DPHY_TIMING(1310, 11, 57, 16, 12, 12, 21, 15, 9, 16), },
{ DSIM_DPHY_TIMING(1300, 11, 56, 16, 12, 12, 21, 15, 9, 16), },
{ DSIM_DPHY_TIMING(1290, 11, 56, 16, 12, 12, 21, 15, 9, 16), },
{ DSIM_DPHY_TIMING(1280, 11, 56, 15, 11, 12, 20, 14, 9, 16), },
{ DSIM_DPHY_TIMING(1270, 11, 55, 15, 11, 12, 20, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1260, 11, 55, 15, 11, 12, 20, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1250, 11, 54, 15, 11, 11, 20, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1240, 11, 54, 15, 11, 11, 20, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1230, 11, 53, 15, 11, 11, 19, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1220, 10, 53, 15, 11, 11, 19, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1210, 10, 52, 15, 11, 11, 19, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1200, 10, 52, 15, 11, 11, 19, 14, 9, 15), },
{ DSIM_DPHY_TIMING(1190, 10, 52, 15, 11, 11, 19, 14, 8, 14), },
{ DSIM_DPHY_TIMING(1180, 10, 51, 15, 11, 11, 19, 13, 8, 14), },
{ DSIM_DPHY_TIMING(1170, 10, 51, 15, 10, 11, 18, 13, 8, 14), },
{ DSIM_DPHY_TIMING(1160, 10, 50, 15, 10, 11, 18, 13, 8, 14), },
{ DSIM_DPHY_TIMING(1150, 10, 50, 15, 10, 11, 18, 13, 8, 14), },
{ DSIM_DPHY_TIMING(1140, 10, 49, 14, 10, 10, 18, 13, 8, 14), },
{ DSIM_DPHY_TIMING(1130, 10, 49, 14, 10, 10, 18, 13, 8, 14), },
{ DSIM_DPHY_TIMING(1120, 10, 49, 14, 10, 10, 17, 13, 8, 14), },
{ DSIM_DPHY_TIMING(1110, 9, 48, 14, 10, 10, 17, 13, 8, 13), },
{ DSIM_DPHY_TIMING(1100, 9, 48, 14, 10, 10, 17, 13, 8, 13), },
{ DSIM_DPHY_TIMING(1090, 9, 47, 14, 10, 10, 17, 13, 8, 13), },
{ DSIM_DPHY_TIMING(1080, 9, 47, 14, 10, 10, 17, 13, 8, 13), },
{ DSIM_DPHY_TIMING(1070, 9, 46, 14, 10, 10, 17, 12, 8, 13), },
{ DSIM_DPHY_TIMING(1060, 9, 46, 14, 10, 10, 16, 12, 7, 13), },
{ DSIM_DPHY_TIMING(1050, 9, 45, 14, 9, 10, 16, 12, 7, 13), },
{ DSIM_DPHY_TIMING(1040, 9, 45, 14, 9, 10, 16, 12, 7, 13), },
{ DSIM_DPHY_TIMING(1030, 9, 45, 14, 9, 9, 16, 12, 7, 12), },
{ DSIM_DPHY_TIMING(1020, 9, 44, 14, 9, 9, 16, 12, 7, 12), },
{ DSIM_DPHY_TIMING(1010, 8, 44, 13, 9, 9, 15, 12, 7, 12), },
{ DSIM_DPHY_TIMING(1000, 8, 43, 13, 9, 9, 15, 12, 7, 12), },
{ DSIM_DPHY_TIMING( 990, 8, 43, 13, 9, 9, 15, 12, 7, 12), },
{ DSIM_DPHY_TIMING( 980, 8, 42, 13, 9, 9, 15, 12, 7, 12), },
{ DSIM_DPHY_TIMING( 970, 8, 42, 13, 9, 9, 15, 12, 7, 12), },
{ DSIM_DPHY_TIMING( 960, 8, 42, 13, 9, 9, 15, 11, 7, 12), },
{ DSIM_DPHY_TIMING( 950, 8, 41, 13, 9, 9, 14, 11, 7, 11), },
{ DSIM_DPHY_TIMING( 940, 8, 41, 13, 8, 9, 14, 11, 7, 11), },
{ DSIM_DPHY_TIMING( 930, 8, 40, 13, 8, 8, 14, 11, 6, 11), },
{ DSIM_DPHY_TIMING( 920, 8, 40, 13, 8, 8, 14, 11, 6, 11), },
{ DSIM_DPHY_TIMING( 910, 8, 39, 13, 8, 8, 14, 11, 6, 11), },
{ DSIM_DPHY_TIMING( 900, 7, 39, 13, 8, 8, 13, 11, 6, 11), },
{ DSIM_DPHY_TIMING( 890, 7, 38, 13, 8, 8, 13, 11, 6, 11), },
{ DSIM_DPHY_TIMING( 880, 7, 38, 12, 8, 8, 13, 11, 6, 11), },
{ DSIM_DPHY_TIMING( 870, 7, 38, 12, 8, 8, 13, 11, 6, 10), },
{ DSIM_DPHY_TIMING( 860, 7, 37, 12, 8, 8, 13, 11, 6, 10), },
{ DSIM_DPHY_TIMING( 850, 7, 37, 12, 8, 8, 13, 10, 6, 10), },
{ DSIM_DPHY_TIMING( 840, 7, 36, 12, 8, 8, 12, 10, 6, 10), },
{ DSIM_DPHY_TIMING( 830, 7, 36, 12, 8, 8, 12, 10, 6, 10), },
{ DSIM_DPHY_TIMING( 820, 7, 35, 12, 7, 7, 12, 10, 6, 10), },
{ DSIM_DPHY_TIMING( 810, 7, 35, 12, 7, 7, 12, 10, 6, 10), },
{ DSIM_DPHY_TIMING( 800, 7, 35, 12, 7, 7, 12, 10, 6, 10), },
{ DSIM_DPHY_TIMING( 790, 6, 34, 12, 7, 7, 11, 10, 5, 9), },
{ DSIM_DPHY_TIMING( 780, 6, 34, 12, 7, 7, 11, 10, 5, 9), },
{ DSIM_DPHY_TIMING( 770, 6, 33, 12, 7, 7, 11, 10, 5, 9), },
{ DSIM_DPHY_TIMING( 760, 6, 33, 12, 7, 7, 11, 10, 5, 9), },
{ DSIM_DPHY_TIMING( 750, 6, 32, 12, 7, 7, 11, 9, 5, 9), },
{ DSIM_DPHY_TIMING( 740, 6, 32, 11, 7, 7, 11, 9, 5, 9), },
{ DSIM_DPHY_TIMING( 730, 6, 31, 11, 7, 7, 10, 9, 5, 9), },
{ DSIM_DPHY_TIMING( 720, 6, 31, 11, 7, 6, 10, 9, 5, 9), },
{ DSIM_DPHY_TIMING( 710, 6, 31, 11, 6, 6, 10, 9, 5, 8), },
{ DSIM_DPHY_TIMING( 700, 6, 30, 11, 6, 6, 10, 9, 5, 8), },
{ DSIM_DPHY_TIMING( 690, 5, 30, 11, 6, 6, 10, 9, 5, 8), },
{ DSIM_DPHY_TIMING( 680, 5, 29, 11, 6, 6, 9, 9, 5, 8), },
{ DSIM_DPHY_TIMING( 670, 5, 29, 11, 6, 6, 9, 9, 5, 8), },
{ DSIM_DPHY_TIMING( 660, 5, 28, 11, 6, 6, 9, 9, 4, 8), },
{ DSIM_DPHY_TIMING( 650, 5, 28, 11, 6, 6, 9, 9, 4, 8), },
{ DSIM_DPHY_TIMING( 640, 5, 28, 11, 6, 6, 9, 8, 4, 8), },
{ DSIM_DPHY_TIMING( 630, 5, 27, 11, 6, 6, 9, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 620, 5, 27, 11, 6, 6, 8, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 610, 5, 26, 10, 6, 5, 8, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 600, 5, 26, 10, 6, 5, 8, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 590, 5, 25, 10, 5, 5, 8, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 580, 4, 25, 10, 5, 5, 8, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 570, 4, 24, 10, 5, 5, 7, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 560, 4, 24, 10, 5, 5, 7, 8, 4, 7), },
{ DSIM_DPHY_TIMING( 550, 4, 24, 10, 5, 5, 7, 8, 4, 6), },
{ DSIM_DPHY_TIMING( 540, 4, 23, 10, 5, 5, 7, 8, 4, 6), },
{ DSIM_DPHY_TIMING( 530, 4, 23, 10, 5, 5, 7, 7, 3, 6), },
{ DSIM_DPHY_TIMING( 520, 4, 22, 10, 5, 5, 7, 7, 3, 6), },
{ DSIM_DPHY_TIMING( 510, 4, 22, 10, 5, 5, 6, 7, 3, 6), },
{ DSIM_DPHY_TIMING( 500, 4, 21, 10, 5, 4, 6, 7, 3, 6), },
{ DSIM_DPHY_TIMING( 490, 4, 21, 10, 5, 4, 6, 7, 3, 6), },
{ DSIM_DPHY_TIMING( 480, 4, 21, 9, 4, 4, 6, 7, 3, 6), },
{ DSIM_DPHY_TIMING( 470, 3, 20, 9, 4, 4, 6, 7, 3, 5), },
{ DSIM_DPHY_TIMING( 460, 3, 20, 9, 4, 4, 5, 7, 3, 5), },
{ DSIM_DPHY_TIMING( 450, 3, 19, 9, 4, 4, 5, 7, 3, 5), },
{ DSIM_DPHY_TIMING( 440, 3, 19, 9, 4, 4, 5, 7, 3, 5), },
{ DSIM_DPHY_TIMING( 430, 3, 18, 9, 4, 4, 5, 7, 3, 5), },
{ DSIM_DPHY_TIMING( 420, 3, 18, 9, 4, 4, 5, 6, 3, 5), },
{ DSIM_DPHY_TIMING( 410, 3, 17, 9, 4, 4, 5, 6, 3, 5), },
{ DSIM_DPHY_TIMING( 400, 3, 17, 9, 4, 3, 4, 6, 3, 5), },
{ DSIM_DPHY_TIMING( 390, 3, 17, 9, 4, 3, 4, 6, 2, 4), },
{ DSIM_DPHY_TIMING( 380, 3, 16, 9, 4, 3, 4, 6, 2, 4), },
{ DSIM_DPHY_TIMING( 370, 2, 16, 9, 3, 3, 4, 6, 2, 4), },
{ DSIM_DPHY_TIMING( 360, 2, 15, 9, 3, 3, 4, 6, 2, 4), },
{ DSIM_DPHY_TIMING( 350, 2, 15, 9, 3, 3, 3, 6, 2, 4), },
{ DSIM_DPHY_TIMING( 340, 2, 14, 8, 3, 3, 3, 6, 2, 4), },
{ DSIM_DPHY_TIMING( 330, 2, 14, 8, 3, 3, 3, 6, 2, 4), },
{ DSIM_DPHY_TIMING( 320, 2, 14, 8, 3, 3, 3, 5, 2, 4), },
{ DSIM_DPHY_TIMING( 310, 2, 13, 8, 3, 3, 3, 5, 2, 3), },
{ DSIM_DPHY_TIMING( 300, 2, 13, 8, 3, 3, 3, 5, 2, 3), },
{ DSIM_DPHY_TIMING( 290, 2, 12, 8, 3, 2, 2, 5, 2, 3), },
{ DSIM_DPHY_TIMING( 280, 2, 12, 8, 3, 2, 2, 5, 2, 3), },
{ DSIM_DPHY_TIMING( 270, 2, 11, 8, 3, 2, 2, 5, 2, 3), },
{ DSIM_DPHY_TIMING( 260, 1, 11, 8, 3, 2, 2, 5, 1, 3), },
{ DSIM_DPHY_TIMING( 250, 1, 10, 8, 2, 2, 2, 5, 1, 3), },
{ DSIM_DPHY_TIMING( 240, 1, 9, 8, 2, 2, 1, 5, 1, 3), },
{ DSIM_DPHY_TIMING( 230, 1, 8, 8, 2, 2, 1, 5, 1, 2), },
{ DSIM_DPHY_TIMING( 220, 1, 8, 8, 2, 2, 1, 5, 1, 2), },
{ DSIM_DPHY_TIMING( 210, 1, 7, 7, 2, 2, 1, 4, 1, 2), },
{ DSIM_DPHY_TIMING( 200, 1, 7, 7, 2, 2, 1, 4, 1, 2), },
{ DSIM_DPHY_TIMING( 190, 1, 7, 7, 2, 1, 1, 4, 1, 2), },
{ DSIM_DPHY_TIMING( 180, 1, 6, 7, 2, 1, 0, 4, 1, 2), },
{ DSIM_DPHY_TIMING( 170, 1, 6, 7, 2, 1, 0, 4, 1, 2), },
{ DSIM_DPHY_TIMING( 160, 1, 6, 7, 2, 1, 0, 4, 1, 2), },
{ DSIM_DPHY_TIMING( 150, 0, 5, 7, 2, 1, 0, 4, 1, 1), },
{ DSIM_DPHY_TIMING( 140, 0, 5, 7, 1, 1, 0, 4, 1, 1), },
{ DSIM_DPHY_TIMING( 130, 0, 4, 7, 1, 1, 0, 4, 0, 1), },
{ DSIM_DPHY_TIMING( 120, 0, 4, 7, 1, 1, 0, 4, 0, 1), },
{ DSIM_DPHY_TIMING( 110, 0, 3, 7, 1, 0, 0, 4, 0, 1), },
{ DSIM_DPHY_TIMING( 100, 0, 3, 7, 1, 0, 0, 3, 0, 1), },
{ DSIM_DPHY_TIMING( 90, 0, 2, 7, 1, 0, 0, 3, 0, 1), },
{ DSIM_DPHY_TIMING( 80, 0, 2, 6, 1, 0, 0, 3, 0, 1), },
};
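/*
 * Editorial sketch, not taken from the driver: since the table above is
 * sorted in descending 'bit_clk' order, one illustrative way to pick a row
 * is a linear scan for the first entry not faster than the requested rate.
 * The real selection policy is delegated to the dphy_timing_cmp callback in
 * the platform data, so treat this helper as an assumption-laden example.
 */
static inline const struct sec_mipi_dsim_dphy_timing *
example_pick_dphy_timing(unsigned int bit_clk_mhz)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(dphy_timing_ln14lpp_v1p2); i++)
if (dphy_timing_ln14lpp_v1p2[i].bit_clk <= bit_clk_mhz)
return &dphy_timing_ln14lpp_v1p2[i];
return NULL; /* below the slowest supported bit clock */
}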
#endif


@ -0,0 +1,433 @@
/*
* Samsung MIPI DSI Host Controller on IMX
*
* Copyright 2018-2019 NXP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/busfreq-imx.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <drm/bridge/sec_mipi_dsim.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "imx-drm.h"
#include "sec_mipi_dphy_ln14lpp.h"
#include "sec_mipi_pll_1432x.h"
#define DRIVER_NAME "imx_sec_dsim_drv"
struct imx_sec_dsim_device {
struct device *dev;
struct drm_encoder encoder;
struct reset_control *soft_resetn;
struct reset_control *clk_enable;
struct reset_control *mipi_reset;
atomic_t rpm_suspended;
};
#define enc_to_dsim(enc) container_of(enc, struct imx_sec_dsim_device, encoder)
static struct imx_sec_dsim_device *dsim_dev;
#ifdef CONFIG_PM
static int imx_sec_dsim_runtime_suspend(struct device *dev);
static int imx_sec_dsim_runtime_resume(struct device *dev);
#else
static int imx_sec_dsim_runtime_suspend(struct device *dev)
{
return 0;
}
static int imx_sec_dsim_runtime_resume(struct device *dev)
{
return 0;
}
#endif
static int sec_dsim_rstc_reset(struct reset_control *rstc, bool assert)
{
int ret;
if (!rstc)
return 0;
ret = assert ? reset_control_assert(rstc) :
reset_control_deassert(rstc);
return ret;
}
static void imx_sec_dsim_encoder_helper_enable(struct drm_encoder *encoder)
{
int ret;
struct imx_sec_dsim_device *dsim_dev = enc_to_dsim(encoder);
pm_runtime_get_sync(dsim_dev->dev);
ret = sec_dsim_rstc_reset(dsim_dev->mipi_reset, false);
if (ret)
dev_err(dsim_dev->dev, "deassert mipi_reset failed\n");
}
static void imx_sec_dsim_encoder_helper_disable(struct drm_encoder *encoder)
{
int ret;
struct imx_sec_dsim_device *dsim_dev = enc_to_dsim(encoder);
ret = sec_dsim_rstc_reset(dsim_dev->mipi_reset, true);
if (ret)
dev_err(dsim_dev->dev, "deassert mipi_reset failed\n");
pm_runtime_put_sync(dsim_dev->dev);
}
static int imx_sec_dsim_encoder_helper_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
int i, ret;
u32 bus_format;
unsigned int num_bus_formats;
struct imx_sec_dsim_device *dsim_dev = enc_to_dsim(encoder);
struct drm_bridge *bridge = encoder->bridge;
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *display_info = &conn_state->connector->display_info;
num_bus_formats = display_info->num_bus_formats;
if (unlikely(!num_bus_formats))
dev_warn(dsim_dev->dev, "no bus formats assigned by connector\n");
bus_format = adjusted_mode->private_flags & 0xffff;
for (i = 0; i < num_bus_formats; i++) {
if (display_info->bus_formats[i] != bus_format)
continue;
break;
}
if (i && i == num_bus_formats) {
dev_err(dsim_dev->dev, "invalid bus format for connector\n");
return -EINVAL;
}
/* check pll out */
ret = sec_mipi_dsim_check_pll_out(bridge->driver_private,
adjusted_mode);
if (ret)
return ret;
/* sec dsim can only accept active high DE */
imx_crtc_state->bus_flags |= DRM_BUS_FLAG_DE_HIGH;
/* For the dotclock polarity, the default is the negative edge;
 * the dsim spec has no explicit wording about the dotclock
 * polarity requirement.
 */
imx_crtc_state->bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
/* set the bus format for CRTC output which should be
* the same as the bus format between dsim and connector,
* since dsim cannot do any pixel conversions.
*/
imx_crtc_state->bus_format = bus_format;
return 0;
}
static const struct drm_encoder_helper_funcs imx_sec_dsim_encoder_helper_funcs = {
.enable = imx_sec_dsim_encoder_helper_enable,
.disable = imx_sec_dsim_encoder_helper_disable,
.atomic_check = imx_sec_dsim_encoder_helper_atomic_check,
};
static const struct drm_encoder_funcs imx_sec_dsim_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
static const struct sec_mipi_dsim_plat_data imx8mm_mipi_dsim_plat_data = {
.version = 0x1060200,
.max_data_lanes = 4,
.max_data_rate = 1500000000ULL,
.dphy_pll = &pll_1432x,
.dphy_timing = dphy_timing_ln14lpp_v1p2,
.num_dphy_timing = ARRAY_SIZE(dphy_timing_ln14lpp_v1p2),
.dphy_timing_cmp = dphy_timing_default_cmp,
.mode_valid = NULL,
};
static const struct of_device_id imx_sec_dsim_dt_ids[] = {
{
.compatible = "fsl,imx8mm-mipi-dsim",
.data = &imx8mm_mipi_dsim_plat_data,
},
{
.compatible = "fsl,imx8mn-mipi-dsim",
.data = &imx8mm_mipi_dsim_plat_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_sec_dsim_dt_ids);
static int sec_dsim_of_parse_resets(struct imx_sec_dsim_device *dsim)
{
int ret;
struct device *dev = dsim->dev;
struct device_node *np = dev->of_node;
struct device_node *parent, *child;
struct of_phandle_args args;
struct reset_control *rstc;
const char *compat;
uint32_t len, rstc_num = 0;
ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
0, &args);
if (ret)
return ret;
parent = args.np;
for_each_child_of_node(parent, child) {
compat = of_get_property(child, "compatible", NULL);
if (!compat)
continue;
rstc = of_reset_control_array_get(child, false, false, true);
if (IS_ERR(rstc))
continue;
len = strlen(compat);
if (!of_compat_cmp("dsi,soft-resetn", compat, len)) {
dsim->soft_resetn = rstc;
rstc_num++;
} else if (!of_compat_cmp("dsi,clk-enable", compat, len)) {
dsim->clk_enable = rstc;
rstc_num++;
} else if (!of_compat_cmp("dsi,mipi-reset", compat, len)) {
dsim->mipi_reset = rstc;
rstc_num++;
} else {
dev_warn(dev, "invalid dsim reset node: %s\n", compat);
}
}
if (!rstc_num) {
dev_err(dev, "no invalid reset control exists\n");
return -EINVAL;
}
return 0;
}
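
/* Release whichever reset controls were acquired during parsing. */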
static void sec_dsim_of_put_resets(struct imx_sec_dsim_device *dsim)
{
if (dsim->soft_resetn)
reset_control_put(dsim->soft_resetn);
if (dsim->clk_enable)
reset_control_put(dsim->clk_enable);
if (dsim->mipi_reset)
reset_control_put(dsim->mipi_reset);
}
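
/*
 * Component bind: allocate the device state, collect MMIO, IRQ and
 * reset resources, register the DRM encoder, then hand control over
 * to the shared sec_mipi_dsim bridge layer.
 */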
static int imx_sec_dsim_bind(struct device *dev, struct device *master,
void *data)
{
int ret, irq;
struct resource *res;
struct drm_device *drm_dev = data;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
const struct of_device_id *of_id = of_match_device(imx_sec_dsim_dt_ids,
dev);
const struct sec_mipi_dsim_plat_data *pdata = of_id->data;
struct drm_encoder *encoder;
dev_dbg(dev, "%s: dsim bind begin\n", __func__);
dsim_dev = devm_kzalloc(dev, sizeof(*dsim_dev), GFP_KERNEL);
if (!dsim_dev) {
dev_err(dev, "Unable to allocate 'dsim_dev'\n");
return -ENOMEM;
}
dsim_dev->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENODEV;
ret = sec_dsim_of_parse_resets(dsim_dev);
if (ret)
return ret;
encoder = &dsim_dev->encoder;
ret = imx_drm_encoder_parse_of(drm_dev, encoder, np);
if (ret)
return ret;
drm_encoder_helper_add(encoder, &imx_sec_dsim_encoder_helper_funcs);
ret = drm_encoder_init(drm_dev, encoder,
&imx_sec_dsim_encoder_funcs,
DRM_MODE_ENCODER_DSI, dev_name(dev));
if (ret)
return ret;
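/*
 * The rpm_suspended counter ends up at 1 here (set to 0, then
 * incremented), so the controller is treated as runtime-suspended
 * until the first runtime resume.
 */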
atomic_set(&dsim_dev->rpm_suspended, 0);
pm_runtime_enable(dev);
atomic_inc(&dsim_dev->rpm_suspended);
/* bind sec dsim bridge */
ret = sec_mipi_dsim_bind(dev, master, data, encoder, res, irq, pdata);
if (ret) {
dev_err(dev, "failed to bind sec dsim bridge: %d\n", ret);
pm_runtime_disable(dev);
drm_encoder_cleanup(encoder);
sec_dsim_of_put_resets(dsim_dev);
return ret;
}
dev_dbg(dev, "%s: dsim bind end\n", __func__);
return 0;
}
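
/*
 * Component unbind: disable runtime PM, detach the DSIM bridge and
 * clean up the encoder.
 */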
static void imx_sec_dsim_unbind(struct device *dev, struct device *master,
void *data)
{
pm_runtime_disable(dev);
sec_mipi_dsim_unbind(dev, master, data);
drm_encoder_cleanup(&dsim_dev->encoder);
}
static const struct component_ops imx_sec_dsim_ops = {
.bind = imx_sec_dsim_bind,
.unbind = imx_sec_dsim_unbind,
};
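
/*
 * probe()/remove() only register with the component framework; the
 * real setup happens in imx_sec_dsim_bind() once the master DRM
 * device binds its components.
 */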
static int imx_sec_dsim_probe(struct platform_device *pdev)
{
dev_dbg(&pdev->dev, "%s: dsim probe begin\n", __func__);
return component_add(&pdev->dev, &imx_sec_dsim_ops);
}
static int imx_sec_dsim_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &imx_sec_dsim_ops);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int imx_sec_dsim_suspend(struct device *dev)
{
return imx_sec_dsim_runtime_suspend(dev);
}
static int imx_sec_dsim_resume(struct device *dev)
{
return imx_sec_dsim_runtime_resume(dev);
}
#endif
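
/*
 * Runtime PM uses the rpm_suspended counter to keep suspend/resume
 * balanced: only the 0 -> 1 transition suspends the DSIM core and
 * releases the bus-frequency request, and only the 1 -> 0 transition
 * re-requests the bus frequency, releases the resets and resumes the
 * core.
 */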
#ifdef CONFIG_PM
static int imx_sec_dsim_runtime_suspend(struct device *dev)
{
if (atomic_inc_return(&dsim_dev->rpm_suspended) > 1)
return 0;
sec_mipi_dsim_suspend(dev);
release_bus_freq(BUS_FREQ_HIGH);
return 0;
}
static int imx_sec_dsim_runtime_resume(struct device *dev)
{
int ret;
if (unlikely(!atomic_read(&dsim_dev->rpm_suspended))) {
dev_warn(dsim_dev->dev,
"Unbalanced %s!\n", __func__);
return 0;
}
if (!atomic_dec_and_test(&dsim_dev->rpm_suspended))
return 0;
request_bus_freq(BUS_FREQ_HIGH);
ret = sec_dsim_rstc_reset(dsim_dev->soft_resetn, false);
if (ret) {
dev_err(dev, "deassert soft_resetn failed\n");
return ret;
}
ret = sec_dsim_rstc_reset(dsim_dev->clk_enable, true);
if (ret) {
dev_err(dev, "assert clk_enable failed\n");
return ret;
}
ret = sec_dsim_rstc_reset(dsim_dev->mipi_reset, false);
if (ret) {
dev_err(dev, "deassert mipi_reset failed\n");
return ret;
}
sec_mipi_dsim_resume(dev);
return 0;
}
#endif
static const struct dev_pm_ops imx_sec_dsim_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(imx_sec_dsim_suspend,
imx_sec_dsim_resume)
SET_RUNTIME_PM_OPS(imx_sec_dsim_runtime_suspend,
imx_sec_dsim_runtime_resume,
NULL)
};
struct platform_driver imx_sec_dsim_driver = {
.probe = imx_sec_dsim_probe,
.remove = imx_sec_dsim_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = imx_sec_dsim_dt_ids,
.pm = &imx_sec_dsim_pm_ops,
},
};
module_platform_driver(imx_sec_dsim_driver);
MODULE_DESCRIPTION("NXP i.MX MIPI DSI Host Controller driver");
MODULE_AUTHOR("Fancy Fang <chen.fang@nxp.com>");
MODULE_LICENSE("GPL");
